/bpy-2.91a0-cp37-cp37m-manylinux2014_x86_64.whl/bpy-2.91a0.data/scripts/2.91/scripts/addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_nodes.py
import math
import bpy
from mathutils import Matrix, Quaternion, Vector
from . import gltf2_blender_export_keys
from io_scene_gltf2.blender.com import gltf2_blender_math
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
from io_scene_gltf2.blender.exp import gltf2_blender_gather_skins
from io_scene_gltf2.blender.exp import gltf2_blender_gather_cameras
from io_scene_gltf2.blender.exp import gltf2_blender_gather_mesh
from io_scene_gltf2.blender.exp import gltf2_blender_gather_joints
from io_scene_gltf2.blender.exp import gltf2_blender_gather_lights
from ..com.gltf2_blender_extras import generate_extras
from io_scene_gltf2.io.com import gltf2_io
from io_scene_gltf2.io.com import gltf2_io_extensions
from io_scene_gltf2.io.exp.gltf2_io_user_extensions import export_user_extensions
from io_scene_gltf2.io.com.gltf2_io_debug import print_console
def gather_node(blender_object, library, blender_scene, dupli_object_parent, export_settings):
    # Custom cache to avoid a cache miss when called from animation export
    # with blender_scene=None.
    # Invalidate the cache if the export settings have changed.
if not hasattr(gather_node, "__export_settings") or export_settings != gather_node.__export_settings:
gather_node.__cache = {}
gather_node.__export_settings = export_settings
if blender_scene is None and (blender_object.name, library) in gather_node.__cache:
return gather_node.__cache[(blender_object.name, library)]
node = __gather_node(blender_object, library, blender_scene, dupli_object_parent, export_settings)
gather_node.__cache[(blender_object.name, library)] = node
return node
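# Illustrative note (not part of the exporter logic): the cache key is the
# (object name, library) pair, so a local object and a linked object sharing a
# name get separate entries, e.g.
#   gather_node.__cache[("Cube", None)]           # local datablock
#   gather_node.__cache[("Cube", "props.blend")]  # hypothetical linked library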
@cached
def __gather_node(blender_object, library, blender_scene, dupli_object_parent, export_settings):
    # If blender_scene is None, we are coming from animation export.
    # The check on whether the object should be exported was already done when
    # the object itself was exported, so we don't re-check here whether it is instanced in the scene.
if not __filter_node(blender_object, blender_scene, export_settings):
return None
node = gltf2_io.Node(
camera=__gather_camera(blender_object, export_settings),
children=__gather_children(blender_object, blender_scene, export_settings),
extensions=__gather_extensions(blender_object, export_settings),
extras=__gather_extras(blender_object, export_settings),
matrix=__gather_matrix(blender_object, export_settings),
mesh=__gather_mesh(blender_object, library, export_settings),
name=__gather_name(blender_object, export_settings),
rotation=None,
scale=None,
skin=__gather_skin(blender_object, export_settings),
translation=None,
weights=__gather_weights(blender_object, export_settings)
)
    # If the node mesh is skinned, transforms should be ignored at import, so no need to set them here
if node.skin is None:
node.translation, node.rotation, node.scale = __gather_trans_rot_scale(blender_object, export_settings)
if export_settings[gltf2_blender_export_keys.YUP]:
        # Checking node.extensions makes sure the lamp type is supported and will be exported
if blender_object.type == 'LIGHT' and export_settings[gltf2_blender_export_keys.LIGHTS] and node.extensions:
correction_node = __get_correction_node(blender_object, export_settings)
correction_node.extensions = {"KHR_lights_punctual": node.extensions["KHR_lights_punctual"]}
del node.extensions["KHR_lights_punctual"]
node.children.append(correction_node)
if blender_object.type == 'CAMERA' and export_settings[gltf2_blender_export_keys.CAMERAS]:
correction_node = __get_correction_node(blender_object, export_settings)
correction_node.camera = node.camera
node.children.append(correction_node)
node.camera = None
export_user_extensions('gather_node_hook', export_settings, node, blender_object)
return node
def __filter_node(blender_object, blender_scene, export_settings):
if blender_object.users == 0:
return False
if blender_scene is not None:
        instanced = any(blender_object.name in layer.objects for layer in blender_scene.view_layers)
        if not instanced:
            # Check if the object comes from a linked collection
            linked = any(
                blender_object.name in coll.objects
                for coll in bpy.data.collections if coll.library is not None
            )
            if not linked:
                # Not instanced, not linked -> we don't keep this object
                return False
if export_settings[gltf2_blender_export_keys.SELECTED] and blender_object.select_get() is False:
return False
return True
def __gather_camera(blender_object, export_settings):
if blender_object.type != 'CAMERA':
return None
return gltf2_blender_gather_cameras.gather_camera(blender_object.data, export_settings)
def __gather_children(blender_object, blender_scene, export_settings):
children = []
# standard children
for _child_object in blender_object.children:
if _child_object.parent_bone:
# this is handled further down,
# as the object should be a child of the specific bone,
# not the Armature object
continue
child_object = _child_object.proxy if _child_object.proxy else _child_object
node = gather_node(child_object,
child_object.library.name if child_object.library else None,
blender_scene, None, export_settings)
if node is not None:
children.append(node)
# blender dupli objects
if blender_object.instance_type == 'COLLECTION' and blender_object.instance_collection:
for dupli_object in blender_object.instance_collection.objects:
if dupli_object.parent is not None:
continue
if dupli_object.type == "ARMATURE":
continue # There is probably a proxy
node = gather_node(dupli_object,
dupli_object.library.name if dupli_object.library else None,
blender_scene, blender_object.name, export_settings)
if node is not None:
children.append(node)
# blender bones
if blender_object.type == "ARMATURE":
root_joints = []
if export_settings["gltf_def_bones"] is False:
bones = blender_object.pose.bones
else:
bones, _, _ = gltf2_blender_gather_skins.get_bone_tree(None, blender_object)
bones = [blender_object.pose.bones[b.name] for b in bones]
for blender_bone in bones:
if not blender_bone.parent:
joint = gltf2_blender_gather_joints.gather_joint(blender_object, blender_bone, export_settings)
children.append(joint)
root_joints.append(joint)
# handle objects directly parented to bones
direct_bone_children = [child for child in blender_object.children if child.parent_bone]
def find_parent_joint(joints, name):
for joint in joints:
if joint.name == name:
return joint
parent_joint = find_parent_joint(joint.children, name)
if parent_joint:
return parent_joint
return None
for child in direct_bone_children:
# find parent joint
parent_joint = find_parent_joint(root_joints, child.parent_bone)
if not parent_joint:
continue
child_node = gather_node(child, None, None, None, export_settings)
if child_node is None:
continue
blender_bone = blender_object.pose.bones[parent_joint.name]
            # fix rotation (the decomposed quaternion below overrides any previous rotation)
            if export_settings[gltf2_blender_export_keys.YUP]:
                axis_basis_change = Matrix(
                    ((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, -1.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)))
                mat = child.matrix_parent_inverse @ child.matrix_basis
                mat = mat @ axis_basis_change
                _, rot_quat, _ = mat.decompose()
                child_node.rotation = [rot_quat[1], rot_quat[2], rot_quat[3], rot_quat[0]]
            # fix translation (in Blender, the bone's tail is the origin for children)
trans, _, _ = child.matrix_local.decompose()
if trans is None:
trans = [0, 0, 0]
# bones go down their local y axis
if blender_bone.matrix.to_scale()[1] >= 1e-6:
bone_tail = [0, blender_bone.length / blender_bone.matrix.to_scale()[1], 0]
else:
                bone_tail = [0, 0, 0]  # If scale is 0, tail == head
child_node.translation = [trans[idx] + bone_tail[idx] for idx in range(3)]
parent_joint.children.append(child_node)
return children
def __gather_extensions(blender_object, export_settings):
extensions = {}
if export_settings["gltf_lights"] and (blender_object.type == "LAMP" or blender_object.type == "LIGHT"):
blender_lamp = blender_object.data
light = gltf2_blender_gather_lights.gather_lights_punctual(
blender_lamp,
export_settings
)
if light is not None:
light_extension = gltf2_io_extensions.ChildOfRootExtension(
name="KHR_lights_punctual",
path=["lights"],
extension=light
)
extensions["KHR_lights_punctual"] = gltf2_io_extensions.Extension(
name="KHR_lights_punctual",
extension={
"light": light_extension
}
)
return extensions if extensions else None
def __gather_extras(blender_object, export_settings):
if export_settings['gltf_extras']:
return generate_extras(blender_object)
return None
def __gather_matrix(blender_object, export_settings):
# return blender_object.matrix_local
return []
def __gather_mesh(blender_object, library, export_settings):
if blender_object.type in ['CURVE', 'SURFACE', 'FONT']:
return __gather_mesh_from_nonmesh(blender_object, library, export_settings)
if blender_object.type != "MESH":
return None
    # If vertex groups are not used, they are irrelevant for caching --> ensure they do not trigger a cache miss
vertex_groups = blender_object.vertex_groups
modifiers = blender_object.modifiers
if len(vertex_groups) == 0:
vertex_groups = None
if len(modifiers) == 0:
modifiers = None
if export_settings[gltf2_blender_export_keys.APPLY]:
armature_modifiers = {}
if export_settings[gltf2_blender_export_keys.SKINS]:
# temporarily disable Armature modifiers if exporting skins
for idx, modifier in enumerate(blender_object.modifiers):
if modifier.type == 'ARMATURE':
armature_modifiers[idx] = modifier.show_viewport
modifier.show_viewport = False
depsgraph = bpy.context.evaluated_depsgraph_get()
blender_mesh_owner = blender_object.evaluated_get(depsgraph)
blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph)
for prop in blender_object.data.keys():
blender_mesh[prop] = blender_object.data[prop]
skip_filter = True
if export_settings[gltf2_blender_export_keys.SKINS]:
# restore Armature modifiers
for idx, show_viewport in armature_modifiers.items():
blender_object.modifiers[idx].show_viewport = show_viewport
else:
blender_mesh = blender_object.data
skip_filter = False
        # If no skins are exported, vertex groups are not needed; keeping them would cause a cache miss
if not export_settings[gltf2_blender_export_keys.SKINS]:
vertex_groups = None
modifiers = None
else:
            # Check if there is an armature modifier
if len([mod for mod in blender_object.modifiers if mod.type == "ARMATURE"]) == 0:
vertex_groups = None # Not needed if no armature, avoid a cache miss
modifiers = None
materials = tuple(ms.material for ms in blender_object.material_slots)
material_names = tuple(None if mat is None else mat.name for mat in materials)
    # retrieve armature
    # Because mesh data will be transformed to skeleton space,
    # we can't instantiate multiple objects at different locations skinned by the same armature
    blender_object_for_skinned_data = None
if export_settings[gltf2_blender_export_keys.SKINS]:
for idx, modifier in enumerate(blender_object.modifiers):
if modifier.type == 'ARMATURE':
                blender_object_for_skinned_data = blender_object
    result = gltf2_blender_gather_mesh.gather_mesh(blender_mesh,
                                                   library,
                                                   blender_object_for_skinned_data,
                                                   vertex_groups,
                                                   modifiers,
                                                   skip_filter,
                                                   material_names,
                                                   export_settings)
if export_settings[gltf2_blender_export_keys.APPLY]:
blender_mesh_owner.to_mesh_clear()
return result
def __gather_mesh_from_nonmesh(blender_object, library, export_settings):
"""Handles curves, surfaces, text, etc."""
needs_to_mesh_clear = False
try:
# Convert to a mesh
try:
if export_settings[gltf2_blender_export_keys.APPLY]:
depsgraph = bpy.context.evaluated_depsgraph_get()
blender_mesh_owner = blender_object.evaluated_get(depsgraph)
blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph)
# TODO: do we need preserve_all_data_layers?
else:
blender_mesh_owner = blender_object
blender_mesh = blender_mesh_owner.to_mesh()
except Exception:
return None
needs_to_mesh_clear = True
skip_filter = True
material_names = tuple([ms.material.name for ms in blender_object.material_slots if ms.material is not None])
vertex_groups = None
modifiers = None
        blender_object_for_skinned_data = None
        result = gltf2_blender_gather_mesh.gather_mesh(blender_mesh,
                                                       library,
                                                       blender_object_for_skinned_data,
                                                       vertex_groups,
                                                       modifiers,
                                                       skip_filter,
                                                       material_names,
                                                       export_settings)
finally:
if needs_to_mesh_clear:
blender_mesh_owner.to_mesh_clear()
return result
def __gather_name(blender_object, export_settings):
return blender_object.name
def __gather_trans_rot_scale(blender_object, export_settings):
if blender_object.matrix_parent_inverse == Matrix.Identity(4):
trans = blender_object.location
if blender_object.rotation_mode in ['QUATERNION', 'AXIS_ANGLE']:
rot = blender_object.rotation_quaternion
else:
rot = blender_object.rotation_euler.to_quaternion()
sca = blender_object.scale
else:
# matrix_local = matrix_parent_inverse*location*rotation*scale
# Decomposing matrix_local gives less accuracy, but is needed if matrix_parent_inverse is not the identity.
if blender_object.matrix_local[3][3] != 0.0:
trans, rot, sca = blender_object.matrix_local.decompose()
else:
            # Some really weird cases: scale is zero (e.g. when the parent is null at evaluation time)
print_console('WARNING', 'Some nodes are 0 scaled during evaluation. Result can be wrong')
trans = blender_object.location
if blender_object.rotation_mode in ['QUATERNION', 'AXIS_ANGLE']:
rot = blender_object.rotation_quaternion
else:
rot = blender_object.rotation_euler.to_quaternion()
sca = blender_object.scale
# make sure the rotation is normalized
rot.normalize()
trans = __convert_swizzle_location(trans, export_settings)
rot = __convert_swizzle_rotation(rot, export_settings)
sca = __convert_swizzle_scale(sca, export_settings)
if blender_object.instance_type == 'COLLECTION' and blender_object.instance_collection:
trans -= __convert_swizzle_location(
blender_object.instance_collection.instance_offset, export_settings)
translation, rotation, scale = (None, None, None)
trans[0], trans[1], trans[2] = gltf2_blender_math.round_if_near(trans[0], 0.0), gltf2_blender_math.round_if_near(trans[1], 0.0), \
gltf2_blender_math.round_if_near(trans[2], 0.0)
rot[0], rot[1], rot[2], rot[3] = gltf2_blender_math.round_if_near(rot[0], 1.0), gltf2_blender_math.round_if_near(rot[1], 0.0), \
gltf2_blender_math.round_if_near(rot[2], 0.0), gltf2_blender_math.round_if_near(rot[3], 0.0)
sca[0], sca[1], sca[2] = gltf2_blender_math.round_if_near(sca[0], 1.0), gltf2_blender_math.round_if_near(sca[1], 1.0), \
gltf2_blender_math.round_if_near(sca[2], 1.0)
if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0:
translation = [trans[0], trans[1], trans[2]]
if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0:
rotation = [rot[1], rot[2], rot[3], rot[0]]
if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0:
scale = [sca[0], sca[1], sca[2]]
return translation, rotation, scale
def __gather_skin(blender_object, export_settings):
modifiers = {m.type: m for m in blender_object.modifiers}
if "ARMATURE" not in modifiers or modifiers["ARMATURE"].object is None:
return None
# no skin needed when the modifier is linked without having a vertex group
vertex_groups = blender_object.vertex_groups
if len(vertex_groups) == 0:
return None
# check if any vertices in the mesh are part of a vertex group
depsgraph = bpy.context.evaluated_depsgraph_get()
blender_mesh_owner = blender_object.evaluated_get(depsgraph)
blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph)
if not any(vertex.groups is not None and len(vertex.groups) > 0 for vertex in blender_mesh.vertices):
return None
    # Prevent infinite recursion: a mesh can't both have an Armature modifier
    # and be bone-parented to a bone of that same armature.
    # In that case, ignore the armature modifier and keep only the bone parenting.
if blender_object.parent is not None \
and blender_object.parent_type == 'BONE' \
and blender_object.parent.name == modifiers["ARMATURE"].object.name:
return None
    # Skins and meshes must be in the same glTF node, which is different from how Blender handles armatures
return gltf2_blender_gather_skins.gather_skin(modifiers["ARMATURE"].object, export_settings)
def __gather_weights(blender_object, export_settings):
return None
def __get_correction_node(blender_object, export_settings):
correction_quaternion = __convert_swizzle_rotation(
Quaternion((1.0, 0.0, 0.0), math.radians(-90.0)), export_settings)
correction_quaternion = [correction_quaternion[1], correction_quaternion[2],
correction_quaternion[3], correction_quaternion[0]]
return gltf2_io.Node(
camera=None,
children=[],
extensions=None,
extras=None,
matrix=None,
mesh=None,
name=blender_object.name + '_Orientation',
rotation=correction_quaternion,
scale=None,
skin=None,
translation=None,
weights=None
)
def __convert_swizzle_location(loc, export_settings):
"""Convert a location from Blender coordinate system to glTF coordinate system."""
if export_settings[gltf2_blender_export_keys.YUP]:
return Vector((loc[0], loc[2], -loc[1]))
else:
return Vector((loc[0], loc[1], loc[2]))
def __convert_swizzle_rotation(rot, export_settings):
"""
Convert a quaternion rotation from Blender coordinate system to glTF coordinate system.
'w' is still at first position.
"""
if export_settings[gltf2_blender_export_keys.YUP]:
return Quaternion((rot[0], rot[1], rot[3], -rot[2]))
else:
return Quaternion((rot[0], rot[1], rot[2], rot[3]))
def __convert_swizzle_scale(scale, export_settings):
"""Convert a scale from Blender coordinate system to glTF coordinate system."""
if export_settings[gltf2_blender_export_keys.YUP]:
return Vector((scale[0], scale[2], scale[1]))
else:
return Vector((scale[0], scale[1], scale[2]))
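# Worked example of the swizzles above (derived directly from the code): with
# +Y up requested, a Blender (Z-up) location (1, 2, 3) becomes the glTF (Y-up)
# location (1, 3, -2); scale has no handedness, so only its Y and Z components
# swap: (sx, sy, sz) -> (sx, sz, sy).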
/binarylane_cli-0.16.0.tar.gz/binarylane_cli-0.16.0/src/binarylane/console/commands/api/post_v_2_servers_server_id_actions_uncancel.py
from __future__ import annotations
from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple, Union
from binarylane.api.server_actions.post_v_2_servers_server_id_actions_uncancel import sync_detailed
from binarylane.models.action_response import ActionResponse
from binarylane.models.problem_details import ProblemDetails
from binarylane.models.uncancel import Uncancel
from binarylane.models.uncancel_type import UncancelType
from binarylane.models.validation_problem_details import ValidationProblemDetails
if TYPE_CHECKING:
from binarylane.client import Client
import binarylane.console.commands.api.get_v2_servers as api_get_v2_servers
from binarylane.console.parser import Mapping, PrimitiveAttribute
from binarylane.console.runners.action import ActionRunner
class CommandRequest:
server_id: int
json_body: Uncancel
def __init__(self, server_id: int, json_body: Uncancel) -> None:
self.server_id = server_id
self.json_body = json_body
class Command(ActionRunner):
@property
def reference_url(self) -> str:
return "https://api.binarylane.com.au/reference/#tag/ServerActions/paths/~1v2~1servers~1%7Bserver_id%7D~1actions#Uncancel/post"
def create_mapping(self) -> Mapping:
mapping = Mapping(CommandRequest)
def lookup_server_id(ref: str) -> Union[None, int]:
return api_get_v2_servers.Command(self._context).lookup(ref)
mapping.add(
PrimitiveAttribute(
"server_id",
int,
required=True,
option_name=None,
metavar="server",
description="""The ID or name of the server on which the action should be performed.""",
lookup=lookup_server_id,
)
)
json_body = mapping.add_json_body(Uncancel)
json_body.add(
PrimitiveAttribute(
"type",
UncancelType,
required=True,
option_name="type",
)
)
return mapping
@property
def ok_response_type(self) -> type:
return ActionResponse
def request(
self,
client: Client,
request: object,
) -> Tuple[HTTPStatus, Union[ActionResponse, None, ProblemDetails, ValidationProblemDetails]]:
assert isinstance(request, CommandRequest)
# HTTPStatus.OK: ActionResponse
# HTTPStatus.ACCEPTED: Any
# HTTPStatus.BAD_REQUEST: ValidationProblemDetails
# HTTPStatus.NOT_FOUND: ProblemDetails
# HTTPStatus.UNPROCESSABLE_ENTITY: ProblemDetails
# HTTPStatus.UNAUTHORIZED: Any
page_response = sync_detailed(
server_id=request.server_id,
client=client,
json_body=request.json_body,
)
return page_response.status_code, page_response.parsed
/mpl-add-ons-0.0.9.tar.gz/mpl-add-ons-0.0.9/matplotlib_add_ons/choose_report_windows.py
from tkinter import filedialog
from matplotlib.backends.backend_pdf import PdfPages
import tkinter as tk
import ttkbootstrap as tkb
class SafeToplevel(tkb.Toplevel):
def destroy(self):
try:
super().destroy() # Call the original destroy method
except tk.TclError:
pass # Ignore the error
class ChooseReportWindows:
def __init__(self, figures):
self.master = tk.Tk()
self.master.withdraw() # Hide the main window
root = SafeToplevel()
root.title("Save Report")
root.bind("<Destroy>", self.close_master)
self.figures = figures
self.num_checkboxes = len(figures)
self.selected_values = []
self.save_location = None
# create the checkbox widgets
self.checkboxes = []
for i in range(self.num_checkboxes):
var = tk.IntVar(value=1)
checkbox = tk.Checkbutton(root, text=f"Window {i + 1}", variable=var)
checkbox.grid(row=i + 1, column=0, sticky='w', padx=10)
self.checkboxes.append(var)
# create the save location label and entry box
save_location_label = tk.Label(root, text="Save Location:")
save_location_label.grid(row=0, column=0, padx=5, pady=5, sticky='e')
self.save_location_entry = tk.Entry(root)
self.save_location_entry.grid(row=0, column=1, padx=5, pady=5, sticky='w')
# create the Save As button
save_button = tk.Button(root, text="Choose Save Location", command=self.choose_save_location)
save_button.grid(row=0, column=2, padx=5, pady=5, sticky='w')
# create the submit button
submit_button = tk.Button(root, text="Save", command=self.submit)
submit_button.grid(row=self.num_checkboxes + 1, column=0, columnspan=2, pady=10)
# create the cancel button
cancel_button = tk.Button(root, text="Exit Widget", command=root.destroy)
cancel_button.grid(row=self.num_checkboxes + 1, column=1, columnspan=2, pady=10)
root.mainloop()
def close_master(self, event):
self.master.destroy()
def choose_save_location(self):
file_path = filedialog.asksaveasfilename(title="Choose Save Location", defaultextension=".pdf",
filetypes=(("Pdf", "*.pdf"), ("All Files", "*.*")))
if file_path:
self.save_location_entry.delete(0, 'end')
self.save_location_entry.insert(0, file_path)
def submit(self):
# get the values of the selected checkboxes
self.selected_values = []
for i in range(self.num_checkboxes):
if self.checkboxes[i].get() == 1:
self.selected_values.append(i + 1)
# get the save location
self.save_location = self.save_location_entry.get()
self.save_figures(self.save_location, self.selected_values)
def save_figures(self, file_path, selected_figures):
# Create a PDF file and save the figures in it
if file_path and len(selected_figures) != 0:
with PdfPages(file_path) as pdf:
for i, figure in enumerate(self.figures):
if i + 1 in selected_figures:
pdf.savefig(figure)
print(f"Report Saved to location: {file_path}")
else:
print("Could not save due to missing save location or no windows selected.")
if __name__ == '__main__':
pass
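    # Minimal usage sketch (illustrative only; the figures below are assumed,
    # not part of the package):
    # import matplotlib.pyplot as plt
    # fig1, ax1 = plt.subplots(); ax1.plot([0, 1], [1, 0])
    # fig2, ax2 = plt.subplots(); ax2.bar(["a", "b"], [3, 5])
    # ChooseReportWindows([fig1, fig2])  # opens the save-report dialog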
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/me/drives/item/items/item/validate_permission/validate_permission_request_builder.py
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union
from . import validate_permission_post_request_body
from .......models.o_data_errors import o_data_error
class ValidatePermissionRequestBuilder():
"""
Provides operations to call the validatePermission method.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new ValidatePermissionRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/me/drives/{drive%2Did}/items/{driveItem%2Did}/microsoft.graph.validatePermission"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def create_post_request_information(self,body: Optional[validate_permission_post_request_body.ValidatePermissionPostRequestBody] = None, request_configuration: Optional[ValidatePermissionRequestBuilderPostRequestConfiguration] = None) -> RequestInformation:
"""
Invoke action validatePermission
Args:
body:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.POST
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
request_info.set_content_from_parsable(self.request_adapter, "application/json", body)
return request_info
async def post(self,body: Optional[validate_permission_post_request_body.ValidatePermissionPostRequestBody] = None, request_configuration: Optional[ValidatePermissionRequestBuilderPostRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> None:
"""
Invoke action validatePermission
Args:
body:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = self.create_post_request_information(
body, request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_no_response_content_async(request_info, response_handler, error_mapping)
@dataclass
class ValidatePermissionRequestBuilderPostRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
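# Hypothetical usage sketch (wiring inferred from the constructor and post()
# signatures above, not from SDK documentation):
#   builder = ValidatePermissionRequestBuilder(
#       request_adapter, {"drive%2Did": drive_id, "driveItem%2Did": item_id})
#   body = validate_permission_post_request_body.ValidatePermissionPostRequestBody()
#   await builder.post(body)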
/taskcc-alipay-sdk-python-3.3.398.tar.gz/taskcc-alipay-sdk-python-3.3.398/alipay/aop/api/domain/AlipayBusinessOrderCreateModel.py
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.UserIdentity import UserIdentity
from alipay.aop.api.domain.ControlInfo import ControlInfo
from alipay.aop.api.domain.EnvInfo import EnvInfo
from alipay.aop.api.domain.ItemDetail import ItemDetail
from alipay.aop.api.domain.MarketingSelectionInfo import MarketingSelectionInfo
class AlipayBusinessOrderCreateModel(object):
def __init__(self):
self._buyer_identity = None
self._control_info = None
self._env_info = None
self._item_list = None
self._merchant_order_no = None
self._order_amount = None
self._selected_marketing = None
self._seller_identity = None
self._title = None
@property
def buyer_identity(self):
return self._buyer_identity
@buyer_identity.setter
def buyer_identity(self, value):
if isinstance(value, UserIdentity):
self._buyer_identity = value
else:
self._buyer_identity = UserIdentity.from_alipay_dict(value)
@property
def control_info(self):
return self._control_info
@control_info.setter
def control_info(self, value):
if isinstance(value, ControlInfo):
self._control_info = value
else:
self._control_info = ControlInfo.from_alipay_dict(value)
@property
def env_info(self):
return self._env_info
@env_info.setter
def env_info(self, value):
if isinstance(value, EnvInfo):
self._env_info = value
else:
self._env_info = EnvInfo.from_alipay_dict(value)
@property
def item_list(self):
return self._item_list
@item_list.setter
def item_list(self, value):
if isinstance(value, list):
self._item_list = list()
for i in value:
if isinstance(i, ItemDetail):
self._item_list.append(i)
else:
self._item_list.append(ItemDetail.from_alipay_dict(i))
@property
def merchant_order_no(self):
return self._merchant_order_no
@merchant_order_no.setter
def merchant_order_no(self, value):
self._merchant_order_no = value
@property
def order_amount(self):
return self._order_amount
@order_amount.setter
def order_amount(self, value):
self._order_amount = value
@property
def selected_marketing(self):
return self._selected_marketing
@selected_marketing.setter
def selected_marketing(self, value):
if isinstance(value, MarketingSelectionInfo):
self._selected_marketing = value
else:
self._selected_marketing = MarketingSelectionInfo.from_alipay_dict(value)
@property
def seller_identity(self):
return self._seller_identity
@seller_identity.setter
def seller_identity(self, value):
if isinstance(value, UserIdentity):
self._seller_identity = value
else:
self._seller_identity = UserIdentity.from_alipay_dict(value)
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
def to_alipay_dict(self):
params = dict()
if self.buyer_identity:
if hasattr(self.buyer_identity, 'to_alipay_dict'):
params['buyer_identity'] = self.buyer_identity.to_alipay_dict()
else:
params['buyer_identity'] = self.buyer_identity
if self.control_info:
if hasattr(self.control_info, 'to_alipay_dict'):
params['control_info'] = self.control_info.to_alipay_dict()
else:
params['control_info'] = self.control_info
if self.env_info:
if hasattr(self.env_info, 'to_alipay_dict'):
params['env_info'] = self.env_info.to_alipay_dict()
else:
params['env_info'] = self.env_info
if self.item_list:
if isinstance(self.item_list, list):
for i in range(0, len(self.item_list)):
element = self.item_list[i]
if hasattr(element, 'to_alipay_dict'):
self.item_list[i] = element.to_alipay_dict()
if hasattr(self.item_list, 'to_alipay_dict'):
params['item_list'] = self.item_list.to_alipay_dict()
else:
params['item_list'] = self.item_list
if self.merchant_order_no:
if hasattr(self.merchant_order_no, 'to_alipay_dict'):
params['merchant_order_no'] = self.merchant_order_no.to_alipay_dict()
else:
params['merchant_order_no'] = self.merchant_order_no
if self.order_amount:
if hasattr(self.order_amount, 'to_alipay_dict'):
params['order_amount'] = self.order_amount.to_alipay_dict()
else:
params['order_amount'] = self.order_amount
if self.selected_marketing:
if hasattr(self.selected_marketing, 'to_alipay_dict'):
params['selected_marketing'] = self.selected_marketing.to_alipay_dict()
else:
params['selected_marketing'] = self.selected_marketing
if self.seller_identity:
if hasattr(self.seller_identity, 'to_alipay_dict'):
params['seller_identity'] = self.seller_identity.to_alipay_dict()
else:
params['seller_identity'] = self.seller_identity
if self.title:
if hasattr(self.title, 'to_alipay_dict'):
params['title'] = self.title.to_alipay_dict()
else:
params['title'] = self.title
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayBusinessOrderCreateModel()
if 'buyer_identity' in d:
o.buyer_identity = d['buyer_identity']
if 'control_info' in d:
o.control_info = d['control_info']
if 'env_info' in d:
o.env_info = d['env_info']
if 'item_list' in d:
o.item_list = d['item_list']
if 'merchant_order_no' in d:
o.merchant_order_no = d['merchant_order_no']
if 'order_amount' in d:
o.order_amount = d['order_amount']
if 'selected_marketing' in d:
o.selected_marketing = d['selected_marketing']
if 'seller_identity' in d:
o.seller_identity = d['seller_identity']
if 'title' in d:
o.title = d['title']
return o
/monk_keras_cuda102-0.0.1-py3-none-any.whl/monk/gluon/models/params.py
from monk.gluon.models.imports import *
from monk.system.imports import *
from monk.gluon.models.models import combined_list_lower
@accepts(str, dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_model_name(name, system_dict):
'''
Set base model name for transfer learning.
Args:
name (str): Select from available models. Check via List_Models() function
system_dict (dict): System Dictionary
Returns:
dict: Updated system dictionary.
'''
if(name not in combined_list_lower):
msg = "Model name {} not in {}".format(name, combined_list_lower);
raise ConstraintError(msg);
system_dict["model"]["params"]["model_name"] = name;
return system_dict;
@accepts(bool, dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_device(value, system_dict):
'''
Set whether to use gpu or not
Args:
value (bool): If set as True, uses GPU
system_dict (dict): System Dictionary
Returns:
dict: Updated system dictionary.
'''
if(value and mx.context.num_gpus()==0):
msg = "GPU not accessible yet requested."
ConstraintWarning(msg)
system_dict["model"]["params"]["use_gpu"] = False;
else:
system_dict["model"]["params"]["use_gpu"] = value;
return system_dict;
@accepts(bool, dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_pretrained(value, system_dict):
'''
Set whether to use pretrained models or randomly initialized weights
Args:
        value (bool): If set as True, use weights pretrained on ImageNet- and COCO-like datasets.
                      Else, use randomly initialized weights.
system_dict (dict): System Dictionary
Returns:
dict: Updated system dictionary.
'''
system_dict["model"]["params"]["use_pretrained"] = value;
return system_dict;
@accepts(bool, dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_freeze_base_network(value, system_dict):
'''
Set whether to freeze base network or not
Args:
value (bool): If set as True, then base network's weights are freezed (cannot be trained)
system_dict (dict): System Dictionary
Returns:
dict: Updated system dictionary.
'''
system_dict["model"]["params"]["freeze_base_network"] = value;
return system_dict;
@accepts([str, list], dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_model_path(path, system_dict):
'''
Set path to custom weights for model
Args:
path (str): Path to custom model weights for initialization.
system_dict (dict): System Dictionary
Returns:
dict: Updated system dictionary.
'''
system_dict["model"]["params"]["model_path"] = path;
return system_dict;
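# Illustrative call chain (assumed workflow; the model name is hypothetical):
# system_dict = set_model_name("resnet50_v1", system_dict);
# system_dict = set_device(True, system_dict);    # falls back to CPU if no GPU is found
# system_dict = set_pretrained(True, system_dict);
# system_dict = set_freeze_base_network(False, system_dict);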
/yt-dlp-custom-0.0.1.tar.gz/yt-dlp-custom-0.0.1/yt_dlp/extractor/zeenews.py
from .common import InfoExtractor
from ..utils import ExtractorError, traverse_obj
class ZeeNewsIE(InfoExtractor):
_VALID_URL = r'https?://zeenews\.india\.com/[^#?]+/video/(?P<display_id>[^#/?]+)/(?P<id>\d+)'
_TESTS = [
{
'url': 'https://zeenews.india.com/hindi/india/delhi-ncr-haryana/delhi-ncr/video/greater-noida-video-viral-on-social-media-attackers-beat-businessman-and-his-son-oppose-market-closed-atdnh/1402138',
'info_dict': {
'id': '1402138',
'ext': 'mp4',
'title': 'Greater Noida Video: हमलावरों ने दिनदहाड़े दुकान में घुसकर की मारपीट, देखें वीडियो',
'display_id': 'greater-noida-video-viral-on-social-media-attackers-beat-businessman-and-his-son-oppose-market-closed-atdnh',
'upload_date': '20221019',
'thumbnail': r're:^https?://.*\.jpg*',
'timestamp': 1666174501,
'view_count': int,
'duration': 97,
'description': 'ग्रेटर नोएडा जारचा थाना क्षेत्र के प्याबली में दिनदहाड़े दुकान में घुसकर अज्ञात हमलावरों ने हमला कर',
}
},
{
'url': 'https://zeenews.india.com/hindi/india/video/videsh-superfast-queen-elizabeth-iis-funeral-today/1357710',
'info_dict': {
'id': '1357710',
'ext': 'mp4',
'title': 'Videsh Superfast: महारानी के अंतिम संस्कार की तैयारी शुरू',
'display_id': 'videsh-superfast-queen-elizabeth-iis-funeral-today',
'upload_date': '20220919',
'thumbnail': r're:^https?://.*\.jpg*',
'timestamp': 1663556881,
'view_count': int,
'duration': 133,
'description': 'सेगमेंट विदेश सुपराफास्ट में देखिए देश और दुनिया की सभी बड़ी खबरें, वो भी हर खबर फटाफट अंदाज में.',
}
}
]
def _real_extract(self, url):
content_id, display_id = self._match_valid_url(url).group('id', 'display_id')
webpage = self._download_webpage(url, content_id)
json_ld_list = list(self._yield_json_ld(webpage, display_id))
embed_url = traverse_obj(
json_ld_list, (lambda _, v: v['@type'] == 'VideoObject', 'embedUrl'), get_all=False)
if not embed_url:
raise ExtractorError('No video found', expected=True)
formats = self._extract_m3u8_formats(embed_url, content_id, 'mp4')
return {
**self._json_ld(json_ld_list, display_id),
'id': content_id,
'display_id': display_id,
'formats': formats,
}
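    # Illustrative note on the traverse_obj lookup above: with get_all=False it
    # returns the 'embedUrl' of the first JSON-LD object whose '@type' is
    # 'VideoObject', e.g.
    #   [{'@type': 'NewsArticle'}, {'@type': 'VideoObject', 'embedUrl': 'https://example.com/master.m3u8'}]
    # yields 'https://example.com/master.m3u8'.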
/criteo_api_retailmedia_sdk-2023.7.0.230831-py3-none-any.whl/criteo_api_retailmedia_v2023_07/model/json_api_single_response_of_campaign_v202301.py
import re # noqa: F401
import sys # noqa: F401
from criteo_api_retailmedia_v2023_07.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from criteo_api_retailmedia_v2023_07.exceptions import ApiAttributeError
def lazy_import():
from criteo_api_retailmedia_v2023_07.model.common_error import CommonError
from criteo_api_retailmedia_v2023_07.model.common_warning import CommonWarning
from criteo_api_retailmedia_v2023_07.model.json_api_body_with_id_of_int64_and_campaign_v202301_and_campaign_v202301 import JsonApiBodyWithIdOfInt64AndCampaignV202301AndCampaignV202301
globals()['CommonError'] = CommonError
globals()['CommonWarning'] = CommonWarning
globals()['JsonApiBodyWithIdOfInt64AndCampaignV202301AndCampaignV202301'] = JsonApiBodyWithIdOfInt64AndCampaignV202301AndCampaignV202301
class JsonApiSingleResponseOfCampaignV202301(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': (JsonApiBodyWithIdOfInt64AndCampaignV202301AndCampaignV202301,), # noqa: E501
'errors': ([CommonError],), # noqa: E501
'warnings': ([CommonWarning],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
'errors': 'errors', # noqa: E501
'warnings': 'warnings', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, data, *args, **kwargs): # noqa: E501
"""JsonApiSingleResponseOfCampaignV202301 - a model defined in OpenAPI
Args:
data (JsonApiBodyWithIdOfInt64AndCampaignV202301AndCampaignV202301):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
errors ([CommonError]): [optional] # noqa: E501
warnings ([CommonWarning]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.data = data
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, data, *args, **kwargs): # noqa: E501
"""JsonApiSingleResponseOfCampaignV202301 - a model defined in OpenAPI
Args:
data (JsonApiBodyWithIdOfInt64AndCampaignV202301AndCampaignV202301):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
errors ([CommonError]): [optional] # noqa: E501
warnings ([CommonWarning]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.data = data
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
/mroi-fd-amenji-0.0.4.tar.gz/mroi-fd-amenji-0.0.4/README.md
## Installation
This project is developed and tested on Python `3.7.4`. Check your Python
version using `python --version`. If your system has a different version, you
may want to consider using [pyenv](https://github.com/pyenv/pyenv) (see
[Using pyenv](#using-pyenv-linux-windows-macos)).
First, clone and `cd` into the repository:
```
git clone https://gitlab.com/ailabuser/bacha_hybrid_mroi_face_detection
cd bacha_hybrid_mroi_face_detection
```
### On Windows:
Create a python virtual environment:
```
mkvirtualenv venv
```
Activate the virtual environment (`deactivate` to deactivate the virtual
environment):
```
workon venv
```
Install all the required dependencies while still having the virtual environment
active:
```
pip install -r requirements.txt
```
### On Linux:
Create a python virtual environment:
```
virtualenv -p /usr/bin/python3 venv
```
Activate the virtual environment (`deactivate` to deactivate the virtual
environment):
```
source venv/bin/activate
```
Install all the required dependencies while still having the virtual environment
active:
```
pip install -r requirements.txt
```
### Using pyenv (Linux, Windows, MacOS)
If your python version is not 3.7.4, you may want to use
[pyenv](https://github.com/pyenv/pyenv). After you have installed `pyenv`,
install the specific python version. On Linux, this can be done by running the
following command:
```
env PYTHON_CONFIGURE_OPTS="--enable-shared" pyenv install 3.7.4
```
Then, create a virtual environment:
```
pyenv virtualenv 3.7.4 bacha_mroi_face_detection
```
You can activate the virtual environment using `pyenv` like so:
```
pyenv activate bacha_mroi_face_detection
```
## Description
The face detection technique uses a hybrid margin-based region of interest (MROI)
approach. It is hybrid in the sense that the implementation runs one main
routine to detect a face, but switches to an escape routine when the main routine
fails. Using MROI increases the face detection speed by having the selected face
detection algorithm consider only a sub-region (where a face was previously
detected) instead of the full frame.
There are three pre-defined main routines available for you to use:
1. Haar cascade classifier
2. Joint cascade
3. Multi-task Convolutional Neural Network (MTCNN)
When the main routine fails to detect a face, the implementation switches to the
escape routine, which runs a template matching algorithm.
Furthermore, there are five different hybrid combinations of the face
detection approach, in addition to a non-hybrid approach that uses only the main
routine.
1. Normal routine only (N)
2. Normal routine with fixed-margin (FM)
3. Normal routine with dynamic-margin (DM)
4. Normal routine with escape routine (NTM)
5. Normal routine with fixed-margin and escape routine (FMTM)
6. Normal routine with dynamic-margin and escape routine (DMTM)
Three video sources are also supported:
1. Webcam
2. Kinect
3. Video files
For example, to use the Haar cascade classifier as the main routine with the FM
approach on image frames from your webcam, run the following on Linux (with the
virtual environment active):
```
./main.py webcam haar fm
```
Run the program without any arguments (or with `-h`) to print a help message
with more information about its usage.
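If the remaining routines and modes follow the same `<source> <routine> <mode>`
argument pattern (an assumption on our part; check the help message for the
exact argument names), running MTCNN with the DMTM approach on the webcam would
presumably look like:
```
./main.py webcam mtcnn dmtm
```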
## Example
### Using the hybrid MROI for your face detection implementation
In order to use your face detection algorithm with the hybrid MROI face
detector, you need to create a subclass which inherits from `FaceDetector` and
overrides its `detect` method. The implementation requires the `detect` method
to return either a face ROI or `None`; otherwise, the hybrid MROI face detector
may fail.
Here's an example, in which we use a [python implementation of
MTCNN](https://pypi.org/project/mtcnn/):
```python
import cv2
from mtcnn.mtcnn import MTCNN
from routines import FaceDetector
class MROI_MTCNN(FaceDetector):
def __init__(self):
# The main routine face detector object used to detect faces.
fd_obj = MTCNN()
# Initialize using base class constructor. We pass the face detector
# object (fd_obj) and use the MROI with fixed-margin approach with
# a template matching escape routine.
super().__init__(fd_obj, mode=FaceDetector.FMTM)
@staticmethod
def detect(fd_obj, image):
rgb_src = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
result = fd_obj.detect_faces(rgb_src)
if len(result) > 0:
return result[0]['box']
else:
return None
```
Internally, this is basically how the pre-defined hybrid MROI face detectors
(i.e., `MROI_HaarCascade` and `MROI_MTCNN`) were defined. Simply import them with
```python
from routines import MROI_HaarCascade, MROI_MTCNN
```
### Running the face detector
To use the face detector, simply instantiate the hybrid MROI face detector and
run it by invoking its `run` method. Below is a simple script that runs the
face detector and feeds it images in a loop.
```python
fd = MROI_MTCNN()
fd.run()  # This runs the face detector in the background.

cap = cv2.VideoCapture("/path/to/video/file")
while True:
    ret, frame = cap.read()
    # No more images; exit.
    if not ret:
        break
    # Feed the image into the face detector.
    fd.input_image(frame)
# Get the ROI containing the face. This will be `None` if no face is
# detected.
ROI = fd.get_face_region()
if ROI is not None:
x, y, w, h = ROI
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow("MROI_MTCNN Face Detector", frame)
if cv2.waitKey(1) == ord('q'):
break
```
```
/spark_nlp_jsl_tmp-5.0.0-py3-none-any.whl/sparknlp_jsl/annotator/embeddings/entity_chunk_embeddings.py
from sparknlp.annotator import BertSentenceEmbeddings
from sparknlp.common import *
class EntityChunkEmbeddings(BertSentenceEmbeddings):
"""
    Weighted average embeddings of multiple named entity chunk annotations
====================== =======================
Input Annotation types Output Annotation type
====================== =======================
``DEPENDENCY, CHUNK`` ``SENTENCE_EMBEDDINGS``
====================== =======================
Parameters
----------
targetEntities
Target entities and their related entities
entityWeights
Relative weights of entities.
maxSyntacticDistance
Maximal syntactic distance between related entities. Default value is 2.
Examples
--------
>>> import sparknlp
>>> from sparknlp.base import *
>>> from sparknlp.common import *
>>> from sparknlp.annotator import *
>>> from sparknlp.training import *
>>> import sparknlp_jsl
>>> from sparknlp_jsl.base import *
>>> from sparknlp_jsl.annotator import *
>>> from pyspark.ml import Pipeline
>>> documenter = DocumentAssembler()\
... .setInputCol("text")\
... .setOutputCol("documents")
>>> sentence_detector = SentenceDetector() \
... .setInputCols("documents") \
... .setOutputCol("sentences")
>>> tokenizer = Tokenizer() \
... .setInputCols("sentences") \
... .setOutputCol("tokens")
>>> embeddings = WordEmbeddingsModel() \
... .pretrained("embeddings_clinical", "en", "clinical/models")\
... .setInputCols(["sentences", "tokens"])\
... .setOutputCol("embeddings")
>>> ner_model = MedicalNerModel()\
... .pretrained("ner_posology_large", "en", "clinical/models")\
... .setInputCols(["sentences", "tokens", "embeddings"])\
... .setOutputCol("ner")
>>> ner_converter = NerConverterInternal()\
... .setInputCols("sentences", "tokens", "ner")\
... .setOutputCol("ner_chunks")
>>> pos_tager = PerceptronModel()\
... .pretrained("pos_clinical", "en", "clinical/models")\
... .setInputCols("sentences", "tokens")\
... .setOutputCol("pos_tags")
>>> dependency_parser = DependencyParserModel()\
... .pretrained("dependency_conllu", "en")\
... .setInputCols(["sentences", "pos_tags", "tokens"])\
... .setOutputCol("dependencies")
>>> drug_chunk_embeddings = EntityChunkEmbeddings()\
... .pretrained("sbiobert_base_cased_mli","en","clinical/models")\
... .setInputCols(["ner_chunks", "dependencies"])\
... .setOutputCol("drug_chunk_embeddings")\
... .setMaxSyntacticDistance(3)\
    ... .setTargetEntities({"DRUG": []})\
    ... .setEntityWeights({"DRUG": 0.8, "STRENGTH": 0.2, "DOSAGE": 0.2, "FORM": 0.5})
    >>> sampleData = "The patient was given metformin 125 mg, 250 mg of coumadin and then one pill paracetamol"
>>> data = SparkContextForTest.spark.createDataFrame([[sampleData]]).toDF("text")
>>> pipeline = Pipeline().setStages([
... documenter,
... sentence_detector,
... tokenizer,
... embeddings,
... ner_model,
... ner_converter,
... pos_tager,
... dependency_parser,
... drug_chunk_embeddings])
>>> results = pipeline.fit(data).transform(data)
>>> results = results \
... .selectExpr("explode(drug_chunk_embeddings) AS drug_chunk") \
... .selectExpr("drug_chunk.result", "slice(drug_chunk.embeddings, 1, 5) AS drug_embedding") \
... .cache()
>>> results.show(truncate=False)
+-----------------------------+-----------------------------------------------------------------+
    | result                     | drug_embedding                                                  |
+-----------------------------+-----------------------------------------------------------------+
|metformin 125 mg |[-0.267413, 0.07614058, -0.5620966, 0.83838946, 0.8911504] |
|250 mg coumadin |[0.22319649, -0.07094894, -0.6885556, 0.79176235, 0.82672405] |
|one pill paracetamol |[-0.10939768, -0.29242, -0.3574444, 0.3981813, 0.79609615] |
+-----------------------------+-----------------------------------------------------------------+
"""
name = "EntityChunkEmbeddings"
entityWeights = Param(Params._dummy(),
"entityWeights",
"Relative weights of named entities.",
typeConverter=TypeConverters.identity)
targetEntities = Param(Params._dummy(),
"targetEntities",
"Target entities and the entities they are related to.",
typeConverter=TypeConverters.identity)
maxSyntacticDistance = Param(Params._dummy(), "maxSyntacticDistance",
"Maximal syntactic distance between related DRUG and non-DRUG entities",
TypeConverters.toInt)
def setEntityWeights(self, weights={}):
"""Sets the relative weights of the embeddings of specific entities. By default the dictionary is empty and
all entities have equal weights. If non-empty and some entity is not in it, then its weight is set to 0.
Parameters
----------
weights : dict[str, float]
Dictionary with the relative weights of entities. The notation TARGET_ENTITY:RELATED_ENTITY can be used to
specify the weight of an entity which is related to a specific target entity (e.g. "DRUG:SYMPTOM": 0.3).
Entity names are case-insensitive.
"""
self._call_java("setEntityWeights",weights)
def setTargetEntities(self, entities={}):
"""Sets the target entities and maps them to their related entities. A target entity with an empty list of
related entities means all other entities are assumed to be related to it.
Parameters
----------
entities: dict[str, list[str]]
Dictionary with target and related entities (TARGET: [RELATED1, RELATED2,...]). If the list of related
entities is empty, then all non-target entities are considered.
Entity names are case-insensitive.
"""
self._call_java("setTargetEntities",entities)
def setMaxSyntacticDistance(self, distance):
"""Sets the maximal syntactic distance between related entities. Default value is 2.
Parameters
----------
distance : int
Maximal syntactic distance
"""
return self._set(maxSyntacticDistance=distance)
@keyword_only
def __init__(self, classname="com.johnsnowlabs.nlp.annotators.embeddings.EntityChunkEmbeddings", java_model=None):
super(EntityChunkEmbeddings, self).__init__(
classname=classname,
java_model=java_model
)
self._setDefault(
dimension=768,
batchSize=32,
maxSentenceLength=128,
caseSensitive=True,
maxSyntacticDistance=2
)
@staticmethod
def pretrained(name="sbiobert_base_cased_mli", lang="en", remote_loc="clinical/models"):
from sparknlp_jsl.pretrained import InternalResourceDownloader
return InternalResourceDownloader.downloadModel(EntityChunkEmbeddings, name, lang, remote_loc,
j_dwn='InternalsPythonResourceDownloader')
|
PypiClean
|
/hpc_acm_cli-2.8.4.tar.gz/hpc_acm_cli-2.8.4/hpc_acm_cli/clus.py
|
from __future__ import print_function
import time
import datetime
import sys
from tqdm import tqdm
from hpc_acm.rest import ApiException
from hpc_acm_cli.command import Command
from hpc_acm_cli.utils import print_table, match_names, shorten, arrange
from hpc_acm_cli.async_op import async_wait, AsyncOp
class Clusrun(Command):
@classmethod
def profile(cls):
return {
'description': '''
HPC diagnostic client for querying/creating/canceling clusrun jobs.
For help on a subcommand (list|show|new|cancel), execute "%(prog)s {subcommand} -h"
'''
}
@classmethod
def subcommands(cls, config):
return [
{
'name': 'list',
'options': {
'help': 'list clusrun jobs',
},
'params': [
{
'name': '--count',
'options': {
'help': 'number of jobs to query',
'type': int,
'default': config.getint('DEFAULT', 'count', fallback=None)
}
},
{
'name': '--last-id',
'options': { 'help': 'the job id after which (exclusive) to query' }
},
{
'name': '--asc',
'options': { 'help': 'query in id-ascending order', 'action': 'store_true' }
},
],
},
{
'name': 'show',
'options': {
'help': 'show a clusrun job',
},
'params': [
{
'name': 'id',
'options': { 'help': 'job id', }
},
{
'name': '--short',
'options': {
'help': 'do not show task output',
'action': 'store_true'
}
},
],
},
{
'name': 'new',
'options': {
'help': 'create a new clusrun job',
},
'params': [
{
'group': True,
'items': [
{
'name': '--nodes',
'options': {
'help': 'names of nodes. Multiple names are separated by spaces and quoted as one string, like "node1 node2 node3". Either this or the --pattern parameter must be provided.',
}
},
{
'name': '--pattern',
'options': {
'help': 'name pattern of nodes. Either this or the --nodes parameter must be provided.',
'default': config.get('DEFAULT', 'pattern', fallback=None)
}
},
]
},
{
'name': 'command_line',
'options': { 'help': 'command to run on nodes', 'metavar': 'command' }
},
{
'name': '--short',
'options': {
'help': 'do not show task output',
'action': 'store_true'
}
},
],
},
{
'name': 'cancel',
'options': {
'help': 'cancel a job',
},
'params': [
{
'name': 'ids',
'options': { 'help': 'job id', 'metavar': 'id', 'nargs': '+' }
},
],
},
]
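# Editorial note: example invocations, assuming the installed console script is
# named "clusrun" (the actual entry-point name is defined by the package setup):
#
#   clusrun list --count 10
#   clusrun show 42 --short
#   clusrun new --pattern "node*" "hostname"
#   clusrun cancel 42 43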
def list(self):
jobs = self.api.get_clusrun_jobs(reverse=not self.args.asc, count=self.args.count, last_id=self.args.last_id)
self.print_jobs(jobs)
def show(self):
job = self.api.get_clusrun_job(self.args.id)
if self.args.short:
self.show_in_short(job)
else:
self.show_progressing(job)
def new(self):
if self.args.nodes:
nodes = self.args.nodes.split()
elif self.args.pattern:
all_nodes = self.api.get_nodes(count=1000000)
names = [n.name for n in all_nodes]
nodes = match_names(names, self.args.pattern)
else:
raise ValueError('Either nodes or pattern parameter must be provided!')
job = {
"name": "Command@%s" % datetime.datetime.now().isoformat(),
"targetNodes": nodes,
"commandLine": self.args.command_line,
}
job = self.api.create_clusrun_job(job = job)
if self.args.short:
self.show_in_short(job)
else:
self.show_progressing(job)
def cancel(self):
for id in self.args.ids:
try:
self.api.cancel_clusrun_job(id, job = { "request": "cancel" })
print("Job %s is canceled." % id)
except ApiException as e:
print("Failed to cancel job %s. Error:\n" % id, e)
def print_jobs(self, jobs, in_short=True):
target_nodes = {
'title': 'Target nodes',
'value': lambda j: len(j.target_nodes)
}
command = {
'title': 'Command',
'value': lambda j: shorten(j.command_line, 60) if in_short else arrange(j.command_line, 60)
}
print_table(['id', command, 'state', target_nodes, 'created_at'], jobs)
def show_in_short(self, job):
self.print_jobs([job], in_short=False)
self.list_tasks(job)
def show_progressing(self, job):
self.print_jobs([job])
self.show_task_outputs(job)
def list_tasks(self, job):
def task_info(task, result):
return {
'id': task.id,
'node': task.node,
'state': task.state,
'result_url': '%s/output/clusrun/%s/raw' % (self.args.host, result.result_key) if result else ''
}
tasks = self.api.get_clusrun_tasks(job.id, count=len(job.target_nodes))
if not tasks:
print("No tasks created yet!")
return
task_results = self.wait_task_results(tasks)
results = [task_info(t[0], t[1]) for t in zip(tasks, task_results)]
print_table(['id', 'node', 'state', 'result_url'], results)
class GetTaskResult(AsyncOp):
def __init__(self, api, task):
self.api = api
self.async_task_result = self.api.get_clusrun_task_result(task.job_id, task.id, async=True)
self.task_result = None
self.ready = False
def get_result(self):
if self.ready:
return self.task_result
if not self.async_task_result.ready():
raise AsyncOp.NotReady()
self.ready = True
try:
self.task_result = self.async_task_result.get()
except ApiException: # 404
self.task_result = None
return self.task_result
def wait_task_results(self, tasks):
return async_wait([self.__class__.GetTaskResult(self.api, t) for t in tasks], desc='Loading tasks')
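# Editorial note: AsyncOp subclasses here follow a poll-based contract --
# get_result() either returns a value or raises AsyncOp.NotReady, and
# async_wait() keeps polling until every operation has produced a result.
# A minimal driver under that assumption (illustrative, not the real one):
#
#   def drive(ops):
#       results, pending = {}, set(range(len(ops)))
#       while pending:
#           for i in list(pending):
#               try:
#                   results[i] = ops[i].get_result()
#                   pending.discard(i)
#               except AsyncOp.NotReady:
#                   pass  # not finished; poll again on the next pass
#       return [results[i] for i in range(len(ops))]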
class GetTaskOutput(AsyncOp):
def __init__(self, api, task):
self.api = api
self.task = task
self.async_task_result = self.get_task_result()
self.task_result = None
self.async_last_page = None
self.async_output = None
self.output = None
self.ready = False
def get_task_result(self):
return self.api.get_clusrun_task_result(self.task.job_id, self.task.id, async=True)
def get_last_page(self):
return self.api.get_clusrun_output_in_page(self.task_result.result_key, offset=-1, page_size=2, async=True)
def get_output(self):
if not self.async_output.ready():
raise AsyncOp.NotReady()
file = self.async_output.get()
with open(file, "r") as f:
self.output = f.read()
self.ready = True
def try_get_last_page(self):
if not self.async_last_page.ready():
raise AsyncOp.NotReady()
try:
page = self.async_last_page.get()
except ApiException as e:
# When output is not created(404), try again
self.async_last_page = self.get_last_page()
else:
if not page.eof:
# When output is not over, try again
self.async_last_page = self.get_last_page()
else:
# When output is over
self.async_output = self.api.get_clusrun_output(self.task_result.result_key, async=True)
def try_get_task_result(self):
if not self.async_task_result.ready():
raise AsyncOp.NotReady()
try:
self.task_result = self.async_task_result.get()
except ApiException as e:
# When task result is not created(404), try again
self.async_task_result = self.get_task_result()
else:
self.async_last_page = self.get_last_page()
def get_result(self):
if self.ready:
return (self.task, self.task_result, self.output)
elif self.async_output:
self.get_output()
elif self.async_last_page:
self.try_get_last_page()
else:
self.try_get_task_result()
raise AsyncOp.NotReady()
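# Editorial note: GetTaskOutput.get_result() advances a three-stage state
# machine across successive polls: (1) wait for the task result, which yields
# result_key; (2) poll the last output page until page.eof is true, i.e. the
# task has stopped writing; (3) download the full output file and read it.
# Every poll either advances one stage or raises AsyncOp.NotReady.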
def show_task_outputs(self, job):
tasks = self.wait_tasks(job)
if not tasks:
print("No tasks created!")
return
def show_output(_, result):
task, task_result, output = result
print('#### %s(%s) ####' % (task.node, task_result.exit_code))
print(output or '')
async_wait([self.__class__.GetTaskOutput(self.api, t) for t in tasks], show_output, desc='Loading task output')
def wait_tasks(self, job):
while True:
job = self.api.get_clusrun_job(job.id)
tasks = self.api.get_clusrun_tasks(job.id, count=len(job.target_nodes))
if tasks or job.state in ['Finished', 'Failed', 'Canceled']:
break
time.sleep(1) # poll periodically instead of spinning on the API
return tasks
def main():
Clusrun.run()
if __name__ == '__main__':
main()
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/device_management/virtual_endpoint/cloud_p_cs/bulk_resize/bulk_resize_post_request_body.py
|
from __future__ import annotations
from kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
class BulkResizePostRequestBody(AdditionalDataHolder, Parsable):
def __init__(self,) -> None:
"""
Instantiates a new bulkResizePostRequestBody and sets the default values.
"""
# Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
self._additional_data: Dict[str, Any] = {}
# The cloudPcIds property
self._cloud_pc_ids: Optional[List[str]] = None
# The targetServicePlanId property
self._target_service_plan_id: Optional[str] = None
@property
def additional_data(self,) -> Dict[str, Any]:
"""
Gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Returns: Dict[str, Any]
"""
return self._additional_data
@additional_data.setter
def additional_data(self,value: Dict[str, Any]) -> None:
"""
Sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Args:
value: Value to set for the AdditionalData property.
"""
self._additional_data = value
@property
def cloud_pc_ids(self,) -> Optional[List[str]]:
"""
Gets the cloudPcIds property value. The cloudPcIds property
Returns: Optional[List[str]]
"""
return self._cloud_pc_ids
@cloud_pc_ids.setter
def cloud_pc_ids(self,value: Optional[List[str]] = None) -> None:
"""
Sets the cloudPcIds property value. The cloudPcIds property
Args:
value: Value to set for the cloud_pc_ids property.
"""
self._cloud_pc_ids = value
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> BulkResizePostRequestBody:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parse_node: The parse node to use to read the discriminator value and create the object
Returns: BulkResizePostRequestBody
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return BulkResizePostRequestBody()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
fields: Dict[str, Callable[[Any], None]] = {
"cloudPcIds": lambda n : setattr(self, 'cloud_pc_ids', n.get_collection_of_primitive_values(str)),
"targetServicePlanId": lambda n : setattr(self, 'target_service_plan_id', n.get_str_value()),
}
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information about the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
writer.write_collection_of_primitive_values("cloudPcIds", self.cloud_pc_ids)
writer.write_str_value("targetServicePlanId", self.target_service_plan_id)
writer.write_additional_data_value(self.additional_data)
@property
def target_service_plan_id(self,) -> Optional[str]:
"""
Gets the targetServicePlanId property value. The targetServicePlanId property
Returns: Optional[str]
"""
return self._target_service_plan_id
@target_service_plan_id.setter
def target_service_plan_id(self,value: Optional[str] = None) -> None:
"""
Sets the targetServicePlanId property value. The targetServicePlanId property
Args:
value: Value to set for the target_service_plan_id property.
"""
self._target_service_plan_id = value
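# Editorial note: an illustrative sketch (not from the generated file) of how
# this request body might be populated; the snake_case properties serialize to
# the camelCase JSON keys listed in get_field_deserializers()/serialize().
#
#   body = BulkResizePostRequestBody()
#   body.cloud_pc_ids = ["cpc-id-1", "cpc-id-2"]  # -> "cloudPcIds"
#   body.target_service_plan_id = "plan-guid"     # -> "targetServicePlanId"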
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_25/models/directory_get_response.py
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_25 import models
class DirectoryGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[Directory]',
'total': 'list[Directory]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items',
'total': 'total'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.Directory]
total=None, # type: List[models.Directory]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[Directory]): Displays a list of all items after filtering. The values are displayed for each name, if applicable.
total (list[Directory]): The aggregate value of all items after filtering. If applicable, the average value is displayed instead. The values are displayed for each field, if applicable.
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
if total is not None:
self.total = total
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `DirectoryGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `DirectoryGetResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `DirectoryGetResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `DirectoryGetResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DirectoryGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DirectoryGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
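# Editorial note: the dunder methods above make the model usable like a
# mapping restricted to the keys in attribute_map, e.g. (illustrative values):
#
#   resp = DirectoryGetResponse(total_item_count=3, more_items_remaining=False)
#   resp["continuation_token"] = "token"   # __setitem__ validates the key
#   resp.to_dict()                         # plain-dict view of the set fields
#   resp["bogus"] = 1                      # raises KeyError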
|
PypiClean
|
/sdnn-cl-2.2.0.tar.gz/sdnn-cl-2.2.0/tvm/relay/backend/contrib/ethosu/te/dma.py
|
"""Tensor Expressions for operations supported by the NPU DMA engine"""
from typing import Callable, Tuple, Optional, List
import tvm # type: ignore
from tvm import te
from tvm.topi.utils import equal_const_int # type: ignore
def _pad_tensor(
tensor: te.Tensor, pad_before: List[int], pad_after: Optional[List[int]] = None
) -> Callable:
"""Generate a padded tensor.
Parameters
----------
tensor : te.Tensor
The tensor to pad.
pad_before : list of int
The 'before' padding on each axis.
pad_after : list of int, optional
The 'after' padding on each axis. Defaults to pad_before.
Returns
-------
_pad : callable
A function mapping indices to values of the padded tensor.
"""
pad_after = pad_after or pad_before
dims = len(tensor.shape)
assert len(pad_before) == dims
assert len(pad_after) == dims
def _pad(*indices):
not_zero = [] # A list of padding conditions that aren't trivial (zero padding)
index_tuple = [] # The indices with which to access the padded tensor
for i in range(dims):
if equal_const_int(pad_before[i], 0) and equal_const_int(pad_after[i], 0):
index_tuple.append(indices[i])
else:
index_tuple.append(indices[i] - pad_before[i])
not_zero.append(indices[i] >= pad_before[i])
not_zero.append(indices[i] < tensor.shape[i] + pad_before[i])
if not_zero:
not_zero = tvm.tir.all(*not_zero)
return tvm.tir.if_then_else(
not_zero, tensor(*index_tuple), tvm.tir.const(0, tensor.dtype)
)
return tensor(*index_tuple)
return _pad
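# Editorial note: a worked example of the predicate built above. With
# pad_before = [0, 1, 1, 0] on an NHWC tensor of shape (N, H, W, C), the
# returned callable reads tensor(n, h - 1, w - 1, c) guarded by
# 1 <= h < H + 1 and 1 <= w < W + 1, so indices on the one-element border
# evaluate to tvm.tir.const(0, tensor.dtype) instead of an out-of-bounds read.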
def read_compute(tensor: te.Tensor, layout: str, zero_point: int, scale: float) -> te.Tensor:
"""A tensor expression which represents a read.
Parameters
----------
tensor : te.Tensor
The tensor to read.
layout : str
The layout of the tensor, either NHWC or NHCWB16.
zero_point : int
The zero point of the tensor.
scale : float
The scale of the tensor.
Returns
-------
te.Tensor
The tensor having been read.
"""
assert layout in {"NHWC", "NHCWB16"}
read_attrs = {
"op": "ethosu_read",
"layout": layout,
"zero_point": zero_point,
"scale": scale,
}
return te.compute(tensor.shape, lambda *i: tensor(*i), name="ethosu_read", attrs=read_attrs)
def write_compute(tensor: te.Tensor, layout: str, zero_point: int, scale: float) -> te.Tensor:
"""A tensor expression which represents a write.
Parameters
----------
tensor : te.Tensor
The tensor to write.
layout : str
The layout of the tensor, either NHWC or NHCWB16.
zero_point : int
The zero point of the tensor.
scale : float
The scale of the tensor.
Returns
-------
te.Tensor
The tensor having been written.
"""
assert layout in {"NHWC", "NHCWB16"}
write_attrs = {
"op": "ethosu_write",
"layout": layout,
"zero_point": zero_point,
"scale": scale,
}
return te.compute(
tensor.shape,
lambda *i: tensor(*i),
name="ethosu_write",
attrs=write_attrs,
)
def convert_to_nhwc_compute(tensor: te.Tensor, layout: str, channels: int) -> te.Tensor:
"""Converts a tensor into NHWC layout if it's in NHWCB16 layout.
Parameters
----------
tensor : te.Tensor
The tensor to convert.
layout : str
The layout of the tensor, either NHWC or NHCWB16.
channels : int
The number of valid channels for the tensor.
Returns
-------
te.Tensor
The converted tensor in NHWC layout.
"""
assert layout in {"NHWC", "NHCWB16"}
convert_to_nhwc_attrs = {
"op": "ethosu_convert_to_nhwc",
"layout": layout,
}
if layout == "NHCWB16":
return te.compute(
(tensor.shape[0], tensor.shape[1], tensor.shape[3], channels),
lambda nn, hh, ww, cc: tensor(nn, hh, te.indexdiv(cc, 16), ww, te.indexmod(cc, 16)),
name="ethosu_convert_to_nhwc",
attrs=convert_to_nhwc_attrs,
)
return te.compute(
tensor.shape,
lambda *i: tensor(*i),
name="ethosu_convert_to_nhwc",
attrs=convert_to_nhwc_attrs,
)
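# Editorial note: shape example. With layout == "NHCWB16", an input of shape
# (1, H, 2, W, 16) and channels = 20 produces an NHWC output of shape
# (1, H, W, 20); channel cc is read from brick cc // 16, lane cc % 16.
# NHWC inputs pass through unchanged (identity compute).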
def convert_to_nhcwb16_compute(tensor: te.Tensor, layout: str, channels: int) -> te.Tensor:
"""Converts a tensor into NHCWB16 layout if it's in NHWC layout.
Parameters
----------
tensor : te.Tensor
The tensor to convert.
layout : str
The layout of the tensor, either NHWC or NHCWB16.
channels : int
The number of valid channels for the tensor.
Returns
-------
te.Tensor
The converted tensor in NHCWB16 layout.
"""
assert layout in {"NHWC", "NHCWB16"}
convert_to_nhcwb16_attrs = {
"op": "ethosu_convert_to_nhcwb16",
"layout": layout,
}
if layout == "NHCWB16":
out_channel_bricks = te.indexdiv(channels - 1, 16) + 1
output_shape = (1, tensor.shape[1], out_channel_bricks, tensor.shape[2], 16)
return te.compute(
output_shape,
lambda nn, hh, cc, ww, cb: tvm.tir.if_then_else(
cc * 16 + cb < channels,
tensor(nn, hh, ww, cc * 16 + cb),
tvm.tir.IntImm(tensor.dtype, 0),
),
name="ethosu_convert_to_nhcwb16",
attrs=convert_to_nhcwb16_attrs,
)
return te.compute(
tensor.shape,
lambda *i: tensor(*i),
name="ethosu_convert_to_nhcwb16",
attrs=convert_to_nhcwb16_attrs,
)
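# Editorial note: the inverse shape example. With layout == "NHCWB16", an NHWC
# input of shape (1, H, W, 20) yields ceil(20 / 16) = 2 channel bricks, so the
# output shape is (1, H, 2, W, 16); lanes where cc * 16 + cb >= channels are
# zero-filled padding.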
def pad_compute(tensor: te.Tensor, padding: tuple) -> te.Tensor:
"""Pad an NHWC tensor in the height and width axes.
Parameters
----------
tensor : te.Tensor
The tensor to pad.
padding : tuple
The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
Returns
-------
te.Tensor
The padded tensor.
"""
pad_top, pad_left, pad_down, pad_right = padding
pad_before = [0, int(pad_top), int(pad_left), 0]
pad_after = [0, int(pad_down), int(pad_right), 0]
pad_attrs = {
"op": "ethosu_pad",
}
shape = tensor.shape
return te.compute(
(shape[0], shape[1] + pad_top + pad_down, shape[2] + pad_left + pad_right, shape[3]),
lambda nn, hh, ww, cc: _pad_tensor(tensor, pad_before, pad_after)(nn, hh, ww, cc),
name="ethosu_pad",
attrs=pad_attrs,
)
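# Editorial note: with padding = (1, 2, 3, 4) an NHWC tensor of shape
# (N, H, W, C) is padded to (N, H + 4, W + 6, C): 1 row on top, 3 on the
# bottom, 2 columns on the left, 4 on the right. Batch and channel axes are
# never padded.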
def dma_ifm_compute(
ifm: te.Tensor,
layout: str,
zero_point: int,
scale: float,
channels: int,
padding: Tuple[int, int, int, int],
) -> te.Tensor:
"""A sequence of compute operators representing the DMA capabilities for an IFM.
Parameters
----------
ifm : te.Tensor
The Input Feature Map (IFM) tensor.
layout : str
The layout of the data, either NHWC or NHCWB16.
zero_point : int
The zero point of the data.
scale : float
The scale of the data.
channels : int
The number of valid channels for the data.
padding : tuple
The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
Returns
-------
te.Tensor
The dma-ed IFM tensor.
"""
read_ifm = read_compute(ifm, layout, zero_point, scale)
convert_to_nhwc_ifm = convert_to_nhwc_compute(read_ifm, layout, channels)
return pad_compute(convert_to_nhwc_ifm, padding)
def dma_ofm_compute(
ofm: te.Tensor, layout: str, zero_point: int, scale: float, channels: int
) -> te.Tensor:
"""A sequence of compute operators representing the DMA capabilities for an OFM.
Parameters
----------
ofm : te.Tensor
The Output Feature Map (OFM) tensor.
layout : str
The layout of the data, either NHWC or NHCWB16.
zero_point : int
The zero point of the data.
scale : float
The scale of the data.
channels : int
The number of valid channels for the data.
Returns
-------
te.Tensor
The dma-ed OFM tensor.
"""
convert_to_nhcwb16_ofm = convert_to_nhcwb16_compute(ofm, layout, channels)
return write_compute(convert_to_nhcwb16_ofm, layout, zero_point, scale)
|
PypiClean
|
/laneDetection-0.0.2-py3-none-any.whl/SCNN_Lane_Detection/__init__.py
|
from SCNN_Lane_Detection.model import SCNN
from SCNN_Lane_Detection.utils.transforms import *
import urllib.request
import tempfile
import os.path
# Global
weightPath = ''
p_threshold = 0.5
# Change this if you are not using pre-trained model
# --------------------------------------------------------
mean = (0.3598, 0.3653, 0.3662) # CULane mean, std
std = (0.2573, 0.2663, 0.2756)
trained_model_width = 800
trained_model_height = 288
# --------------------------------------------------------
# End Global
net = SCNN(input_size=(trained_model_width, trained_model_height), pretrained=False)
transform_img = Resize((trained_model_width, trained_model_height))
transform_to_net = Compose(ToTensor(), Normalize(mean=mean, std=std))
def init(val):
global weightPath
weightPath = val
def predictThreshold(val):
global p_threshold
p_threshold = val
def predictCpu(img):
if isWeightExists():
loadModel()
img, x = transformImage(img)
seg_pred, exist_pred = net(x)[:2]
img, lane_img = drawLane(img, seg_pred, exist_pred)
return img, lane_img
raise ValueError('The weights file does not exist, please provide a weight path in init()')
def predictGpu(img):
if isWeightExists():
if torch.cuda.is_available():
net.cuda()
loadModel()
img, x = transformImage(img)
seg_pred, exist_pred = net(x.cuda())[:2]
img, lane_img = drawLane(img, seg_pred, exist_pred)
return img, lane_img
raise ValueError('The library was unable to detect CUDA '
'on this device, please use the CPU instead.')
raise ValueError('The weights file does not exist, please provide a weight path in init()')
def demo(url, gpu=False):
file = tempfile.gettempdir() + '/lane_demo.jpg'
downloadDemoFile(file, url)
img = cv2.imread(file)
if gpu:
return predictGpu(img)
else:
return predictCpu(img)
def downloadDemoFile(file, url):
urllib.request.urlretrieve(url, file)
def getAddWeight(img, lane_img):
return cv2.addWeighted(src1=lane_img, alpha=0.8, src2=img, beta=1., gamma=0.)
def loadModel():
save_dict = torch.load(weightPath)
net.load_state_dict(save_dict['net'])
net.eval()
def transformImage(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = transform_img({'img': img})['img']
x = transform_to_net({'img': img})['img']
x.unsqueeze_(0)
return img, x
def drawLane(img, seg_pred, exist_pred):
seg_pred = seg_pred.detach().cpu().numpy()
exist_pred = exist_pred.detach().cpu().numpy()
seg_pred = seg_pred[0]
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
lane_img = np.zeros_like(img)
color = np.array([[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]], dtype='uint8')
coord_mask = np.argmax(seg_pred, axis=0)
for i in range(0, 4):
if exist_pred[0, i] > p_threshold:
lane_img[coord_mask == (i + 1)] = color[i]
return img, lane_img
def isWeightExists():
return os.path.isfile(weightPath)
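# Editorial note: an illustrative end-to-end usage sketch (the weight path and
# image path are placeholders, not shipped with the package; loadModel()
# expects a checkpoint saved as {'net': state_dict}):
#
#   import cv2
#   import SCNN_Lane_Detection as lane
#   lane.init("/path/to/culane_scnn_weights.pth")
#   lane.predictThreshold(0.5)
#   img, lane_img = lane.predictCpu(cv2.imread("road.jpg"))
#   overlay = lane.getAddWeight(img, lane_img)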
|
PypiClean
|
/gamification-engine-0.4.0.tar.gz/gamification-engine-0.4.0/gengine/app/jsscripts/node_modules/handlebars/dist/handlebars.min.js
|
!function(a,b){"object"==typeof exports&&"object"==typeof module?module.exports=b():"function"==typeof define&&define.amd?define([],b):"object"==typeof exports?exports.Handlebars=b():a.Handlebars=b()}(this,function(){return function(a){function b(d){if(c[d])return c[d].exports;var e=c[d]={exports:{},id:d,loaded:!1};return a[d].call(e.exports,e,e.exports,b),e.loaded=!0,e.exports}var c={};return b.m=a,b.c=c,b.p="",b(0)}([function(a,b,c){"use strict";function d(){var a=r();return a.compile=function(b,c){return k.compile(b,c,a)},a.precompile=function(b,c){return k.precompile(b,c,a)},a.AST=i["default"],a.Compiler=k.Compiler,a.JavaScriptCompiler=m["default"],a.Parser=j.parser,a.parse=j.parse,a.parseWithoutProcessing=j.parseWithoutProcessing,a}var e=c(1)["default"];b.__esModule=!0;var f=c(2),g=e(f),h=c(45),i=e(h),j=c(46),k=c(51),l=c(52),m=e(l),n=c(49),o=e(n),p=c(44),q=e(p),r=g["default"].create,s=d();s.create=d,q["default"](s),s.Visitor=o["default"],s["default"]=s,b["default"]=s,a.exports=b["default"]},function(a,b){"use strict";b["default"]=function(a){return a&&a.__esModule?a:{"default":a}},b.__esModule=!0},function(a,b,c){"use strict";function d(){var a=new h.HandlebarsEnvironment;return n.extend(a,h),a.SafeString=j["default"],a.Exception=l["default"],a.Utils=n,a.escapeExpression=n.escapeExpression,a.VM=p,a.template=function(b){return p.template(b,a)},a}var e=c(3)["default"],f=c(1)["default"];b.__esModule=!0;var g=c(4),h=e(g),i=c(33),j=f(i),k=c(6),l=f(k),m=c(5),n=e(m),o=c(34),p=e(o),q=c(44),r=f(q),s=d();s.create=d,r["default"](s),s["default"]=s,b["default"]=s,a.exports=b["default"]},function(a,b){"use strict";b["default"]=function(a){if(a&&a.__esModule)return a;var b={};if(null!=a)for(var c in a)Object.prototype.hasOwnProperty.call(a,c)&&(b[c]=a[c]);return b["default"]=a,b},b.__esModule=!0},function(a,b,c){"use strict";function d(a,b,c){this.helpers=a||{},this.partials=b||{},this.decorators=c||{},i.registerDefaultHelpers(this),j.registerDefaultDecorators(this)}var e=c(1)["default"];b.__esModule=!0,b.HandlebarsEnvironment=d;var f=c(5),g=c(6),h=e(g),i=c(10),j=c(30),k=c(32),l=e(k),m="4.7.0";b.VERSION=m;var n=8;b.COMPILER_REVISION=n;var o=7;b.LAST_COMPATIBLE_COMPILER_REVISION=o;var p={1:"<= 1.0.rc.2",2:"== 1.0.0-rc.3",3:"== 1.0.0-rc.4",4:"== 1.x.x",5:"== 2.0.0-alpha.x",6:">= 2.0.0-beta.1",7:">= 4.0.0 <4.3.0",8:">= 4.3.0"};b.REVISION_CHANGES=p;var q="[object Object]";d.prototype={constructor:d,logger:l["default"],log:l["default"].log,registerHelper:function(a,b){if(f.toString.call(a)===q){if(b)throw new h["default"]("Arg not supported with multiple helpers");f.extend(this.helpers,a)}else this.helpers[a]=b},unregisterHelper:function(a){delete this.helpers[a]},registerPartial:function(a,b){if(f.toString.call(a)===q)f.extend(this.partials,a);else{if("undefined"==typeof b)throw new h["default"]('Attempting to register a partial called "'+a+'" as undefined');this.partials[a]=b}},unregisterPartial:function(a){delete this.partials[a]},registerDecorator:function(a,b){if(f.toString.call(a)===q){if(b)throw new h["default"]("Arg not supported with multiple decorators");f.extend(this.decorators,a)}else this.decorators[a]=b},unregisterDecorator:function(a){delete this.decorators[a]}};var r=l["default"].log;b.log=r,b.createFrame=f.createFrame,b.logger=l["default"]},function(a,b){"use strict";function c(a){return k[a]}function d(a){for(var b=1;b<arguments.length;b++)for(var c in arguments[b])Object.prototype.hasOwnProperty.call(arguments[b],c)&&(a[c]=arguments[b][c]);return a}function e(a,b){for(var 
c=0,d=a.length;c<d;c++)if(a[c]===b)return c;return-1}function f(a){if("string"!=typeof a){if(a&&a.toHTML)return a.toHTML();if(null==a)return"";if(!a)return a+"";a=""+a}return m.test(a)?a.replace(l,c):a}function g(a){return!a&&0!==a||!(!p(a)||0!==a.length)}function h(a){var b=d({},a);return b._parent=a,b}function i(a,b){return a.path=b,a}function j(a,b){return(a?a+".":"")+b}b.__esModule=!0,b.extend=d,b.indexOf=e,b.escapeExpression=f,b.isEmpty=g,b.createFrame=h,b.blockParams=i,b.appendContextPath=j;var k={"&":"&","<":"<",">":">",'"':""","'":"'","`":"`","=":"="},l=/[&<>"'`=]/g,m=/[&<>"'`=]/,n=Object.prototype.toString;b.toString=n;var o=function(a){return"function"==typeof a};o(/x/)&&(b.isFunction=o=function(a){return"function"==typeof a&&"[object Function]"===n.call(a)}),b.isFunction=o;var p=Array.isArray||function(a){return!(!a||"object"!=typeof a)&&"[object Array]"===n.call(a)};b.isArray=p},function(a,b,c){"use strict";function d(a,b){var c=b&&b.loc,g=void 0,h=void 0,i=void 0,j=void 0;c&&(g=c.start.line,h=c.end.line,i=c.start.column,j=c.end.column,a+=" - "+g+":"+i);for(var k=Error.prototype.constructor.call(this,a),l=0;l<f.length;l++)this[f[l]]=k[f[l]];Error.captureStackTrace&&Error.captureStackTrace(this,d);try{c&&(this.lineNumber=g,this.endLineNumber=h,e?(Object.defineProperty(this,"column",{value:i,enumerable:!0}),Object.defineProperty(this,"endColumn",{value:j,enumerable:!0})):(this.column=i,this.endColumn=j))}catch(m){}}var e=c(7)["default"];b.__esModule=!0;var f=["description","fileName","lineNumber","endLineNumber","message","name","number","stack"];d.prototype=new Error,b["default"]=d,a.exports=b["default"]},function(a,b,c){a.exports={"default":c(8),__esModule:!0}},function(a,b,c){var d=c(9);a.exports=function(a,b,c){return d.setDesc(a,b,c)}},function(a,b){var c=Object;a.exports={create:c.create,getProto:c.getPrototypeOf,isEnum:{}.propertyIsEnumerable,getDesc:c.getOwnPropertyDescriptor,setDesc:c.defineProperty,setDescs:c.defineProperties,getKeys:c.keys,getNames:c.getOwnPropertyNames,getSymbols:c.getOwnPropertySymbols,each:[].forEach}},function(a,b,c){"use strict";function d(a){h["default"](a),j["default"](a),l["default"](a),n["default"](a),p["default"](a),r["default"](a),t["default"](a)}function e(a,b,c){a.helpers[b]&&(a.hooks[b]=a.helpers[b],c||delete a.helpers[b])}var f=c(1)["default"];b.__esModule=!0,b.registerDefaultHelpers=d,b.moveHelperToHooks=e;var g=c(11),h=f(g),i=c(12),j=f(i),k=c(25),l=f(k),m=c(26),n=f(m),o=c(27),p=f(o),q=c(28),r=f(q),s=c(29),t=f(s)},function(a,b,c){"use strict";b.__esModule=!0;var d=c(5);b["default"]=function(a){a.registerHelper("blockHelperMissing",function(b,c){var e=c.inverse,f=c.fn;if(b===!0)return f(this);if(b===!1||null==b)return e(this);if(d.isArray(b))return b.length>0?(c.ids&&(c.ids=[c.name]),a.helpers.each(b,c)):e(this);if(c.data&&c.ids){var g=d.createFrame(c.data);g.contextPath=d.appendContextPath(c.data.contextPath,c.name),c={data:g}}return f(b,c)})},a.exports=b["default"]},function(a,b,c){(function(d){"use strict";var e=c(13)["default"],f=c(1)["default"];b.__esModule=!0;var g=c(5),h=c(6),i=f(h);b["default"]=function(a){a.registerHelper("each",function(a,b){function c(b,c,d){l&&(l.key=b,l.index=c,l.first=0===c,l.last=!!d,m&&(l.contextPath=m+b)),k+=f(a[b],{data:l,blockParams:g.blockParams([a[b],b],[m+b,null])})}if(!b)throw new i["default"]("Must pass iterator to #each");var f=b.fn,h=b.inverse,j=0,k="",l=void 0,m=void 
0;if(b.data&&b.ids&&(m=g.appendContextPath(b.data.contextPath,b.ids[0])+"."),g.isFunction(a)&&(a=a.call(this)),b.data&&(l=g.createFrame(b.data)),a&&"object"==typeof a)if(g.isArray(a))for(var n=a.length;j<n;j++)j in a&&c(j,j,j===a.length-1);else if(d.Symbol&&a[d.Symbol.iterator]){for(var o=[],p=a[d.Symbol.iterator](),q=p.next();!q.done;q=p.next())o.push(q.value);a=o;for(var n=a.length;j<n;j++)c(j,j,j===a.length-1)}else!function(){var b=void 0;e(a).forEach(function(a){void 0!==b&&c(b,j-1),b=a,j++}),void 0!==b&&c(b,j-1,!0)}();return 0===j&&(k=h(this)),k})},a.exports=b["default"]}).call(b,function(){return this}())},function(a,b,c){a.exports={"default":c(14),__esModule:!0}},function(a,b,c){c(15),a.exports=c(21).Object.keys},function(a,b,c){var d=c(16);c(18)("keys",function(a){return function(b){return a(d(b))}})},function(a,b,c){var d=c(17);a.exports=function(a){return Object(d(a))}},function(a,b){a.exports=function(a){if(void 0==a)throw TypeError("Can't call method on "+a);return a}},function(a,b,c){var d=c(19),e=c(21),f=c(24);a.exports=function(a,b){var c=(e.Object||{})[a]||Object[a],g={};g[a]=b(c),d(d.S+d.F*f(function(){c(1)}),"Object",g)}},function(a,b,c){var d=c(20),e=c(21),f=c(22),g="prototype",h=function(a,b,c){var i,j,k,l=a&h.F,m=a&h.G,n=a&h.S,o=a&h.P,p=a&h.B,q=a&h.W,r=m?e:e[b]||(e[b]={}),s=m?d:n?d[b]:(d[b]||{})[g];m&&(c=b);for(i in c)j=!l&&s&&i in s,j&&i in r||(k=j?s[i]:c[i],r[i]=m&&"function"!=typeof s[i]?c[i]:p&&j?f(k,d):q&&s[i]==k?function(a){var b=function(b){return this instanceof a?new a(b):a(b)};return b[g]=a[g],b}(k):o&&"function"==typeof k?f(Function.call,k):k,o&&((r[g]||(r[g]={}))[i]=k))};h.F=1,h.G=2,h.S=4,h.P=8,h.B=16,h.W=32,a.exports=h},function(a,b){var c=a.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=c)},function(a,b){var c=a.exports={version:"1.2.6"};"number"==typeof __e&&(__e=c)},function(a,b,c){var d=c(23);a.exports=function(a,b,c){if(d(a),void 0===b)return a;switch(c){case 1:return function(c){return a.call(b,c)};case 2:return function(c,d){return a.call(b,c,d)};case 3:return function(c,d,e){return a.call(b,c,d,e)}}return function(){return a.apply(b,arguments)}}},function(a,b){a.exports=function(a){if("function"!=typeof a)throw TypeError(a+" is not a function!");return a}},function(a,b){a.exports=function(a){try{return!!a()}catch(b){return!0}}},function(a,b,c){"use strict";var d=c(1)["default"];b.__esModule=!0;var e=c(6),f=d(e);b["default"]=function(a){a.registerHelper("helperMissing",function(){if(1!==arguments.length)throw new f["default"]('Missing helper: "'+arguments[arguments.length-1].name+'"')})},a.exports=b["default"]},function(a,b,c){"use strict";var d=c(1)["default"];b.__esModule=!0;var e=c(5),f=c(6),g=d(f);b["default"]=function(a){a.registerHelper("if",function(a,b){if(2!=arguments.length)throw new g["default"]("#if requires exactly one argument");return e.isFunction(a)&&(a=a.call(this)),!b.hash.includeZero&&!a||e.isEmpty(a)?b.inverse(this):b.fn(this)}),a.registerHelper("unless",function(b,c){if(2!=arguments.length)throw new g["default"]("#unless requires exactly one argument");return a.helpers["if"].call(this,b,{fn:c.inverse,inverse:c.fn,hash:c.hash})})},a.exports=b["default"]},function(a,b){"use strict";b.__esModule=!0,b["default"]=function(a){a.registerHelper("log",function(){for(var b=[void 0],c=arguments[arguments.length-1],d=0;d<arguments.length-1;d++)b.push(arguments[d]);var 
e=1;null!=c.hash.level?e=c.hash.level:c.data&&null!=c.data.level&&(e=c.data.level),b[0]=e,a.log.apply(a,b)})},a.exports=b["default"]},function(a,b){"use strict";b.__esModule=!0,b["default"]=function(a){a.registerHelper("lookup",function(a,b,c){return a?c.lookupProperty(a,b):a})},a.exports=b["default"]},function(a,b,c){"use strict";var d=c(1)["default"];b.__esModule=!0;var e=c(5),f=c(6),g=d(f);b["default"]=function(a){a.registerHelper("with",function(a,b){if(2!=arguments.length)throw new g["default"]("#with requires exactly one argument");e.isFunction(a)&&(a=a.call(this));var c=b.fn;if(e.isEmpty(a))return b.inverse(this);var d=b.data;return b.data&&b.ids&&(d=e.createFrame(b.data),d.contextPath=e.appendContextPath(b.data.contextPath,b.ids[0])),c(a,{data:d,blockParams:e.blockParams([a],[d&&d.contextPath])})})},a.exports=b["default"]},function(a,b,c){"use strict";function d(a){g["default"](a)}var e=c(1)["default"];b.__esModule=!0,b.registerDefaultDecorators=d;var f=c(31),g=e(f)},function(a,b,c){"use strict";b.__esModule=!0;var d=c(5);b["default"]=function(a){a.registerDecorator("inline",function(a,b,c,e){var f=a;return b.partials||(b.partials={},f=function(e,f){var g=c.partials;c.partials=d.extend({},g,b.partials);var h=a(e,f);return c.partials=g,h}),b.partials[e.args[0]]=e.fn,f})},a.exports=b["default"]},function(a,b,c){"use strict";b.__esModule=!0;var d=c(5),e={methodMap:["debug","info","warn","error"],level:"info",lookupLevel:function(a){if("string"==typeof a){var b=d.indexOf(e.methodMap,a.toLowerCase());a=b>=0?b:parseInt(a,10)}return a},log:function(a){if(a=e.lookupLevel(a),"undefined"!=typeof console&&e.lookupLevel(e.level)<=a){var b=e.methodMap[a];console[b]||(b="log");for(var c=arguments.length,d=Array(c>1?c-1:0),f=1;f<c;f++)d[f-1]=arguments[f];console[b].apply(console,d)}}};b["default"]=e,a.exports=b["default"]},function(a,b){"use strict";function c(a){this.string=a}b.__esModule=!0,c.prototype.toString=c.prototype.toHTML=function(){return""+this.string},b["default"]=c,a.exports=b["default"]},function(a,b,c){"use strict";function d(a){var b=a&&a[0]||1,c=v.COMPILER_REVISION;if(!(b>=v.LAST_COMPATIBLE_COMPILER_REVISION&&b<=v.COMPILER_REVISION)){if(b<v.LAST_COMPATIBLE_COMPILER_REVISION){var d=v.REVISION_CHANGES[c],e=v.REVISION_CHANGES[b];throw new u["default"]("Template was precompiled with an older version of Handlebars than the current runtime. Please update your precompiler to a newer version ("+d+") or downgrade your runtime to an older version ("+e+").")}throw new u["default"]("Template was precompiled with a newer version of Handlebars than the current runtime. 
Please update your runtime to a newer version ("+a[1]+").")}}function e(a,b){function c(c,d,e){e.hash&&(d=s.extend({},d,e.hash),e.ids&&(e.ids[0]=!0)),c=b.VM.resolvePartial.call(this,c,d,e);var f=s.extend({},e,{hooks:this.hooks,protoAccessControl:this.protoAccessControl}),g=b.VM.invokePartial.call(this,c,d,f);if(null==g&&b.compile&&(e.partials[e.name]=b.compile(c,a.compilerOptions,b),g=e.partials[e.name](d,f)),null!=g){if(e.indent){for(var h=g.split("\n"),i=0,j=h.length;i<j&&(h[i]||i+1!==j);i++)h[i]=e.indent+h[i];g=h.join("\n")}return g}throw new u["default"]("The partial "+e.name+" could not be compiled when running in runtime-only mode")}function d(b){function c(b){return""+a.main(g,b,g.helpers,g.partials,f,i,h)}var e=arguments.length<=1||void 0===arguments[1]?{}:arguments[1],f=e.data;d._setup(e),!e.partial&&a.useData&&(f=j(b,f));var h=void 0,i=a.useBlockParams?[]:void 0;return a.useDepths&&(h=e.depths?b!=e.depths[0]?[b].concat(e.depths):e.depths:[b]),(c=k(a.main,c,g,e.depths||[],f,i))(b,e)}if(!b)throw new u["default"]("No environment passed to template");if(!a||!a.main)throw new u["default"]("Unknown template object: "+typeof a);a.main.decorator=a.main_d,b.VM.checkRevision(a.compiler);var e=a.compiler&&7===a.compiler[0],g={strict:function(a,b,c){if(!(a&&b in a))throw new u["default"]('"'+b+'" not defined in '+a,{loc:c});return a[b]},lookupProperty:function(a,b){var c=a[b];return null==c?c:Object.prototype.hasOwnProperty.call(a,b)?c:y.resultIsAllowed(c,g.protoAccessControl,b)?c:void 0},lookup:function(a,b){for(var c=a.length,d=0;d<c;d++){var e=a[d]&&g.lookupProperty(a[d],b);if(null!=e)return a[d][b]}},lambda:function(a,b){return"function"==typeof a?a.call(b):a},escapeExpression:s.escapeExpression,invokePartial:c,fn:function(b){var c=a[b];return c.decorator=a[b+"_d"],c},programs:[],program:function(a,b,c,d,e){var g=this.programs[a],h=this.fn(a);return b||e||d||c?g=f(this,a,h,b,c,d,e):g||(g=this.programs[a]=f(this,a,h)),g},data:function(a,b){for(;a&&b--;)a=a._parent;return a},mergeIfNeeded:function(a,b){var c=a||b;return a&&b&&a!==b&&(c=s.extend({},b,a)),c},nullContext:n({}),noop:b.VM.noop,compilerInfo:a.compiler};return d.isTop=!0,d._setup=function(c){if(c.partial)g.protoAccessControl=c.protoAccessControl,g.helpers=c.helpers,g.partials=c.partials,g.decorators=c.decorators,g.hooks=c.hooks;else{var d=s.extend({},b.helpers,c.helpers);l(d,g),g.helpers=d,a.usePartial&&(g.partials=g.mergeIfNeeded(c.partials,b.partials)),(a.usePartial||a.useDecorators)&&(g.decorators=s.extend({},b.decorators,c.decorators)),g.hooks={},g.protoAccessControl=y.createProtoAccessControl(c);var f=c.allowCallsToHelperMissing||e;w.moveHelperToHooks(g,"helperMissing",f),w.moveHelperToHooks(g,"blockHelperMissing",f)}},d._child=function(b,c,d,e){if(a.useBlockParams&&!d)throw new u["default"]("must pass block params");if(a.useDepths&&!e)throw new u["default"]("must pass parent depths");return f(g,b,a[b],c,0,d,e)},d}function f(a,b,c,d,e,f,g){function h(b){var e=arguments.length<=1||void 0===arguments[1]?{}:arguments[1],h=g;return!g||b==g[0]||b===a.nullContext&&null===g[0]||(h=[b].concat(g)),c(a,b,a.helpers,a.partials,e.data||d,f&&[e.blockParams].concat(f),h)}return h=k(c,h,a,g,d,f),h.program=b,h.depth=g?g.length:0,h.blockParams=e||0,h}function g(a,b,c){return a?a.call||c.name||(c.name=a,a=c.partials[a]):a="@partial-block"===c.name?c.data["partial-block"]:c.partials[c.name],a}function h(a,b,c){var d=c.data&&c.data["partial-block"];c.partial=!0,c.ids&&(c.data.contextPath=c.ids[0]||c.data.contextPath);var e=void 
0;if(c.fn&&c.fn!==i&&!function(){c.data=v.createFrame(c.data);var a=c.fn;e=c.data["partial-block"]=function(b){var c=arguments.length<=1||void 0===arguments[1]?{}:arguments[1];return c.data=v.createFrame(c.data),c.data["partial-block"]=d,a(b,c)},a.partials&&(c.partials=s.extend({},c.partials,a.partials))}(),void 0===a&&e&&(a=e),void 0===a)throw new u["default"]("The partial "+c.name+" could not be found");if(a instanceof Function)return a(b,c)}function i(){return""}function j(a,b){return b&&"root"in b||(b=b?v.createFrame(b):{},b.root=a),b}function k(a,b,c,d,e,f){if(a.decorator){var g={};b=a.decorator(b,g,c,d&&d[0],e,f,d),s.extend(b,g)}return b}function l(a,b){o(a).forEach(function(c){var d=a[c];a[c]=m(d,b)})}function m(a,b){var c=b.lookupProperty;return x.wrapHelper(a,function(a){return s.extend({lookupProperty:c},a)})}var n=c(35)["default"],o=c(13)["default"],p=c(3)["default"],q=c(1)["default"];b.__esModule=!0,b.checkRevision=d,b.template=e,b.wrapProgram=f,b.resolvePartial=g,b.invokePartial=h,b.noop=i;var r=c(5),s=p(r),t=c(6),u=q(t),v=c(4),w=c(10),x=c(39),y=c(40)},function(a,b,c){a.exports={"default":c(36),__esModule:!0}},function(a,b,c){c(37),a.exports=c(21).Object.seal},function(a,b,c){var d=c(38);c(18)("seal",function(a){return function(b){return a&&d(b)?a(b):b}})},function(a,b){a.exports=function(a){return"object"==typeof a?null!==a:"function"==typeof a}},function(a,b){"use strict";function c(a,b){var c=function(){var c=arguments[arguments.length-1];return arguments[arguments.length-1]=b(c),a.apply(this,arguments)};return c}b.__esModule=!0,b.wrapHelper=c},function(a,b,c){"use strict";function d(a){var b=g(null);b.constructor=!1,b.__defineGetter__=!1,b.__defineSetter__=!1,b.__lookupGetter__=!1;var c=g(null);return c.__proto__=!1,{properties:{whitelist:i.createNewLookupObject(c,a.allowedProtoProperties),defaultValue:a.allowProtoPropertiesByDefault},methods:{whitelist:i.createNewLookupObject(b,a.allowedProtoMethods),defaultValue:a.allowProtoMethodsByDefault}}}function e(a,b,c){return"function"==typeof a?f(b.methods,c):f(b.properties,c)}function f(a,b){return void 0!==a.whitelist[b]?a.whitelist[b]===!0:void 0!==a.defaultValue?a.defaultValue:(k.log("error",'Handlebars: Access has been denied to resolve the property "'+b+'" because it is not an "own property" of its parent.\nYou can add a runtime option to disable the check or this warning:\nSee http://localhost:8080/api-reference/runtime-options.html#options-to-control-prototype-access for details'),!1)}var g=c(41)["default"],h=c(3)["default"];b.__esModule=!0,b.createProtoAccessControl=d,b.resultIsAllowed=e;var i=c(43),j=c(32),k=h(j)},function(a,b,c){a.exports={"default":c(42),__esModule:!0}},function(a,b,c){var d=c(9);a.exports=function(a,b){return d.create(a,b)}},function(a,b,c){"use strict";function d(){for(var a=arguments.length,b=Array(a),c=0;c<a;c++)b[c]=arguments[c];return f.extend.apply(void 0,[e(null)].concat(b))}var e=c(41)["default"];b.__esModule=!0,b.createNewLookupObject=d;var f=c(5)},function(a,b){(function(c){"use strict";b.__esModule=!0,b["default"]=function(a){var b="undefined"!=typeof c?c:window,d=b.Handlebars;a.noConflict=function(){return b.Handlebars===a&&(b.Handlebars=d),a}},a.exports=b["default"]}).call(b,function(){return this}())},function(a,b){"use strict";b.__esModule=!0;var 
c={helpers:{helperExpression:function(a){return"SubExpression"===a.type||("MustacheStatement"===a.type||"BlockStatement"===a.type)&&!!(a.params&&a.params.length||a.hash)},scopedId:function(a){return/^\.|this\b/.test(a.original)},simpleId:function(a){return 1===a.parts.length&&!c.helpers.scopedId(a)&&!a.depth}}};b["default"]=c,a.exports=b["default"]},function(a,b,c){"use strict";function d(a,b){if("Program"===a.type)return a;i["default"].yy=o,o.locInfo=function(a){return new o.SourceLocation(b&&b.srcName,a)};var c=i["default"].parse(a);return c}function e(a,b){var c=d(a,b),e=new k["default"](b);return e.accept(c)}var f=c(1)["default"],g=c(3)["default"];b.__esModule=!0,b.parseWithoutProcessing=d,b.parse=e;var h=c(47),i=f(h),j=c(48),k=f(j),l=c(50),m=g(l),n=c(5);b.parser=i["default"];var o={};n.extend(o,m)},function(a,b){"use strict";b.__esModule=!0;var c=function(){function a(){this.yy={}}var b={trace:function(){},yy:{},symbols_:{error:2,root:3,program:4,EOF:5,program_repetition0:6,statement:7,mustache:8,block:9,rawBlock:10,partial:11,partialBlock:12,content:13,COMMENT:14,CONTENT:15,openRawBlock:16,rawBlock_repetition0:17,END_RAW_BLOCK:18,OPEN_RAW_BLOCK:19,helperName:20,openRawBlock_repetition0:21,openRawBlock_option0:22,CLOSE_RAW_BLOCK:23,openBlock:24,block_option0:25,closeBlock:26,openInverse:27,block_option1:28,OPEN_BLOCK:29,openBlock_repetition0:30,openBlock_option0:31,openBlock_option1:32,CLOSE:33,OPEN_INVERSE:34,openInverse_repetition0:35,openInverse_option0:36,openInverse_option1:37,openInverseChain:38,OPEN_INVERSE_CHAIN:39,openInverseChain_repetition0:40,openInverseChain_option0:41,openInverseChain_option1:42,inverseAndProgram:43,INVERSE:44,inverseChain:45,inverseChain_option0:46,OPEN_ENDBLOCK:47,OPEN:48,mustache_repetition0:49,mustache_option0:50,OPEN_UNESCAPED:51,mustache_repetition1:52,mustache_option1:53,CLOSE_UNESCAPED:54,OPEN_PARTIAL:55,partialName:56,partial_repetition0:57,partial_option0:58,openPartialBlock:59,OPEN_PARTIAL_BLOCK:60,openPartialBlock_repetition0:61,openPartialBlock_option0:62,param:63,sexpr:64,OPEN_SEXPR:65,sexpr_repetition0:66,sexpr_option0:67,CLOSE_SEXPR:68,hash:69,hash_repetition_plus0:70,hashSegment:71,ID:72,EQUALS:73,blockParams:74,OPEN_BLOCK_PARAMS:75,blockParams_repetition_plus0:76,CLOSE_BLOCK_PARAMS:77,path:78,dataName:79,STRING:80,NUMBER:81,BOOLEAN:82,UNDEFINED:83,NULL:84,DATA:85,pathSegments:86,SEP:87,$accept:0,$end:1},terminals_:{2:"error",5:"EOF",14:"COMMENT",15:"CONTENT",18:"END_RAW_BLOCK",19:"OPEN_RAW_BLOCK",23:"CLOSE_RAW_BLOCK",29:"OPEN_BLOCK",33:"CLOSE",34:"OPEN_INVERSE",39:"OPEN_INVERSE_CHAIN",44:"INVERSE",47:"OPEN_ENDBLOCK",48:"OPEN",51:"OPEN_UNESCAPED",54:"CLOSE_UNESCAPED",55:"OPEN_PARTIAL",60:"OPEN_PARTIAL_BLOCK",65:"OPEN_SEXPR",68:"CLOSE_SEXPR",72:"ID",73:"EQUALS",75:"OPEN_BLOCK_PARAMS",77:"CLOSE_BLOCK_PARAMS",80:"STRING",81:"NUMBER",82:"BOOLEAN",83:"UNDEFINED",84:"NULL",85:"DATA",87:"SEP"},productions_:[0,[3,2],[4,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[13,1],[10,3],[16,5],[9,4],[9,4],[24,6],[27,6],[38,6],[43,2],[45,3],[45,1],[26,3],[8,5],[8,5],[11,5],[12,3],[59,5],[63,1],[63,1],[64,5],[69,1],[71,3],[74,3],[20,1],[20,1],[20,1],[20,1],[20,1],[20,1],[20,1],[56,1],[56,1],[79,2],[78,1],[86,3],[86,1],[6,0],[6,2],[17,0],[17,2],[21,0],[21,2],[22,0],[22,1],[25,0],[25,1],[28,0],[28,1],[30,0],[30,2],[31,0],[31,1],[32,0],[32,1],[35,0],[35,2],[36,0],[36,1],[37,0],[37,1],[40,0],[40,2],[41,0],[41,1],[42,0],[42,1],[46,0],[46,1],[49,0],[49,2],[50,0],[50,1],[52,0],[52,2],[53,0],[53,1],[57,0],[57,2],[58,0],[58,1],[61,0],[61,2],[62,0],[62,1],[66,0],[66,2
],[67,0],[67,1],[70,1],[70,2],[76,1],[76,2]],performAction:function(a,b,c,d,e,f,g){var h=f.length-1;switch(e){case 1:return f[h-1];case 2:this.$=d.prepareProgram(f[h]);break;case 3:this.$=f[h];break;case 4:this.$=f[h];break;case 5:this.$=f[h];break;case 6:this.$=f[h];break;case 7:this.$=f[h];break;case 8:this.$=f[h];break;case 9:this.$={type:"CommentStatement",value:d.stripComment(f[h]),strip:d.stripFlags(f[h],f[h]),loc:d.locInfo(this._$)};break;case 10:this.$={type:"ContentStatement",original:f[h],value:f[h],loc:d.locInfo(this._$)};break;case 11:this.$=d.prepareRawBlock(f[h-2],f[h-1],f[h],this._$);break;case 12:this.$={path:f[h-3],params:f[h-2],hash:f[h-1]};break;case 13:this.$=d.prepareBlock(f[h-3],f[h-2],f[h-1],f[h],!1,this._$);break;case 14:this.$=d.prepareBlock(f[h-3],f[h-2],f[h-1],f[h],!0,this._$);break;case 15:this.$={open:f[h-5],path:f[h-4],params:f[h-3],hash:f[h-2],blockParams:f[h-1],strip:d.stripFlags(f[h-5],f[h])};break;case 16:this.$={path:f[h-4],params:f[h-3],hash:f[h-2],blockParams:f[h-1],strip:d.stripFlags(f[h-5],f[h])};break;case 17:this.$={path:f[h-4],params:f[h-3],hash:f[h-2],blockParams:f[h-1],strip:d.stripFlags(f[h-5],f[h])};break;case 18:this.$={strip:d.stripFlags(f[h-1],f[h-1]),program:f[h]};break;case 19:var i=d.prepareBlock(f[h-2],f[h-1],f[h],f[h],!1,this._$),j=d.prepareProgram([i],f[h-1].loc);j.chained=!0,this.$={strip:f[h-2].strip,program:j,chain:!0};break;case 20:this.$=f[h];break;case 21:this.$={path:f[h-1],strip:d.stripFlags(f[h-2],f[h])};break;case 22:this.$=d.prepareMustache(f[h-3],f[h-2],f[h-1],f[h-4],d.stripFlags(f[h-4],f[h]),this._$);break;case 23:this.$=d.prepareMustache(f[h-3],f[h-2],f[h-1],f[h-4],d.stripFlags(f[h-4],f[h]),this._$);break;case 24:this.$={type:"PartialStatement",name:f[h-3],params:f[h-2],hash:f[h-1],indent:"",strip:d.stripFlags(f[h-4],f[h]),loc:d.locInfo(this._$)};break;case 25:this.$=d.preparePartialBlock(f[h-2],f[h-1],f[h],this._$);break;case 26:this.$={path:f[h-3],params:f[h-2],hash:f[h-1],strip:d.stripFlags(f[h-4],f[h])};break;case 27:this.$=f[h];break;case 28:this.$=f[h];break;case 29:this.$={type:"SubExpression",path:f[h-3],params:f[h-2],hash:f[h-1],loc:d.locInfo(this._$)};break;case 30:this.$={type:"Hash",pairs:f[h],loc:d.locInfo(this._$)};break;case 31:this.$={type:"HashPair",key:d.id(f[h-2]),value:f[h],loc:d.locInfo(this._$)};break;case 32:this.$=d.id(f[h-1]);break;case 33:this.$=f[h];break;case 34:this.$=f[h];break;case 35:this.$={type:"StringLiteral",value:f[h],original:f[h],loc:d.locInfo(this._$)};break;case 36:this.$={type:"NumberLiteral",value:Number(f[h]),original:Number(f[h]),loc:d.locInfo(this._$)};break;case 37:this.$={type:"BooleanLiteral",value:"true"===f[h],original:"true"===f[h],loc:d.locInfo(this._$)};break;case 38:this.$={type:"UndefinedLiteral",original:void 0,value:void 0,loc:d.locInfo(this._$)};break;case 39:this.$={type:"NullLiteral",original:null,value:null,loc:d.locInfo(this._$)};break;case 40:this.$=f[h];break;case 41:this.$=f[h];break;case 42:this.$=d.preparePath(!0,f[h],this._$);break;case 43:this.$=d.preparePath(!1,f[h],this._$);break;case 44:f[h-2].push({part:d.id(f[h]),original:f[h],separator:f[h-1]}),this.$=f[h-2];break;case 45:this.$=[{part:d.id(f[h]),original:f[h]}];break;case 46:this.$=[];break;case 47:f[h-1].push(f[h]);break;case 48:this.$=[];break;case 49:f[h-1].push(f[h]);break;case 50:this.$=[];break;case 51:f[h-1].push(f[h]);break;case 58:this.$=[];break;case 59:f[h-1].push(f[h]);break;case 64:this.$=[];break;case 65:f[h-1].push(f[h]);break;case 70:this.$=[];break;case 
71:f[h-1].push(f[h]);break;case 78:this.$=[];break;case 79:f[h-1].push(f[h]);break;case 82:this.$=[];break;case 83:f[h-1].push(f[h]);break;case 86:this.$=[];break;case 87:f[h-1].push(f[h]);break;case 90:this.$=[];break;case 91:f[h-1].push(f[h]);break;case 94:this.$=[];break;case 95:f[h-1].push(f[h]);break;case 98:this.$=[f[h]];break;case 99:f[h-1].push(f[h]);break;case 100:this.$=[f[h]];break;case 101:f[h-1].push(f[h])}},table:[{3:1,4:2,5:[2,46],6:3,14:[2,46],15:[2,46],19:[2,46],29:[2,46],34:[2,46],48:[2,46],51:[2,46],55:[2,46],60:[2,46]},{1:[3]},{5:[1,4]},{5:[2,2],7:5,8:6,9:7,10:8,11:9,12:10,13:11,14:[1,12],15:[1,20],16:17,19:[1,23],24:15,27:16,29:[1,21],34:[1,22],39:[2,2],44:[2,2],47:[2,2],48:[1,13],51:[1,14],55:[1,18],59:19,60:[1,24]},{1:[2,1]},{5:[2,47],14:[2,47],15:[2,47],19:[2,47],29:[2,47],34:[2,47],39:[2,47],44:[2,47],47:[2,47],48:[2,47],51:[2,47],55:[2,47],60:[2,47]},{5:[2,3],14:[2,3],15:[2,3],19:[2,3],29:[2,3],34:[2,3],39:[2,3],44:[2,3],47:[2,3],48:[2,3],51:[2,3],55:[2,3],60:[2,3]},{5:[2,4],14:[2,4],15:[2,4],19:[2,4],29:[2,4],34:[2,4],39:[2,4],44:[2,4],47:[2,4],48:[2,4],51:[2,4],55:[2,4],60:[2,4]},{5:[2,5],14:[2,5],15:[2,5],19:[2,5],29:[2,5],34:[2,5],39:[2,5],44:[2,5],47:[2,5],48:[2,5],51:[2,5],55:[2,5],60:[2,5]},{5:[2,6],14:[2,6],15:[2,6],19:[2,6],29:[2,6],34:[2,6],39:[2,6],44:[2,6],47:[2,6],48:[2,6],51:[2,6],55:[2,6],60:[2,6]},{5:[2,7],14:[2,7],15:[2,7],19:[2,7],29:[2,7],34:[2,7],39:[2,7],44:[2,7],47:[2,7],48:[2,7],51:[2,7],55:[2,7],60:[2,7]},{5:[2,8],14:[2,8],15:[2,8],19:[2,8],29:[2,8],34:[2,8],39:[2,8],44:[2,8],47:[2,8],48:[2,8],51:[2,8],55:[2,8],60:[2,8]},{5:[2,9],14:[2,9],15:[2,9],19:[2,9],29:[2,9],34:[2,9],39:[2,9],44:[2,9],47:[2,9],48:[2,9],51:[2,9],55:[2,9],60:[2,9]},{20:25,72:[1,35],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{20:36,72:[1,35],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{4:37,6:3,14:[2,46],15:[2,46],19:[2,46],29:[2,46],34:[2,46],39:[2,46],44:[2,46],47:[2,46],48:[2,46],51:[2,46],55:[2,46],60:[2,46]},{4:38,6:3,14:[2,46],15:[2,46],19:[2,46],29:[2,46],34:[2,46],44:[2,46],47:[2,46],48:[2,46],51:[2,46],55:[2,46],60:[2,46]},{15:[2,48],17:39,18:[2,48]},{20:41,56:40,64:42,65:[1,43],72:[1,35],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{4:44,6:3,14:[2,46],15:[2,46],19:[2,46],29:[2,46],34:[2,46],47:[2,46],48:[2,46],51:[2,46],55:[2,46],60:[2,46]},{5:[2,10],14:[2,10],15:[2,10],18:[2,10],19:[2,10],29:[2,10],34:[2,10],39:[2,10],44:[2,10],47:[2,10],48:[2,10],51:[2,10],55:[2,10],60:[2,10]},{20:45,72:[1,35],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{20:46,72:[1,35],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{20:47,72:[1,35],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{20:41,56:48,64:42,65:[1,43],72:[1,35],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{33:[2,78],49:49,65:[2,78],72:[2,78],80:[2,78],81:[2,78],82:[2,78],83:[2,78],84:[2,78],85:[2,78]},{23:[2,33],33:[2,33],54:[2,33],65:[2,33],68:[2,33],72:[2,33],75:[2,33],80:[2,33],81:[2,33],82:[2,33],83:[2,33],84:[2,33],85:[2,33]},{23:[2,34],33:[2,34],54:[2,34],65:[2,34],68:[2,34],72:[2,34],75:[2,34],80:[2,34],81:[2,34],82:[2,34],83:[2,34],84:[2,34],85:[2,34]},{23:[2,35],33:[2,35],54:[2,35],65:[2,35],68:[2,35],72:[2,35],75:[2,35],80:[2,35],81:[2,35],82:[2,35],83:[2,35],84:[2,35],85:[2,35]},{23:[2,36],33:[2,36],54:[2,36],65:[2,36],68:[2,36],72:[2,36],75:[2,36],80:[2,36],81:[2,36],82:[2,3
6],83:[2,36],84:[2,36],85:[2,36]},{23:[2,37],33:[2,37],54:[2,37],65:[2,37],68:[2,37],72:[2,37],75:[2,37],80:[2,37],81:[2,37],82:[2,37],83:[2,37],84:[2,37],85:[2,37]},{23:[2,38],33:[2,38],54:[2,38],65:[2,38],68:[2,38],72:[2,38],75:[2,38],80:[2,38],81:[2,38],82:[2,38],83:[2,38],84:[2,38],85:[2,38]},{23:[2,39],33:[2,39],54:[2,39],65:[2,39],68:[2,39],72:[2,39],75:[2,39],80:[2,39],81:[2,39],82:[2,39],83:[2,39],84:[2,39],85:[2,39]},{23:[2,43],33:[2,43],54:[2,43],65:[2,43],68:[2,43],72:[2,43],75:[2,43],80:[2,43],81:[2,43],82:[2,43],83:[2,43],84:[2,43],85:[2,43],87:[1,50]},{72:[1,35],86:51},{23:[2,45],33:[2,45],54:[2,45],65:[2,45],68:[2,45],72:[2,45],75:[2,45],80:[2,45],81:[2,45],82:[2,45],83:[2,45],84:[2,45],85:[2,45],87:[2,45]},{52:52,54:[2,82],65:[2,82],72:[2,82],80:[2,82],81:[2,82],82:[2,82],83:[2,82],84:[2,82],85:[2,82]},{25:53,38:55,39:[1,57],43:56,44:[1,58],45:54,47:[2,54]},{28:59,43:60,44:[1,58],47:[2,56]},{13:62,15:[1,20],18:[1,61]},{33:[2,86],57:63,65:[2,86],72:[2,86],80:[2,86],81:[2,86],82:[2,86],83:[2,86],84:[2,86],85:[2,86]},{33:[2,40],65:[2,40],72:[2,40],80:[2,40],81:[2,40],82:[2,40],83:[2,40],84:[2,40],85:[2,40]},{33:[2,41],65:[2,41],72:[2,41],80:[2,41],81:[2,41],82:[2,41],83:[2,41],84:[2,41],85:[2,41]},{20:64,72:[1,35],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{26:65,47:[1,66]},{30:67,33:[2,58],65:[2,58],72:[2,58],75:[2,58],80:[2,58],81:[2,58],
82:[2,58],83:[2,58],84:[2,58],85:[2,58]},{33:[2,64],35:68,65:[2,64],72:[2,64],75:[2,64],80:[2,64],81:[2,64],82:[2,64],83:[2,64],84:[2,64],85:[2,64]},{21:69,23:[2,50],65:[2,50],72:[2,50],80:[2,50],81:[2,50],82:[2,50],83:[2,50],84:[2,50],85:[2,50]},{33:[2,90],61:70,65:[2,90],72:[2,90],80:[2,90],81:[2,90],82:[2,90],83:[2,90],84:[2,90],85:[2,90]},{20:74,33:[2,80],50:71,63:72,64:75,65:[1,43],69:73,70:76,71:77,72:[1,78],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{72:[1,79]},{23:[2,42],33:[2,42],54:[2,42],65:[2,42],68:[2,42],72:[2,42],75:[2,42],80:[2,42],81:[2,42],82:[2,42],83:[2,42],84:[2,42],85:[2,42],87:[1,50]},{20:74,53:80,54:[2,84],63:81,64:75,65:[1,43],69:82,70:76,71:77,72:[1,78],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{26:83,47:[1,66]},{47:[2,55]},{4:84,6:3,14:[2,46],15:[2,46],19:[2,46],29:[2,46],34:[2,46],39:[2,46],44:[2,46],47:[2,46],48:[2,46],51:[2,46],55:[2,46],60:[2,46]},{47:[2,20]},{20:85,72:[1,35],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{4:86,6:3,14:[2,46],15:[2,46],19:[2,46],29:[2,46],34:[2,46],47:[2,46],48:[2,46],51:[2,46],55:[2,46],60:[2,46]},{26:87,47:[1,66]},{47:[2,57]},{5:[2,11],14:[2,11],15:[2,11],19:[2,11],29:[2,11],34:[2,11],39:[2,11],44:[2,11],47:[2,11],48:[2,11],51:[2,11],55:[2,11],60:[2,11]},{15:[2,49],18:[2,49]},{20:74,33:[2,88],58:88,63:89,64:75,65:[1,43],69:90,70:76,71:77,72:[1,78],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{65:[2,94],66:91,68:[2,94],72:[2,94],80:[2,94],81:[2,94],82:[2,94],83:[2,94],84:[2,94],85:[2,94]},{5:[2,25],14:[2,25],15:[2,25],19:[2,25],29:[2,25],34:[2,25],39:[2,25],44:[2,25],47:[2,25],48:[2,25],51:[2,25],55:[2,25],60:[2,25]},{20:92,72:[1,35],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{20:74,31:93,33:[2,60],63:94,64:75,65:[1,43],69:95,70:76,71:77,72:[1,78],75:[2,60],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{20:74,33:[2,66],36:96,63:97,64:75,65:[1,43],69:98,70:76,71:77,72:[1,78],75:[2,66],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{20:74,22:99,23:[2,52],63:100,64:75,65:[1,43],69:101,70:76,71:77,72:[1,78],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{20:74,33:[2,92],62:102,63:103,64:75,65:[1,43],69:104,70:76,71:77,72:[1,78],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{33:[1,105]},{33:[2,79],65:[2,79],72:[2,79],80:[2,79],81:[2,79],82:[2,79],83:[2,79],84:[2,79],85:[2,79]},{33:[2,81]},{23:[2,27],33:[2,27],54:[2,27],65:[2,27],68:[2,27],72:[2,27],75:[2,27],80:[2,27],81:[2,27],82:[2,27],83:[2,27],84:[2,27],85:[2,27]},{23:[2,28],33:[2,28],54:[2,28],65:[2,28],68:[2,28],72:[2,28],75:[2,28],80:[2,28],81:[2,28],82:[2,28],83:[2,28],84:[2,28],85:[2,28]},{23:[2,30],33:[2,30],54:[2,30],68:[2,30],71:106,72:[1,107],75:[2,30]},{23:[2,98],33:[2,98],54:[2,98],68:[2,98],72:[2,98],75:[2,98]},{23:[2,45],33:[2,45],54:[2,45],65:[2,45],68:[2,45],72:[2,45],73:[1,108],75:[2,45],80:[2,45],81:[2,45],82:[2,45],83:[2,45],84:[2,45],85:[2,45],87:[2,45]},{23:[2,44],33:[2,44],54:[2,44],65:[2,44],68:[2,44],72:[2,44],75:[2,44],80:[2,44],81:[2,44],82:[2,44],83:[2,44],84:[2,44],85:[2,44],87:[2,44]},{54:[1,109]},{54:[2,83],65:[2,83],72:[2,83],80:[2,83],81:[2,83],82:[2,83],83:[2,83],84:[2,83],85:[2,83]},{54:[2,85]},{5:[2,13],14:[2,13],15:[2,13],19:[2,13],29:[2,13],34:[2,13],39:[2,13],44:[2,13],47:[2,13],48:[2,13],51:[2,13],55:[2,13],60:[2,13]},{38:55,39:[1,57],43:56,
44:[1,58],45:111,46:110,47:[2,76]},{33:[2,70],40:112,65:[2,70],72:[2,70],75:[2,70],80:[2,70],81:[2,70],82:[2,70],83:[2,70],84:[2,70],85:[2,70]},{47:[2,18]},{5:[2,14],14:[2,14],15:[2,14],19:[2,14],29:[2,14],34:[2,14],39:[2,14],44:[2,14],47:[2,14],48:[2,14],51:[2,14],55:[2,14],60:[2,14]},{33:[1,113]},{33:[2,87],65:[2,87],72:[2,87],80:[2,87],81:[2,87],82:[2,87],83:[2,87],84:[2,87],85:[2,87]},{33:[2,89]},{20:74,63:115,64:75,65:[1,43],67:114,68:[2,96],69:116,70:76,71:77,72:[1,78],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{33:[1,117]},{32:118,33:[2,62],74:119,75:[1,120]},{33:[2,59],65:[2,59],72:[2,59],75:[2,59],80:[2,59],81:[2,59],82:[2,59],83:[2,59],84:[2,59],85:[2,59]},{33:[2,61],75:[2,61]},{33:[2,68],37:121,74:122,75:[1,120]},{33:[2,65],65:[2,65],72:[2,65],75:[2,65],80:[2,65],81:[2,65],82:[2,65],83:[2,65],84:[2,65],85:[2,65]},{33:[2,67],75:[2,67]},{23:[1,123]},{23:[2,51],65:[2,51],72:[2,51],80:[2,51],81:[2,51],82:[2,51],83:[2,51],84:[2,51],85:[2,51]},{23:[2,53]},{33:[1,124]},{33:[2,91],65:[2,91],72:[2,91],80:[2,91],81:[2,91],82:[2,91],83:[2,91],84:[2,91],85:[2,91]},{33:[2,93]},{5:[2,22],14:[2,22],15:[2,22],19:[2,22],29:[2,22],34:[2,22],39:[2,22],44:[2,22],47:[2,22],48:[2,22],51:[2,22],55:[2,22],60:[2,22]},{23:[2,99],33:[2,99],54:[2,99],68:[2,99],72:[2,99],75:[2,99]},{73:[1,108]},{20:74,63:125,64:75,65:[1,43],72:[1,35],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{5:[2,23],14:[2,23],15:[2,23],19:[2,23],29:[2,23],34:[2,23],39:[2,23],44:[2,23],47:[2,23],48:[2,23],51:[2,23],55:[2,23],60:[2,23]},{47:[2,19]},{47:[2,77]},{20:74,33:[2,72],41:126,63:127,64:75,65:[1,43],69:128,70:76,71:77,72:[1,78],75:[2,72],78:26,79:27,80:[1,28],81:[1,29],82:[1,30],83:[1,31],84:[1,32],85:[1,34],86:33},{5:[2,24],14:[2,24],15:[2,24],19:[2,24],29:[2,24],34:[2,24],39:[2,24],44:[2,24],47:[2,24],48:[2,24],51:[2,24],55:[2,24],60:[2,24]},{68:[1,129]},{65:[2,95],68:[2,95],72:[2,95],80:[2,95],81:[2,95],82:[2,95],83:[2,95],84:[2,95],85:[2,95]},{68:[2,97]},{5:[2,21],14:[2,21],15:[2,21],19:[2,21],29:[2,21],34:[2,21],39:[2,21],44:[2,21],47:[2,21],48:[2,21],51:[2,21],55:[2,21],60:[2,21]},{33:[1,130]},{33:[2,63]},{72:[1,132],76:131},{33:[1,133]},{33:[2,69]},{15:[2,12],18:[2,12]},{14:[2,26],15:[2,26],19:[2,26],29:[2,26],34:[2,26],47:[2,26],48:[2,26],51:[2,26],55:[2,26],60:[2,26]},{23:[2,31],33:[2,31],54:[2,31],68:[2,31],72:[2,31],75:[2,31]},{33:[2,74],42:134,74:135,75:[1,120]},{33:[2,71],65:[2,71],72:[2,71],75:[2,71],80:[2,71],81:[2,71],82:[2,71],83:[2,71],84:[2,71],85:[2,71]},{33:[2,73],75:[2,73]},{23:[2,29],33:[2,29],54:[2,29],65:[2,29],68:[2,29],72:[2,29],75:[2,29],80:[2,29],81:[2,29],82:[2,29],83:[2,29],84:[2,29],85:[2,29]},{14:[2,15],15:[2,15],19:[2,15],29:[2,15],34:[2,15],39:[2,15],44:[2,15],47:[2,15],48:[2,15],51:[2,15],55:[2,15],60:[2,15]},{72:[1,137],77:[1,136]},{72:[2,100],77:[2,100]},{14:[2,16],15:[2,16],19:[2,16],29:[2,16],34:[2,16],44:[2,16],47:[2,16],48:[2,16],51:[2,16],55:[2,16],60:[2,16]},{33:[1,138]},{33:[2,75]},{33:[2,32]},{72:[2,101],77:[2,101]},{14:[2,17],15:[2,17],19:[2,17],29:[2,17],34:[2,17],39:[2,17],44:[2,17],47:[2,17],48:[2,17],51:[2,17],55:[2,17],60:[2,17]}],defaultActions:{4:[2,1],54:[2,55],56:[2,20],60:[2,57],73:[2,81],82:[2,85],86:[2,18],90:[2,89],101:[2,53],104:[2,93],110:[2,19],111:[2,77],116:[2,97],119:[2,63],122:[2,69],135:[2,75],136:[2,32]},parseError:function(a,b){throw new Error(a)},parse:function(a){function b(){var a;return a=c.lexer.lex()||1,"number"!=typeof a&&(a=c.symbols_[a]||a),a}var 
c=this,d=[0],e=[null],f=[],g=this.table,h="",i=0,j=0,k=0;this.lexer.setInput(a),this.lexer.yy=this.yy,this.yy.lexer=this.lexer,this.yy.parser=this,"undefined"==typeof this.lexer.yylloc&&(this.lexer.yylloc={});var l=this.lexer.yylloc;f.push(l);var m=this.lexer.options&&this.lexer.options.ranges;"function"==typeof this.yy.parseError&&(this.parseError=this.yy.parseError);for(var n,o,p,q,r,s,t,u,v,w={};;){if(p=d[d.length-1],this.defaultActions[p]?q=this.defaultActions[p]:(null!==n&&"undefined"!=typeof n||(n=b()),q=g[p]&&g[p][n]),"undefined"==typeof q||!q.length||!q[0]){var x="";if(!k){v=[];for(s in g[p])this.terminals_[s]&&s>2&&v.push("'"+this.terminals_[s]+"'");x=this.lexer.showPosition?"Parse error on line "+(i+1)+":\n"+this.lexer.showPosition()+"\nExpecting "+v.join(", ")+", got '"+(this.terminals_[n]||n)+"'":"Parse error on line "+(i+1)+": Unexpected "+(1==n?"end of input":"'"+(this.terminals_[n]||n)+"'"),this.parseError(x,{text:this.lexer.match,token:this.terminals_[n]||n,line:this.lexer.yylineno,loc:l,expected:v})}}if(q[0]instanceof Array&&q.length>1)throw new Error("Parse Error: multiple actions possible at state: "+p+", token: "+n);switch(q[0]){case 1:d.push(n),e.push(this.lexer.yytext),f.push(this.lexer.yylloc),d.push(q[1]),n=null,o?(n=o,o=null):(j=this.lexer.yyleng,h=this.lexer.yytext,i=this.lexer.yylineno,l=this.lexer.yylloc,k>0&&k--);break;case 2:if(t=this.productions_[q[1]][1],w.$=e[e.length-t],w._$={first_line:f[f.length-(t||1)].first_line,last_line:f[f.length-1].last_line,first_column:f[f.length-(t||1)].first_column,last_column:f[f.length-1].last_column},m&&(w._$.range=[f[f.length-(t||1)].range[0],f[f.length-1].range[1]]),r=this.performAction.call(w,h,j,i,this.yy,q[1],e,f),"undefined"!=typeof r)return r;t&&(d=d.slice(0,-1*t*2),e=e.slice(0,-1*t),f=f.slice(0,-1*t)),d.push(this.productions_[q[1]][0]),e.push(w.$),f.push(w._$),u=g[d[d.length-2]][d[d.length-1]],d.push(u);break;case 3:return!0}}return!0}},c=function(){var a={EOF:1,parseError:function(a,b){if(!this.yy.parser)throw new Error(a);this.yy.parser.parseError(a,b)},setInput:function(a){return this._input=a,this._more=this._less=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var a=this._input[0];this.yytext+=a,this.yyleng++,this.offset++,this.match+=a,this.matched+=a;var b=a.match(/(?:\r\n?|\n).*/g);return b?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),a},unput:function(a){var b=a.length,c=a.split(/(?:\r\n?|\n)/g);this._input=a+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-b-1),this.offset-=b;var d=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),c.length-1&&(this.yylineno-=c.length-1);var e=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:c?(c.length===d.length?this.yylloc.first_column:0)+d[d.length-c.length].length-c[0].length:this.yylloc.first_column-b},this.options.ranges&&(this.yylloc.range=[e[0],e[0]+this.yyleng-b]),this},more:function(){return this._more=!0,this},less:function(a){this.unput(this.match.slice(a))},pastInput:function(){var 
a=this.matched.substr(0,this.matched.length-this.match.length);return(a.length>20?"...":"")+a.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var a=this.match;return a.length<20&&(a+=this._input.substr(0,20-a.length)),(a.substr(0,20)+(a.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var a=this.pastInput(),b=new Array(a.length+1).join("-");return a+this.upcomingInput()+"\n"+b+"^"},next:function(){if(this.done)return this.EOF;this._input||(this.done=!0);var a,b,c,d,e;this._more||(this.yytext="",this.match="");for(var f=this._currentRules(),g=0;g<f.length&&(c=this._input.match(this.rules[f[g]]),!c||b&&!(c[0].length>b[0].length)||(b=c,d=g,this.options.flex));g++);return b?(e=b[0].match(/(?:\r\n?|\n).*/g),e&&(this.yylineno+=e.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:e?e[e.length-1].length-e[e.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+b[0].length},this.yytext+=b[0],this.match+=b[0],this.matches=b,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._input=this._input.slice(b[0].length),this.matched+=b[0],a=this.performAction.call(this,this.yy,this,f[d],this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),a?a:void 0):""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var a=this.next();return"undefined"!=typeof a?a:this.lex()},begin:function(a){this.conditionStack.push(a)},popState:function(){return this.conditionStack.pop()},_currentRules:function(){return this.conditions[this.conditionStack[this.conditionStack.length-1]].rules},topState:function(){return this.conditionStack[this.conditionStack.length-2]},pushState:function(a){this.begin(a)}};return a.options={},a.performAction=function(a,b,c,d){function e(a,c){return b.yytext=b.yytext.substring(a,b.yyleng-c+a)}switch(c){case 0:if("\\\\"===b.yytext.slice(-2)?(e(0,1),this.begin("mu")):"\\"===b.yytext.slice(-1)?(e(0,1),this.begin("emu")):this.begin("mu"),b.yytext)return 15;break;case 1:return 15;case 2:return this.popState(),15;case 3:return this.begin("raw"),15;case 4:return this.popState(),"raw"===this.conditionStack[this.conditionStack.length-1]?15:(e(5,9),"END_RAW_BLOCK");case 5:return 15;case 6:return this.popState(),14;case 7:return 65;case 8:return 68;case 9:return 19;case 10:return this.popState(),this.begin("raw"),23;case 11:return 55;case 12:return 60;case 13:return 29;case 14:return 47;case 15:return this.popState(),44;case 16:return this.popState(),44;case 17:return 34;case 18:return 39;case 19:return 51;case 20:return 48;case 21:this.unput(b.yytext),this.popState(),this.begin("com");break;case 22:return this.popState(),14;case 23:return 48;case 24:return 73;case 25:return 72;case 26:return 72;case 27:return 87;case 28:break;case 29:return this.popState(),54;case 30:return this.popState(),33;case 31:return b.yytext=e(1,2).replace(/\\"/g,'"'),80;case 32:return b.yytext=e(1,2).replace(/\\'/g,"'"),80;case 33:return 85;case 34:return 82;case 35:return 82;case 36:return 83;case 37:return 84;case 38:return 81;case 39:return 75;case 40:return 77;case 41:return 72;case 42:return b.yytext=b.yytext.replace(/\\([\\\]])/g,"$1"),72;case 43:return"INVALID";case 44:return 
5}},a.rules=[/^(?:[^\x00]*?(?=(\{\{)))/,/^(?:[^\x00]+)/,/^(?:[^\x00]{2,}?(?=(\{\{|\\\{\{|\\\\\{\{|$)))/,/^(?:\{\{\{\{(?=[^\/]))/,/^(?:\{\{\{\{\/[^\s!"#%-,\.\/;->@\[-\^`\{-~]+(?=[=}\s\/.])\}\}\}\})/,/^(?:[^\x00]+?(?=(\{\{\{\{)))/,/^(?:[\s\S]*?--(~)?\}\})/,/^(?:\()/,/^(?:\))/,/^(?:\{\{\{\{)/,/^(?:\}\}\}\})/,/^(?:\{\{(~)?>)/,/^(?:\{\{(~)?#>)/,/^(?:\{\{(~)?#\*?)/,/^(?:\{\{(~)?\/)/,/^(?:\{\{(~)?\^\s*(~)?\}\})/,/^(?:\{\{(~)?\s*else\s*(~)?\}\})/,/^(?:\{\{(~)?\^)/,/^(?:\{\{(~)?\s*else\b)/,/^(?:\{\{(~)?\{)/,/^(?:\{\{(~)?&)/,/^(?:\{\{(~)?!--)/,/^(?:\{\{(~)?![\s\S]*?\}\})/,/^(?:\{\{(~)?\*?)/,/^(?:=)/,/^(?:\.\.)/,/^(?:\.(?=([=~}\s\/.)|])))/,/^(?:[\/.])/,/^(?:\s+)/,/^(?:\}(~)?\}\})/,/^(?:(~)?\}\})/,/^(?:"(\\["]|[^"])*")/,/^(?:'(\\[']|[^'])*')/,/^(?:@)/,/^(?:true(?=([~}\s)])))/,/^(?:false(?=([~}\s)])))/,/^(?:undefined(?=([~}\s)])))/,/^(?:null(?=([~}\s)])))/,/^(?:-?[0-9]+(?:\.[0-9]+)?(?=([~}\s)])))/,/^(?:as\s+\|)/,/^(?:\|)/,/^(?:([^\s!"#%-,\.\/;->@\[-\^`\{-~]+(?=([=~}\s\/.)|]))))/,/^(?:\[(\\\]|[^\]])*\])/,/^(?:.)/,/^(?:$)/],a.conditions={mu:{rules:[7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44],inclusive:!1},emu:{rules:[2],inclusive:!1},com:{rules:[6],inclusive:!1},raw:{rules:[3,4,5],inclusive:!1},INITIAL:{rules:[0,1,44],inclusive:!0}},a}();return b.lexer=c,a.prototype=b,b.Parser=a,new a}();b["default"]=c,a.exports=b["default"]},function(a,b,c){"use strict";function d(){var a=arguments.length<=0||void 0===arguments[0]?{}:arguments[0];this.options=a}function e(a,b,c){void 0===b&&(b=a.length);var d=a[b-1],e=a[b-2];return d?"ContentStatement"===d.type?(e||!c?/\r?\n\s*?$/:/(^|\r?\n)\s*?$/).test(d.original):void 0:c}function f(a,b,c){void 0===b&&(b=-1);var d=a[b+1],e=a[b+2];return d?"ContentStatement"===d.type?(e||!c?/^\s*?\r?\n/:/^\s*?(\r?\n|$)/).test(d.original):void 0:c}function g(a,b,c){var d=a[null==b?0:b+1];if(d&&"ContentStatement"===d.type&&(c||!d.rightStripped)){var e=d.value;d.value=d.value.replace(c?/^\s+/:/^[ \t]*\r?\n?/,""),d.rightStripped=d.value!==e}}function h(a,b,c){var d=a[null==b?a.length-1:b-1];if(d&&"ContentStatement"===d.type&&(c||!d.leftStripped)){var e=d.value;return d.value=d.value.replace(c?/\s+$/:/[ \t]+$/,""),d.leftStripped=d.value!==e,d.leftStripped}}var i=c(1)["default"];b.__esModule=!0;var j=c(49),k=i(j);d.prototype=new k["default"],d.prototype.Program=function(a){var b=!this.options.ignoreStandalone,c=!this.isRootSeen;this.isRootSeen=!0;for(var d=a.body,i=0,j=d.length;i<j;i++){var k=d[i],l=this.accept(k);if(l){var m=e(d,i,c),n=f(d,i,c),o=l.openStandalone&&m,p=l.closeStandalone&&n,q=l.inlineStandalone&&m&&n;l.close&&g(d,i,!0),l.open&&h(d,i,!0),b&&q&&(g(d,i),h(d,i)&&"PartialStatement"===k.type&&(k.indent=/([ \t]+$)/.exec(d[i-1].original)[1])),b&&o&&(g((k.program||k.inverse).body),h(d,i)),b&&p&&(g(d,i),h((k.inverse||k.program).body))}}return a},d.prototype.BlockStatement=d.prototype.DecoratorBlock=d.prototype.PartialBlockStatement=function(a){this.accept(a.program),this.accept(a.inverse);var b=a.program||a.inverse,c=a.program&&a.inverse,d=c,i=c;if(c&&c.chained)for(d=c.body[0].program;i.chained;)i=i.body[i.body.length-1].program;var j={open:a.openStrip.open,close:a.closeStrip.close,openStandalone:f(b.body),closeStandalone:e((d||b).body)};if(a.openStrip.close&&g(b.body,null,!0),c){var k=a.inverseStrip;k.open&&h(b.body,null,!0),k.close&&g(d.body,null,!0),a.closeStrip.open&&h(i.body,null,!0),!this.options.ignoreStandalone&&e(b.body)&&f(d.body)&&(h(b.body),g(d.body))}else a.closeStrip.open&&h(b.body,null,!0);return 
j},d.prototype.Decorator=d.prototype.MustacheStatement=function(a){return a.strip},d.prototype.PartialStatement=d.prototype.CommentStatement=function(a){var b=a.strip||{};return{inlineStandalone:!0,open:b.open,close:b.close}},b["default"]=d,a.exports=b["default"]},function(a,b,c){"use strict";function d(){this.parents=[]}function e(a){this.acceptRequired(a,"path"),this.acceptArray(a.params),this.acceptKey(a,"hash")}function f(a){e.call(this,a),this.acceptKey(a,"program"),this.acceptKey(a,"inverse")}function g(a){this.acceptRequired(a,"name"),this.acceptArray(a.params),this.acceptKey(a,"hash")}var h=c(1)["default"];b.__esModule=!0;var i=c(6),j=h(i);d.prototype={constructor:d,mutating:!1,acceptKey:function(a,b){var c=this.accept(a[b]);if(this.mutating){if(c&&!d.prototype[c.type])throw new j["default"]('Unexpected node type "'+c.type+'" found when accepting '+b+" on "+a.type);a[b]=c}},acceptRequired:function(a,b){if(this.acceptKey(a,b),!a[b])throw new j["default"](a.type+" requires "+b)},acceptArray:function(a){for(var b=0,c=a.length;b<c;b++)this.acceptKey(a,b),a[b]||(a.splice(b,1),b--,c--)},accept:function(a){if(a){if(!this[a.type])throw new j["default"]("Unknown type: "+a.type,a);this.current&&this.parents.unshift(this.current),this.current=a;var b=this[a.type](a);return this.current=this.parents.shift(),!this.mutating||b?b:b!==!1?a:void 0}},Program:function(a){this.acceptArray(a.body)},MustacheStatement:e,Decorator:e,BlockStatement:f,DecoratorBlock:f,PartialStatement:g,PartialBlockStatement:function(a){g.call(this,a),this.acceptKey(a,"program")},ContentStatement:function(){},CommentStatement:function(){},SubExpression:e,PathExpression:function(){},StringLiteral:function(){},NumberLiteral:function(){},BooleanLiteral:function(){},UndefinedLiteral:function(){},NullLiteral:function(){},Hash:function(a){this.acceptArray(a.pairs)},HashPair:function(a){this.acceptRequired(a,"value")}},b["default"]=d,a.exports=b["default"]},function(a,b,c){"use strict";function d(a,b){if(b=b.path?b.path.original:b,a.path.original!==b){var c={loc:a.path.loc};throw new q["default"](a.path.original+" doesn't match "+b,c)}}function e(a,b){this.source=a,this.start={line:b.first_line,column:b.first_column},this.end={line:b.last_line,column:b.last_column}}function f(a){return/^\[.*\]$/.test(a)?a.substring(1,a.length-1):a}function g(a,b){return{open:"~"===a.charAt(2),close:"~"===b.charAt(b.length-3)}}function h(a){return a.replace(/^\{\{~?!-?-?/,"").replace(/-?-?~?\}\}$/,"")}function i(a,b,c){c=this.locInfo(c);for(var d=a?"@":"",e=[],f=0,g=0,h=b.length;g<h;g++){var i=b[g].part,j=b[g].original!==i;if(d+=(b[g].separator||"")+i,j||".."!==i&&"."!==i&&"this"!==i)e.push(i);else{if(e.length>0)throw new q["default"]("Invalid path: "+d,{loc:c});".."===i&&f++}}return{type:"PathExpression",data:a,depth:f,parts:e,original:d,loc:c}}function j(a,b,c,d,e,f){var g=d.charAt(3)||d.charAt(2),h="{"!==g&&"&"!==g,i=/\*/.test(d);return{type:i?"Decorator":"MustacheStatement",path:a,params:b,hash:c,escaped:h,strip:e,loc:this.locInfo(f)}}function k(a,b,c,e){d(a,c),e=this.locInfo(e);var f={type:"Program",body:b,strip:{},loc:e};return{type:"BlockStatement",path:a.path,params:a.params,hash:a.hash,program:f,openStrip:{},inverseStrip:{},closeStrip:{},loc:e}}function l(a,b,c,e,f,g){e&&e.path&&d(a,e);var h=/\*/.test(a.open);b.blockParams=a.blockParams;var i=void 0,j=void 0;if(c){if(h)throw new q["default"]("Unexpected inverse block on decorator",c);c.chain&&(c.program.body[0].closeStrip=e.strip),j=c.strip,i=c.program}return 
f&&(f=i,i=b,b=f),{type:h?"DecoratorBlock":"BlockStatement",path:a.path,params:a.params,hash:a.hash,program:b,inverse:i,openStrip:a.strip,inverseStrip:j,closeStrip:e&&e.strip,loc:this.locInfo(g)}}function m(a,b){if(!b&&a.length){var c=a[0].loc,d=a[a.length-1].loc;c&&d&&(b={source:c.source,start:{line:c.start.line,column:c.start.column},end:{line:d.end.line,column:d.end.column}})}return{type:"Program",body:a,strip:{},loc:b}}function n(a,b,c,e){return d(a,c),{type:"PartialBlockStatement",name:a.path,params:a.params,hash:a.hash,program:b,openStrip:a.strip,closeStrip:c&&c.strip,loc:this.locInfo(e)}}var o=c(1)["default"];b.__esModule=!0,b.SourceLocation=e,b.id=f,b.stripFlags=g,b.stripComment=h,b.preparePath=i,b.prepareMustache=j,b.prepareRawBlock=k,b.prepareBlock=l,b.prepareProgram=m,b.preparePartialBlock=n;var p=c(6),q=o(p)},function(a,b,c){"use strict";function d(){}function e(a,b,c){if(null==a||"string"!=typeof a&&"Program"!==a.type)throw new l["default"]("You must pass a string or Handlebars AST to Handlebars.precompile. You passed "+a);b=b||{},"data"in b||(b.data=!0),b.compat&&(b.useDepths=!0);var d=c.parse(a,b),e=(new c.Compiler).compile(d,b);return(new c.JavaScriptCompiler).compile(e,b)}function f(a,b,c){function d(){var d=c.parse(a,b),e=(new c.Compiler).compile(d,b),f=(new c.JavaScriptCompiler).compile(e,b,void 0,!0);return c.template(f)}function e(a,b){return f||(f=d()),f.call(this,a,b)}if(void 0===b&&(b={}),null==a||"string"!=typeof a&&"Program"!==a.type)throw new l["default"]("You must pass a string or Handlebars AST to Handlebars.compile. You passed "+a);b=m.extend({},b),"data"in b||(b.data=!0),b.compat&&(b.useDepths=!0);var f=void 0;return e._setup=function(a){return f||(f=d()),f._setup(a)},e._child=function(a,b,c,e){return f||(f=d()),f._child(a,b,c,e)},e}function g(a,b){if(a===b)return!0;if(m.isArray(a)&&m.isArray(b)&&a.length===b.length){for(var c=0;c<a.length;c++)if(!g(a[c],b[c]))return!1;return!0}}function h(a){if(!a.path.parts){var b=a.path;a.path={type:"PathExpression",data:!1,depth:0,parts:[b.original+""],original:b.original+"",loc:b.loc}}}var i=c(41)["default"],j=c(1)["default"];b.__esModule=!0,b.Compiler=d,b.precompile=e,b.compile=f;var k=c(6),l=j(k),m=c(5),n=c(45),o=j(n),p=[].slice;d.prototype={compiler:d,equals:function(a){var b=this.opcodes.length;if(a.opcodes.length!==b)return!1;for(var c=0;c<b;c++){var d=this.opcodes[c],e=a.opcodes[c];if(d.opcode!==e.opcode||!g(d.args,e.args))return!1}b=this.children.length;for(var c=0;c<b;c++)if(!this.children[c].equals(a.children[c]))return!1;return!0},guid:0,compile:function(a,b){return this.sourceNode=[],this.opcodes=[],this.children=[],this.options=b,this.stringParams=b.stringParams,this.trackIds=b.trackIds,b.blockParams=b.blockParams||[],b.knownHelpers=m.extend(i(null),{helperMissing:!0,blockHelperMissing:!0,each:!0,"if":!0,unless:!0,"with":!0,log:!0,lookup:!0},b.knownHelpers),this.accept(a)},compileProgram:function(a){var b=new this.compiler,c=b.compile(a,this.options),d=this.guid++;return this.usePartial=this.usePartial||c.usePartial,this.children[d]=c,this.useDepths=this.useDepths||c.useDepths,d},accept:function(a){if(!this[a.type])throw new l["default"]("Unknown type: "+a.type,a);this.sourceNode.unshift(a);var b=this[a.type](a);return this.sourceNode.shift(),b},Program:function(a){this.options.blockParams.unshift(a.blockParams);for(var b=a.body,c=b.length,d=0;d<c;d++)this.accept(b[d]);return 
this.options.blockParams.shift(),this.isSimple=1===c,this.blockParams=a.blockParams?a.blockParams.length:0,this},BlockStatement:function(a){h(a);var b=a.program,c=a.inverse;b=b&&this.compileProgram(b),c=c&&this.compileProgram(c);var d=this.classifySexpr(a);"helper"===d?this.helperSexpr(a,b,c):"simple"===d?(this.simpleSexpr(a),this.opcode("pushProgram",b),this.opcode("pushProgram",c),this.opcode("emptyHash"),this.opcode("blockValue",a.path.original)):(this.ambiguousSexpr(a,b,c),this.opcode("pushProgram",b),this.opcode("pushProgram",c),this.opcode("emptyHash"),this.opcode("ambiguousBlockValue")),this.opcode("append")},DecoratorBlock:function(a){var b=a.program&&this.compileProgram(a.program),c=this.setupFullMustacheParams(a,b,void 0),d=a.path;this.useDecorators=!0,this.opcode("registerDecorator",c.length,d.original)},PartialStatement:function(a){this.usePartial=!0;var b=a.program;b&&(b=this.compileProgram(a.program));var c=a.params;if(c.length>1)throw new l["default"]("Unsupported number of partial arguments: "+c.length,a);c.length||(this.options.explicitPartialContext?this.opcode("pushLiteral","undefined"):c.push({type:"PathExpression",parts:[],depth:0}));var d=a.name.original,e="SubExpression"===a.name.type;e&&this.accept(a.name),this.setupFullMustacheParams(a,b,void 0,!0);var f=a.indent||"";this.options.preventIndent&&f&&(this.opcode("appendContent",f),f=""),this.opcode("invokePartial",e,d,f),this.opcode("append")},PartialBlockStatement:function(a){this.PartialStatement(a)},MustacheStatement:function(a){this.SubExpression(a),a.escaped&&!this.options.noEscape?this.opcode("appendEscaped"):this.opcode("append")},Decorator:function(a){this.DecoratorBlock(a)},ContentStatement:function(a){a.value&&this.opcode("appendContent",a.value)},CommentStatement:function(){},SubExpression:function(a){h(a);var b=this.classifySexpr(a);"simple"===b?this.simpleSexpr(a):"helper"===b?this.helperSexpr(a):this.ambiguousSexpr(a)},ambiguousSexpr:function(a,b,c){var d=a.path,e=d.parts[0],f=null!=b||null!=c;this.opcode("getContext",d.depth),this.opcode("pushProgram",b),this.opcode("pushProgram",c),d.strict=!0,this.accept(d),this.opcode("invokeAmbiguous",e,f)},simpleSexpr:function(a){var b=a.path;b.strict=!0,this.accept(b),this.opcode("resolvePossibleLambda")},helperSexpr:function(a,b,c){var d=this.setupFullMustacheParams(a,b,c),e=a.path,f=e.parts[0];if(this.options.knownHelpers[f])this.opcode("invokeKnownHelper",d.length,f);else{if(this.options.knownHelpersOnly)throw new l["default"]("You specified knownHelpersOnly, but used the unknown helper "+f,a);e.strict=!0,e.falsy=!0,this.accept(e),this.opcode("invokeHelper",d.length,e.original,o["default"].helpers.simpleId(e))}},PathExpression:function(a){this.addDepth(a.depth),this.opcode("getContext",a.depth);var b=a.parts[0],c=o["default"].helpers.scopedId(a),d=!a.depth&&!c&&this.blockParamIndex(b);d?this.opcode("lookupBlockParam",d,a.parts):b?a.data?(this.options.data=!0,this.opcode("lookupData",a.depth,a.parts,a.strict)):this.opcode("lookupOnContext",a.parts,a.falsy,a.strict,c):this.opcode("pushContext")},StringLiteral:function(a){this.opcode("pushString",a.value)},NumberLiteral:function(a){this.opcode("pushLiteral",a.value)},BooleanLiteral:function(a){this.opcode("pushLiteral",a.value)},UndefinedLiteral:function(){this.opcode("pushLiteral","undefined")},NullLiteral:function(){this.opcode("pushLiteral","null")},Hash:function(a){var 
b=a.pairs,c=0,d=b.length;for(this.opcode("pushHash");c<d;c++)this.pushParam(b[c].value);for(;c--;)this.opcode("assignToHash",b[c].key);this.opcode("popHash")},opcode:function(a){this.opcodes.push({opcode:a,args:p.call(arguments,1),loc:this.sourceNode[0].loc})},addDepth:function(a){a&&(this.useDepths=!0)},classifySexpr:function(a){var b=o["default"].helpers.simpleId(a.path),c=b&&!!this.blockParamIndex(a.path.parts[0]),d=!c&&o["default"].helpers.helperExpression(a),e=!c&&(d||b);if(e&&!d){var f=a.path.parts[0],g=this.options;g.knownHelpers[f]?d=!0:g.knownHelpersOnly&&(e=!1)}return d?"helper":e?"ambiguous":"simple"},pushParams:function(a){for(var b=0,c=a.length;b<c;b++)this.pushParam(a[b])},pushParam:function(a){var b=null!=a.value?a.value:a.original||"";if(this.stringParams)b.replace&&(b=b.replace(/^(\.?\.\/)*/g,"").replace(/\//g,".")),a.depth&&this.addDepth(a.depth),this.opcode("getContext",a.depth||0),this.opcode("pushStringParam",b,a.type),"SubExpression"===a.type&&this.accept(a);else{if(this.trackIds){var c=void 0;if(!a.parts||o["default"].helpers.scopedId(a)||a.depth||(c=this.blockParamIndex(a.parts[0])),c){var d=a.parts.slice(1).join(".");this.opcode("pushId","BlockParam",c,d)}else b=a.original||b,b.replace&&(b=b.replace(/^this(?:\.|$)/,"").replace(/^\.\//,"").replace(/^\.$/,"")),this.opcode("pushId",a.type,b)}this.accept(a)}},setupFullMustacheParams:function(a,b,c,d){var e=a.params;return this.pushParams(e),this.opcode("pushProgram",b),this.opcode("pushProgram",c),a.hash?this.accept(a.hash):this.opcode("emptyHash",d),e},blockParamIndex:function(a){for(var b=0,c=this.options.blockParams.length;b<c;b++){var d=this.options.blockParams[b],e=d&&m.indexOf(d,a);if(d&&e>=0)return[b,e]}}}},function(a,b,c){"use strict";function d(a){this.value=a}function e(){}function f(a,b,c,d){var e=b.popStack(),f=0,g=c.length;for(a&&g--;f<g;f++)e=b.nameLookup(e,c[f],d);return a?[b.aliasable("container.strict"),"(",e,", ",b.quotedString(c[f]),", ",JSON.stringify(b.source.currentLocation)," )"]:e}var g=c(13)["default"],h=c(1)["default"];b.__esModule=!0;var i=c(4),j=c(6),k=h(j),l=c(5),m=c(53),n=h(m);e.prototype={nameLookup:function(a,b){return this.internalNameLookup(a,b)},depthedLookup:function(a){return[this.aliasable("container.lookup"),'(depths, "',a,'")']},compilerInfo:function(){var a=i.COMPILER_REVISION,b=i.REVISION_CHANGES[a];return[a,b]},appendToBuffer:function(a,b,c){return l.isArray(a)||(a=[a]),a=this.source.wrap(a,b),this.environment.isSimple?["return ",a,";"]:c?["buffer += ",a,";"]:(a.appendToBuffer=!0,a)},initializeBuffer:function(){return this.quotedString("")},internalNameLookup:function(a,b){return this.lookupPropertyFunctionIsUsed=!0,["lookupProperty(",a,",",JSON.stringify(b),")"]},lookupPropertyFunctionIsUsed:!1,compile:function(a,b,c,d){this.environment=a,this.options=b,this.stringParams=this.options.stringParams,this.trackIds=this.options.trackIds,this.precompile=!d,this.name=this.environment.name,this.isChild=!!c,this.context=c||{decorators:[],programs:[],environments:[]},this.preamble(),this.stackSlot=0,this.stackVars=[],this.aliases={},this.registers={list:[]},this.hashes=[],this.compileStack=[],this.inlineStack=[],this.blockParams=[],this.compileChildren(a,b),this.useDepths=this.useDepths||a.useDepths||a.useDecorators||this.options.compat,this.useBlockParams=this.useBlockParams||a.useBlockParams;var e=a.opcodes,f=void 0,g=void 0,h=void 0,i=void 
0;for(h=0,i=e.length;h<i;h++)f=e[h],this.source.currentLocation=f.loc,g=g||f.loc,this[f.opcode].apply(this,f.args);if(this.source.currentLocation=g,this.pushSource(""),this.stackSlot||this.inlineStack.length||this.compileStack.length)throw new k["default"]("Compile completed with content left on stack");this.decorators.isEmpty()?this.decorators=void 0:(this.useDecorators=!0,this.decorators.prepend(["var decorators = container.decorators, ",this.lookupPropertyFunctionVarDeclaration(),";\n"]),this.decorators.push("return fn;"),d?this.decorators=Function.apply(this,["fn","props","container","depth0","data","blockParams","depths",this.decorators.merge()]):(this.decorators.prepend("function(fn, props, container, depth0, data, blockParams, depths) {\n"),
this.decorators.push("}\n"),this.decorators=this.decorators.merge()));var j=this.createFunctionContext(d);if(this.isChild)return j;var l={compiler:this.compilerInfo(),main:j};this.decorators&&(l.main_d=this.decorators,l.useDecorators=!0);var m=this.context,n=m.programs,o=m.decorators;for(h=0,i=n.length;h<i;h++)n[h]&&(l[h]=n[h],o[h]&&(l[h+"_d"]=o[h],l.useDecorators=!0));return this.environment.usePartial&&(l.usePartial=!0),this.options.data&&(l.useData=!0),this.useDepths&&(l.useDepths=!0),this.useBlockParams&&(l.useBlockParams=!0),this.options.compat&&(l.compat=!0),d?l.compilerOptions=this.options:(l.compiler=JSON.stringify(l.compiler),this.source.currentLocation={start:{line:1,column:0}},l=this.objectLiteral(l),b.srcName?(l=l.toStringWithSourceMap({file:b.destName}),l.map=l.map&&l.map.toString()):l=l.toString()),l},preamble:function(){this.lastContext=0,this.source=new n["default"](this.options.srcName),this.decorators=new n["default"](this.options.srcName)},createFunctionContext:function(a){var b=this,c="",d=this.stackVars.concat(this.registers.list);d.length>0&&(c+=", "+d.join(", "));var e=0;g(this.aliases).forEach(function(a){var d=b.aliases[a];d.children&&d.referenceCount>1&&(c+=", alias"+ ++e+"="+a,d.children[0]="alias"+e)}),this.lookupPropertyFunctionIsUsed&&(c+=", "+this.lookupPropertyFunctionVarDeclaration());var f=["container","depth0","helpers","partials","data"];(this.useBlockParams||this.useDepths)&&f.push("blockParams"),this.useDepths&&f.push("depths");var h=this.mergeSource(c);return a?(f.push(h),Function.apply(this,f)):this.source.wrap(["function(",f.join(","),") {\n ",h,"}"])},mergeSource:function(a){var b=this.environment.isSimple,c=!this.forceBuffer,d=void 0,e=void 0,f=void 0,g=void 0;return this.source.each(function(a){a.appendToBuffer?(f?a.prepend(" + "):f=a,g=a):(f&&(e?f.prepend("buffer += "):d=!0,g.add(";"),f=g=void 0),e=!0,b||(c=!1))}),c?f?(f.prepend("return "),g.add(";")):e||this.source.push('return "";'):(a+=", buffer = "+(d?"":this.initializeBuffer()),f?(f.prepend("return buffer + "),g.add(";")):this.source.push("return buffer;")),a&&this.source.prepend("var "+a.substring(2)+(d?"":";\n")),this.source.merge()},lookupPropertyFunctionVarDeclaration:function(){return"\n lookupProperty = container.lookupProperty || function(parent, propertyName) {\n if (Object.prototype.hasOwnProperty.call(parent, propertyName)) {\n return parent[propertyName];\n }\n return undefined\n }\n ".trim()},blockValue:function(a){var b=this.aliasable("container.hooks.blockHelperMissing"),c=[this.contextName(0)];this.setupHelperArgs(a,0,c);var d=this.popStack();c.splice(1,0,d),this.push(this.source.functionCall(b,"call",c))},ambiguousBlockValue:function(){var a=this.aliasable("container.hooks.blockHelperMissing"),b=[this.contextName(0)];this.setupHelperArgs("",0,b,!0),this.flushInline();var c=this.topStack();b.splice(1,0,c),this.pushSource(["if (!",this.lastHelper,") { ",c," = ",this.source.functionCall(a,"call",b),"}"])},appendContent:function(a){this.pendingContent?a=this.pendingContent+a:this.pendingLocation=this.source.currentLocation,this.pendingContent=a},append:function(){if(this.isInline())this.replaceStack(function(a){return[" != null ? 
",a,' : ""']}),this.pushSource(this.appendToBuffer(this.popStack()));else{var a=this.popStack();this.pushSource(["if (",a," != null) { ",this.appendToBuffer(a,void 0,!0)," }"]),this.environment.isSimple&&this.pushSource(["else { ",this.appendToBuffer("''",void 0,!0)," }"])}},appendEscaped:function(){this.pushSource(this.appendToBuffer([this.aliasable("container.escapeExpression"),"(",this.popStack(),")"]))},getContext:function(a){this.lastContext=a},pushContext:function(){this.pushStackLiteral(this.contextName(this.lastContext))},lookupOnContext:function(a,b,c,d){var e=0;d||!this.options.compat||this.lastContext?this.pushContext():this.push(this.depthedLookup(a[e++])),this.resolvePath("context",a,e,b,c)},lookupBlockParam:function(a,b){this.useBlockParams=!0,this.push(["blockParams[",a[0],"][",a[1],"]"]),this.resolvePath("context",b,1)},lookupData:function(a,b,c){a?this.pushStackLiteral("container.data(data, "+a+")"):this.pushStackLiteral("data"),this.resolvePath("data",b,0,!0,c)},resolvePath:function(a,b,c,d,e){var g=this;if(this.options.strict||this.options.assumeObjects)return void this.push(f(this.options.strict&&e,this,b,a));for(var h=b.length;c<h;c++)this.replaceStack(function(e){var f=g.nameLookup(e,b[c],a);return d?[" && ",f]:[" != null ? ",f," : ",e]})},resolvePossibleLambda:function(){this.push([this.aliasable("container.lambda"),"(",this.popStack(),", ",this.contextName(0),")"])},pushStringParam:function(a,b){this.pushContext(),this.pushString(b),"SubExpression"!==b&&("string"==typeof a?this.pushString(a):this.pushStackLiteral(a))},emptyHash:function(a){this.trackIds&&this.push("{}"),this.stringParams&&(this.push("{}"),this.push("{}")),this.pushStackLiteral(a?"undefined":"{}")},pushHash:function(){this.hash&&this.hashes.push(this.hash),this.hash={values:{},types:[],contexts:[],ids:[]}},popHash:function(){var a=this.hash;this.hash=this.hashes.pop(),this.trackIds&&this.push(this.objectLiteral(a.ids)),this.stringParams&&(this.push(this.objectLiteral(a.contexts)),this.push(this.objectLiteral(a.types))),this.push(this.objectLiteral(a.values))},pushString:function(a){this.pushStackLiteral(this.quotedString(a))},pushLiteral:function(a){this.pushStackLiteral(a)},pushProgram:function(a){null!=a?this.pushStackLiteral(this.programExpression(a)):this.pushStackLiteral(null)},registerDecorator:function(a,b){var c=this.nameLookup("decorators",b,"decorator"),d=this.setupHelperArgs(b,a);this.decorators.push(["fn = ",this.decorators.functionCall(c,"",["fn","props","container",d])," || fn;"])},invokeHelper:function(a,b,c){var d=this.popStack(),e=this.setupHelper(a,b),f=[];c&&f.push(e.name),f.push(d),this.options.strict||f.push(this.aliasable("container.hooks.helperMissing"));var g=["(",this.itemsSeparatedBy(f,"||"),")"],h=this.source.functionCall(g,"call",e.callParams);this.push(h)},itemsSeparatedBy:function(a,b){var c=[];c.push(a[0]);for(var d=1;d<a.length;d++)c.push(b,a[d]);return c},invokeKnownHelper:function(a,b){var c=this.setupHelper(a,b);this.push(this.source.functionCall(c.name,"call",c.callParams))},invokeAmbiguous:function(a,b){this.useRegister("helper");var c=this.popStack();this.emptyHash();var d=this.setupHelper(0,a,b),e=this.lastHelper=this.nameLookup("helpers",a,"helper"),f=["(","(helper = ",e," || ",c,")"];this.options.strict||(f[0]="(helper = ",f.push(" != null ? helper : ",this.aliasable("container.hooks.helperMissing"))),this.push(["(",f,d.paramsInit?["),(",d.paramsInit]:[],"),","(typeof helper === ",this.aliasable('"function"')," ? 
",this.source.functionCall("helper","call",d.callParams)," : helper))"])},invokePartial:function(a,b,c){var d=[],e=this.setupParams(b,1,d);a&&(b=this.popStack(),delete e.name),c&&(e.indent=JSON.stringify(c)),e.helpers="helpers",e.partials="partials",e.decorators="container.decorators",a?d.unshift(b):d.unshift(this.nameLookup("partials",b,"partial")),this.options.compat&&(e.depths="depths"),e=this.objectLiteral(e),d.push(e),this.push(this.source.functionCall("container.invokePartial","",d))},assignToHash:function(a){var b=this.popStack(),c=void 0,d=void 0,e=void 0;this.trackIds&&(e=this.popStack()),this.stringParams&&(d=this.popStack(),c=this.popStack());var f=this.hash;c&&(f.contexts[a]=c),d&&(f.types[a]=d),e&&(f.ids[a]=e),f.values[a]=b},pushId:function(a,b,c){"BlockParam"===a?this.pushStackLiteral("blockParams["+b[0]+"].path["+b[1]+"]"+(c?" + "+JSON.stringify("."+c):"")):"PathExpression"===a?this.pushString(b):"SubExpression"===a?this.pushStackLiteral("true"):this.pushStackLiteral("null")},compiler:e,compileChildren:function(a,b){for(var c=a.children,d=void 0,e=void 0,f=0,g=c.length;f<g;f++){d=c[f],e=new this.compiler;var h=this.matchExistingProgram(d);if(null==h){this.context.programs.push("");var i=this.context.programs.length;d.index=i,d.name="program"+i,this.context.programs[i]=e.compile(d,b,this.context,!this.precompile),this.context.decorators[i]=e.decorators,this.context.environments[i]=d,this.useDepths=this.useDepths||e.useDepths,this.useBlockParams=this.useBlockParams||e.useBlockParams,d.useDepths=this.useDepths,d.useBlockParams=this.useBlockParams}else d.index=h.index,d.name="program"+h.index,this.useDepths=this.useDepths||h.useDepths,this.useBlockParams=this.useBlockParams||h.useBlockParams}},matchExistingProgram:function(a){for(var b=0,c=this.context.environments.length;b<c;b++){var d=this.context.environments[b];if(d&&d.equals(a))return d}},programExpression:function(a){var b=this.environment.children[a],c=[b.index,"data",b.blockParams];return(this.useBlockParams||this.useDepths)&&c.push("blockParams"),this.useDepths&&c.push("depths"),"container.program("+c.join(", ")+")"},useRegister:function(a){this.registers[a]||(this.registers[a]=!0,this.registers.list.push(a))},push:function(a){return a instanceof d||(a=this.source.wrap(a)),this.inlineStack.push(a),a},pushStackLiteral:function(a){this.push(new d(a))},pushSource:function(a){this.pendingContent&&(this.source.push(this.appendToBuffer(this.source.quotedString(this.pendingContent),this.pendingLocation)),this.pendingContent=void 0),a&&this.source.push(a)},replaceStack:function(a){var b=["("],c=void 0,e=void 0,f=void 0;if(!this.isInline())throw new k["default"]("replaceStack on non-inline");var g=this.popStack(!0);if(g instanceof d)c=[g.value],b=["(",c],f=!0;else{e=!0;var h=this.incrStack();b=["((",this.push(h)," = ",g,")"],c=this.topStack()}var i=a.call(this,c);f||this.popStack(),e&&this.stackSlot--,this.push(b.concat(i,")"))},incrStack:function(){return this.stackSlot++,this.stackSlot>this.stackVars.length&&this.stackVars.push("stack"+this.stackSlot),this.topStackName()},topStackName:function(){return"stack"+this.stackSlot},flushInline:function(){var a=this.inlineStack;this.inlineStack=[];for(var b=0,c=a.length;b<c;b++){var e=a[b];if(e instanceof d)this.compileStack.push(e);else{var f=this.incrStack();this.pushSource([f," = ",e,";"]),this.compileStack.push(f)}}},isInline:function(){return this.inlineStack.length},popStack:function(a){var b=this.isInline(),c=(b?this.inlineStack:this.compileStack).pop();if(!a&&c instanceof 
d)return c.value;if(!b){if(!this.stackSlot)throw new k["default"]("Invalid stack pop");this.stackSlot--}return c},topStack:function(){var a=this.isInline()?this.inlineStack:this.compileStack,b=a[a.length-1];return b instanceof d?b.value:b},contextName:function(a){return this.useDepths&&a?"depths["+a+"]":"depth"+a},quotedString:function(a){return this.source.quotedString(a)},objectLiteral:function(a){return this.source.objectLiteral(a)},aliasable:function(a){var b=this.aliases[a];return b?(b.referenceCount++,b):(b=this.aliases[a]=this.source.wrap(a),b.aliasable=!0,b.referenceCount=1,b)},setupHelper:function(a,b,c){var d=[],e=this.setupHelperArgs(b,a,d,c),f=this.nameLookup("helpers",b,"helper"),g=this.aliasable(this.contextName(0)+" != null ? "+this.contextName(0)+" : (container.nullContext || {})");return{params:d,paramsInit:e,name:f,callParams:[g].concat(d)}},setupParams:function(a,b,c){var d={},e=[],f=[],g=[],h=!c,i=void 0;h&&(c=[]),d.name=this.quotedString(a),d.hash=this.popStack(),this.trackIds&&(d.hashIds=this.popStack()),this.stringParams&&(d.hashTypes=this.popStack(),d.hashContexts=this.popStack());var j=this.popStack(),k=this.popStack();(k||j)&&(d.fn=k||"container.noop",d.inverse=j||"container.noop");for(var l=b;l--;)i=this.popStack(),c[l]=i,this.trackIds&&(g[l]=this.popStack()),this.stringParams&&(f[l]=this.popStack(),e[l]=this.popStack());return h&&(d.args=this.source.generateArray(c)),this.trackIds&&(d.ids=this.source.generateArray(g)),this.stringParams&&(d.types=this.source.generateArray(f),d.contexts=this.source.generateArray(e)),this.options.data&&(d.data="data"),this.useBlockParams&&(d.blockParams="blockParams"),d},setupHelperArgs:function(a,b,c,d){var e=this.setupParams(a,b,c);return e.loc=JSON.stringify(this.source.currentLocation),e=this.objectLiteral(e),d?(this.useRegister("options"),c.push("options"),["options=",e]):c?(c.push(e),""):e}},function(){for(var a="break else new var case finally return void catch for switch while continue function this with default if throw delete in try do instanceof typeof abstract enum int short boolean export interface static byte extends long super char final native synchronized class float package throws const goto private transient debugger implements protected volatile double import public let yield await null true false".split(" "),b=e.RESERVED_WORDS={},c=0,d=a.length;c<d;c++)b[a[c]]=!0}(),e.isValidJavaScriptVariableName=function(a){return!e.RESERVED_WORDS[a]&&/^[a-zA-Z_$][0-9a-zA-Z_$]*$/.test(a)},b["default"]=e,a.exports=b["default"]},function(a,b,c){"use strict";function d(a,b,c){if(g.isArray(a)){for(var d=[],e=0,f=a.length;e<f;e++)d.push(b.wrap(a[e],c));return d}return"boolean"==typeof a||"number"==typeof a?a+"":a}function e(a){this.srcFile=a,this.source=[]}var f=c(13)["default"];b.__esModule=!0;var g=c(5),h=void 0;try{}catch(i){}h||(h=function(a,b,c,d){this.src="",d&&this.add(d)},h.prototype={add:function(a){g.isArray(a)&&(a=a.join("")),this.src+=a},prepend:function(a){g.isArray(a)&&(a=a.join("")),this.src=a+this.src},toStringWithSourceMap:function(){return{code:this.toString()}},toString:function(){return this.src}}),e.prototype={isEmpty:function(){return!this.source.length},prepend:function(a,b){this.source.unshift(this.wrap(a,b))},push:function(a,b){this.source.push(this.wrap(a,b))},merge:function(){var a=this.empty();return this.each(function(b){a.add([" ",b,"\n"])}),a},each:function(a){for(var b=0,c=this.source.length;b<c;b++)a(this.source[b])},empty:function(){var a=this.currentLocation||{start:{}};return new 
h(a.start.line,a.start.column,this.srcFile)},wrap:function(a){var b=arguments.length<=1||void 0===arguments[1]?this.currentLocation||{start:{}}:arguments[1];return a instanceof h?a:(a=d(a,this,b),new h(b.start.line,b.start.column,this.srcFile,a))},functionCall:function(a,b,c){return c=this.generateList(c),this.wrap([a,b?"."+b+"(":"(",c,")"])},quotedString:function(a){return'"'+(a+"").replace(/\\/g,"\\\\").replace(/"/g,'\\"').replace(/\n/g,"\\n").replace(/\r/g,"\\r").replace(/\u2028/g,"\\u2028").replace(/\u2029/g,"\\u2029")+'"'},objectLiteral:function(a){var b=this,c=[];f(a).forEach(function(e){var f=d(a[e],b);"undefined"!==f&&c.push([b.quotedString(e),":",f])});var e=this.generateList(c);return e.prepend("{"),e.add("}"),e},generateList:function(a){for(var b=this.empty(),c=0,e=a.length;c<e;c++)c&&b.add(","),b.add(d(a[c],this));return b},generateArray:function(a){var b=this.generateList(a);return b.prepend("["),b.add("]"),b}},b["default"]=e,a.exports=b["default"]}])});
/tencentcloud-sdk-python-privatedns-3.0.973.tar.gz/tencentcloud-sdk-python-privatedns-3.0.973/tencentcloud/privatedns/v20201028/privatedns_client.py
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.privatedns.v20201028 import models
class PrivatednsClient(AbstractClient):
_apiVersion = '2020-10-28'
_endpoint = 'privatedns.tencentcloudapi.com'
_service = 'privatedns'
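    # Every generated method below follows the same pattern: serialize the
    # request model, POST it through self.call(), parse the JSON body, and
    # deserialize "Response" into the matching response model. A
    # TencentCloudSDKException is re-raised as-is; any other exception is
    # wrapped in a TencentCloudSDKException.
    #
    # Minimal usage sketch (kept as comments so the module stays importable;
    # the credential values and the "ap-guangzhou" region are placeholders):
    #
    #   from tencentcloud.common import credential
    #   from tencentcloud.privatedns.v20201028 import privatedns_client, models
    #
    #   cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")
    #   client = privatedns_client.PrivatednsClient(cred, "ap-guangzhou")
    #   req = models.DescribePrivateZoneListRequest()
    #   resp = client.DescribePrivateZoneList(req)
    #   print(resp.to_json_string())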
def CreatePrivateDNSAccount(self, request):
"""创建私有域解析账号
:param request: Request instance for CreatePrivateDNSAccount.
:type request: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateDNSAccountRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateDNSAccountResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("CreatePrivateDNSAccount", params, headers=headers)
response = json.loads(body)
model = models.CreatePrivateDNSAccountResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def CreatePrivateZone(self, request):
"""创建私有域
:param request: Request instance for CreatePrivateZone.
:type request: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateZoneRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateZoneResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("CreatePrivateZone", params, headers=headers)
response = json.loads(body)
model = models.CreatePrivateZoneResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
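    # Illustrative follow-up for CreatePrivateZone (commented out; the Domain
    # field name follows the public API reference and is an assumption here):
    #
    #   req = models.CreatePrivateZoneRequest()
    #   req.Domain = "example.local"
    #   resp = client.CreatePrivateZone(req)
    #   print(resp.RequestId)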
def CreatePrivateZoneRecord(self, request):
"""添加私有域解析记录
:param request: Request instance for CreatePrivateZoneRecord.
:type request: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateZoneRecordRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateZoneRecordResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("CreatePrivateZoneRecord", params, headers=headers)
response = json.loads(body)
model = models.CreatePrivateZoneRecordResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DeleteEndPoint(self, request):
"""删除终端节点
:param request: Request instance for DeleteEndPoint.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DeleteEndPointRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DeleteEndPointResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteEndPoint", params, headers=headers)
response = json.loads(body)
model = models.DeleteEndPointResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DeletePrivateDNSAccount(self, request):
"""删除私有域解析账号
:param request: Request instance for DeletePrivateDNSAccount.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateDNSAccountRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateDNSAccountResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DeletePrivateDNSAccount", params, headers=headers)
response = json.loads(body)
model = models.DeletePrivateDNSAccountResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DeletePrivateZone(self, request):
"""删除私有域并停止解析
:param request: Request instance for DeletePrivateZone.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateZoneRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateZoneResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DeletePrivateZone", params, headers=headers)
response = json.loads(body)
model = models.DeletePrivateZoneResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DeletePrivateZoneRecord(self, request):
"""删除私有域解析记录
:param request: Request instance for DeletePrivateZoneRecord.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateZoneRecordRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateZoneRecordResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DeletePrivateZoneRecord", params, headers=headers)
response = json.loads(body)
model = models.DeletePrivateZoneRecordResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeAccountVpcList(self, request):
"""获取私有域解析账号的VPC列表
:param request: Request instance for DescribeAccountVpcList.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribeAccountVpcListRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribeAccountVpcListResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribeAccountVpcList", params, headers=headers)
response = json.loads(body)
model = models.DescribeAccountVpcListResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeAuditLog(self, request):
"""获取操作日志列表
:param request: Request instance for DescribeAuditLog.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribeAuditLogRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribeAuditLogResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribeAuditLog", params, headers=headers)
response = json.loads(body)
model = models.DescribeAuditLogResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeDashboard(self, request):
"""获取私有域解析概览
:param request: Request instance for DescribeDashboard.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribeDashboardRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribeDashboardResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribeDashboard", params, headers=headers)
response = json.loads(body)
model = models.DescribeDashboardResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribePrivateDNSAccountList(self, request):
"""获取私有域解析账号列表
:param request: Request instance for DescribePrivateDNSAccountList.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateDNSAccountListRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateDNSAccountListResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribePrivateDNSAccountList", params, headers=headers)
response = json.loads(body)
model = models.DescribePrivateDNSAccountListResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribePrivateZone(self, request):
"""获取私有域信息
:param request: Request instance for DescribePrivateZone.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribePrivateZone", params, headers=headers)
response = json.loads(body)
model = models.DescribePrivateZoneResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribePrivateZoneList(self, request):
"""获取私有域列表
:param request: Request instance for DescribePrivateZoneList.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneListRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneListResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribePrivateZoneList", params, headers=headers)
response = json.loads(body)
model = models.DescribePrivateZoneListResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribePrivateZoneRecordList(self, request):
"""获取私有域记录列表
:param request: Request instance for DescribePrivateZoneRecordList.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneRecordListRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneRecordListResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribePrivateZoneRecordList", params, headers=headers)
response = json.loads(body)
model = models.DescribePrivateZoneRecordListResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribePrivateZoneService(self, request):
"""查询私有域解析开通状态
:param request: Request instance for DescribePrivateZoneService.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneServiceRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneServiceResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribePrivateZoneService", params, headers=headers)
response = json.loads(body)
model = models.DescribePrivateZoneServiceResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeQuotaUsage(self, request):
"""查询额度使用情况
:param request: Request instance for DescribeQuotaUsage.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribeQuotaUsageRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribeQuotaUsageResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribeQuotaUsage", params, headers=headers)
response = json.loads(body)
model = models.DescribeQuotaUsageResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeRequestData(self, request):
"""获取私有域解析请求量
:param request: Request instance for DescribeRequestData.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribeRequestDataRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribeRequestDataResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribeRequestData", params, headers=headers)
response = json.loads(body)
model = models.DescribeRequestDataResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyPrivateZone(self, request):
"""修改私有域信息
:param request: Request instance for ModifyPrivateZone.
:type request: :class:`tencentcloud.privatedns.v20201028.models.ModifyPrivateZoneRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.ModifyPrivateZoneResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("ModifyPrivateZone", params, headers=headers)
response = json.loads(body)
model = models.ModifyPrivateZoneResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyPrivateZoneRecord(self, request):
"""修改私有域解析记录
:param request: Request instance for ModifyPrivateZoneRecord.
:type request: :class:`tencentcloud.privatedns.v20201028.models.ModifyPrivateZoneRecordRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.ModifyPrivateZoneRecordResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("ModifyPrivateZoneRecord", params, headers=headers)
response = json.loads(body)
model = models.ModifyPrivateZoneRecordResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyPrivateZoneVpc(self, request):
"""修改私有域关联的VPC
:param request: Request instance for ModifyPrivateZoneVpc.
:type request: :class:`tencentcloud.privatedns.v20201028.models.ModifyPrivateZoneVpcRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.ModifyPrivateZoneVpcResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("ModifyPrivateZoneVpc", params, headers=headers)
response = json.loads(body)
model = models.ModifyPrivateZoneVpcResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyRecordsStatus(self, request):
"""修改解析记录状态
:param request: Request instance for ModifyRecordsStatus.
:type request: :class:`tencentcloud.privatedns.v20201028.models.ModifyRecordsStatusRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.ModifyRecordsStatusResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("ModifyRecordsStatus", params, headers=headers)
response = json.loads(body)
model = models.ModifyRecordsStatusResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
def SubscribePrivateZoneService(self, request):
"""开通私有域解析
:param request: Request instance for SubscribePrivateZoneService.
:type request: :class:`tencentcloud.privatedns.v20201028.models.SubscribePrivateZoneServiceRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.SubscribePrivateZoneServiceResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("SubscribePrivateZoneService", params, headers=headers)
response = json.loads(body)
model = models.SubscribePrivateZoneServiceResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
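# Usage sketch (illustrative, not part of the generated client). Every method
# above follows the same serialize -> call -> deserialize pattern; the
# credential values and region below are placeholders, and the client class
# name follows the SDK's usual naming convention (an assumption here):
#
#   from tencentcloud.common import credential
#   from tencentcloud.privatedns.v20201028 import privatedns_client, models
#
#   cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")
#   client = privatedns_client.PrivatednsClient(cred, "ap-guangzhou")
#   req = models.DescribePrivateZoneListRequest()
#   resp = client.DescribePrivateZoneList(req)  # raises TencentCloudSDKException on error
#   print(resp.to_json_string())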
|
PypiClean
|
/movekit-0.1.25.tar.gz/movekit-0.1.25/examples/06_clustering.ipynb
|
## Spatio-Temporal Clustering
```
import movekit as mkit
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import animation
# Enter path to CSV file
path = "./datasets/fish-6.csv"
data = mkit.read_data(path)
data.head()
```
`Movekit` supports several clustering algorithms for spatio-temporal clustering: dbscan, hdbscan, agglomerative, kmeans, optics, spectral, affinitypropagation, and birch.
```
#normalize the data first
data_norm = mkit.normalize(data)
#then cluster it
labels = mkit.clustering('dbscan', data_norm, eps1=0.05, eps2=10, min_samples=2)
#OR cluster with the splitting-and-merging method (data is partitioned into frames and merged afterwards).
#labels = mkit.clustering_with_splits('dbscan', data, frame_size=20, eps1=0.05, eps2=10, min_samples=3)
labels
def plot(data, labels):
    colors = ['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a']
    labels = np.asarray(labels)
    # iterate over the actual cluster labels; -1 marks noise points, plotted in black
    for i in sorted(set(labels)):
        if i == -1:
            col = [0, 0, 0, 1]
        else:
            col = colors[i % len(colors)]
        clust = data[np.where(labels == i)]
        plt.scatter(clust[:, 0], clust[:, 1], c=[col], s=1)
    plt.show()
    return None
data_np = data_norm.loc[:, ['time','x','y']].values
plot(data_np[:,1:], labels)
```
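For larger datasets, the splitting-and-merging variant shown commented out above can be used instead: the data is partitioned into frames of `frame_size` time steps, each frame is clustered separately, and the label assignments are merged afterwards. A minimal sketch (the parameter values are illustrative, not tuned):
```
labels_split = mkit.clustering_with_splits('dbscan', data, frame_size=20,
                                           eps1=0.05, eps2=10, min_samples=3)
plot(data_np[:,1:], labels_split)
```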
|
PypiClean
|
/pypcd3-0.1.1.tar.gz/pypcd3-0.1.1/pypcd/pypcd.py
|
import re
import struct
import copy
import io as sio  # buffers are handled as bytes; see point_cloud_from_buffer/to_buffer
import numpy as np
import warnings
import lzf
HAS_SENSOR_MSGS = True
try:
from sensor_msgs.msg import PointField
import numpy_pc2 # needs sensor_msgs
except ImportError:
HAS_SENSOR_MSGS = False
__all__ = ['PointCloud',
'point_cloud_to_path',
'point_cloud_to_buffer',
'point_cloud_to_fileobj',
'point_cloud_from_path',
'point_cloud_from_buffer',
'point_cloud_from_fileobj',
'make_xyz_point_cloud',
'make_xyz_rgb_point_cloud',
'make_xyz_label_point_cloud',
'save_txt',
'cat_point_clouds',
'add_fields',
'update_field',
'build_ascii_fmtstr',
'encode_rgb_for_pcl',
'decode_rgb_from_pcl',
'save_point_cloud',
'save_point_cloud_bin',
'save_point_cloud_bin_compressed',
'pcd_type_to_numpy_type',
'numpy_type_to_pcd_type',
]
if HAS_SENSOR_MSGS:
pc2_pcd_type_mappings = [(PointField.INT8, ('I', 1)),
(PointField.UINT8, ('U', 1)),
(PointField.INT16, ('I', 2)),
(PointField.UINT16, ('U', 2)),
(PointField.INT32, ('I', 4)),
(PointField.UINT32, ('U', 4)),
(PointField.FLOAT32, ('F', 4)),
(PointField.FLOAT64, ('F', 8))]
pc2_type_to_pcd_type = dict(pc2_pcd_type_mappings)
pcd_type_to_pc2_type = dict((q, p) for (p, q) in pc2_pcd_type_mappings)
__all__.extend(['pcd_type_to_pc2_type', 'pc2_type_to_pcd_type'])
numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)),
(np.dtype('float64'), ('F', 8)),
(np.dtype('uint8'), ('U', 1)),
(np.dtype('uint16'), ('U', 2)),
(np.dtype('uint32'), ('U', 4)),
(np.dtype('uint64'), ('U', 8)),
(np.dtype('int16'), ('I', 2)),
(np.dtype('int32'), ('I', 4)),
(np.dtype('int64'), ('I', 8))]
numpy_type_to_pcd_type = dict(numpy_pcd_type_mappings)
pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings)
def parse_header(lines):
metadata = {}
for ln in lines:
if ln.startswith('#') or len(ln) < 2:
continue
        match = re.match(r'(\w+)\s+([\w\s\.]+)', ln)
if not match:
warnings.warn("warning: can't understand line: %s" % ln)
continue
key, value = match.group(1).lower(), match.group(2)
if key == 'version':
metadata[key] = value
elif key in ('fields', 'type'):
metadata[key] = value.split()
elif key in ('size', 'count'):
metadata[key] = list(map(int, value.split()))
elif key in ('width', 'height', 'points'):
metadata[key] = int(value)
elif key == 'viewpoint':
metadata[key] = list(map(float, value.split()))
elif key == 'data':
metadata[key] = value.strip().lower()
# TODO apparently count is not required?
# add some reasonable defaults
if 'count' not in metadata:
metadata['count'] = [1]*len(metadata['fields'])
if 'viewpoint' not in metadata:
metadata['viewpoint'] = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
if 'version' not in metadata:
metadata['version'] = '.7'
return metadata
def write_header(metadata, rename_padding=False):
""" given metadata as dictionary return a string header.
"""
template = """\
VERSION {version}
FIELDS {fields}
SIZE {size}
TYPE {type}
COUNT {count}
WIDTH {width}
HEIGHT {height}
VIEWPOINT {viewpoint}
POINTS {points}
DATA {data}
"""
str_metadata = metadata.copy()
if not rename_padding:
str_metadata['fields'] = ' '.join(metadata['fields'])
else:
new_fields = []
for f in metadata['fields']:
if f == '_':
new_fields.append('padding')
else:
new_fields.append(f)
str_metadata['fields'] = ' '.join(new_fields)
str_metadata['size'] = ' '.join(map(str, metadata['size']))
str_metadata['type'] = ' '.join(metadata['type'])
str_metadata['count'] = ' '.join(map(str, metadata['count']))
str_metadata['width'] = str(metadata['width'])
str_metadata['height'] = str(metadata['height'])
str_metadata['viewpoint'] = ' '.join(map(str, metadata['viewpoint']))
str_metadata['points'] = str(metadata['points'])
tmpl = template.format(**str_metadata)
return tmpl
def _metadata_is_consistent(metadata):
""" sanity check for metadata. just some basic checks.
"""
checks = []
required = ('version', 'fields', 'size', 'width', 'height', 'points',
'viewpoint', 'data')
for f in required:
if f not in metadata:
print('%s required' % f)
checks.append((lambda m: all([k in m for k in required]),
'missing field'))
checks.append((lambda m: len(m['type']) == len(m['count']) ==
len(m['fields']),
'length of type, count and fields must be equal'))
checks.append((lambda m: m['height'] > 0,
'height must be greater than 0'))
checks.append((lambda m: m['width'] > 0,
'width must be greater than 0'))
checks.append((lambda m: m['points'] > 0,
'points must be greater than 0'))
checks.append((lambda m: m['data'].lower() in ('ascii', 'binary',
'binary_compressed'),
'unknown data type:'
'should be ascii/binary/binary_compressed'))
ok = True
for check, msg in checks:
if not check(metadata):
print('error:', msg)
ok = False
return ok
# def pcd_type_to_numpy(pcd_type, pcd_sz):
# """ convert from a pcd type string and size to numpy dtype."""
# typedict = {'F' : { 4:np.float32, 8:np.float64 },
# 'I' : { 1:np.int8, 2:np.int16, 4:np.int32, 8:np.int64 },
# 'U' : { 1:np.uint8, 2:np.uint16, 4:np.uint32 , 8:np.uint64 }}
# return typedict[pcd_type][pcd_sz]
def _build_dtype(metadata):
""" build numpy structured array dtype from pcl metadata.
note that fields with count > 1 are 'flattened' by creating multiple
single-count fields.
TODO: allow 'proper' multi-count fields.
"""
fieldnames = []
typenames = []
for f, c, t, s in zip(metadata['fields'],
metadata['count'],
metadata['type'],
metadata['size']):
np_type = pcd_type_to_numpy_type[(t, s)]
if c == 1:
fieldnames.append(f)
typenames.append(np_type)
else:
fieldnames.extend(['%s_%04d' % (f, i) for i in range(c)])
typenames.extend([np_type]*c)
dtype = np.dtype(list(zip(fieldnames, typenames)))
return dtype
def build_ascii_fmtstr(pc):
""" make a format string for printing to ascii, using fields
%.8f minimum for rgb
%.10f for more general use?
"""
fmtstr = []
for t, cnt in zip(pc.type, pc.count):
if t == 'F':
fmtstr.extend(['%.10f']*cnt)
elif t == 'I':
fmtstr.extend(['%d']*cnt)
elif t == 'U':
fmtstr.extend(['%u']*cnt)
else:
raise ValueError("don't know about type %s" % t)
return fmtstr
def parse_ascii_pc_data(f, dtype, metadata):
return np.loadtxt(f, dtype=dtype, delimiter=' ')
def parse_binary_pc_data(f, dtype, metadata):
rowstep = metadata['points']*dtype.itemsize
# for some reason pcl adds empty space at the end of files
buf = f.read(rowstep)
    # np.fromstring is deprecated; frombuffer returns a read-only view, so copy
    # to keep the resulting array writable
    return np.frombuffer(buf, dtype=dtype).copy()
def parse_binary_compressed_pc_data(f, dtype, metadata):
# compressed size of data (uint32)
# uncompressed size of data (uint32)
# compressed data
# junk
fmt = 'II'
compressed_size, uncompressed_size =\
struct.unpack(fmt, f.read(struct.calcsize(fmt)))
compressed_data = f.read(compressed_size)
# TODO what to use as second argument? if buf is None
# (compressed > uncompressed)
# should we read buf as raw binary?
buf = lzf.decompress(compressed_data, uncompressed_size)
if len(buf) != uncompressed_size:
raise Exception('Error decompressing data')
# the data is stored field-by-field
pc_data = np.zeros(metadata['width'], dtype=dtype)
ix = 0
    for dti in range(len(dtype)):
        dt = dtype[dti]
        nbytes = dt.itemsize * metadata['width']
        column = np.frombuffer(buf[ix:(ix + nbytes)], dt)
        pc_data[dtype.names[dti]] = column
        ix += nbytes
return pc_data
def point_cloud_from_fileobj(f):
""" parse pointcloud coming from file object f
"""
header = []
while True:
ln = f.readline().strip()
if not isinstance(ln, str):
ln = ln.decode('utf-8')
header.append(ln)
if ln.startswith('DATA'):
metadata = parse_header(header)
dtype = _build_dtype(metadata)
break
if metadata['data'] == 'ascii':
pc_data = parse_ascii_pc_data(f, dtype, metadata)
elif metadata['data'] == 'binary':
pc_data = parse_binary_pc_data(f, dtype, metadata)
elif metadata['data'] == 'binary_compressed':
pc_data = parse_binary_compressed_pc_data(f, dtype, metadata)
    else:
        raise ValueError('DATA field is neither "ascii", "binary" nor'
                         ' "binary_compressed"')
return PointCloud(metadata, pc_data)
def point_cloud_from_path(fname):
""" load point cloud in binary format
"""
with open(fname, 'rb') as f:
pc = point_cloud_from_fileobj(f)
return pc
def point_cloud_from_buffer(buf):
    fileobj = sio.BytesIO(buf)
pc = point_cloud_from_fileobj(fileobj)
fileobj.close() # necessary?
return pc
def point_cloud_to_fileobj(pc, fileobj, data_compression=None):
""" write pointcloud as .pcd to fileobj.
if data_compression is not None it overrides pc.data.
"""
metadata = pc.get_metadata()
if data_compression is not None:
data_compression = data_compression.lower()
assert(data_compression in ('ascii', 'binary', 'binary_compressed'))
metadata['data'] = data_compression
header = write_header(metadata).encode('utf-8')
fileobj.write(header)
if metadata['data'].lower() == 'ascii':
fmtstr = build_ascii_fmtstr(pc)
np.savetxt(fileobj, pc.pc_data, fmt=fmtstr)
elif metadata['data'].lower() == 'binary':
        fileobj.write(pc.pc_data.tobytes())
elif metadata['data'].lower() == 'binary_compressed':
        # TODO
        # a '_' field is ignored by pcl and breaks compressed point clouds.
        # changing '_' to '_padding' or another name fixes this.
        # admittedly padding shouldn't be compressed in the first place
# reorder to column-by-column
uncompressed_lst = []
for fieldname in pc.pc_data.dtype.names:
            column = np.ascontiguousarray(pc.pc_data[fieldname]).tobytes()
uncompressed_lst.append(column)
uncompressed = b''.join(uncompressed_lst)
uncompressed_size = len(uncompressed)
# print("uncompressed_size = %r"%(uncompressed_size))
buf = lzf.compress(uncompressed)
if buf is None:
# compression didn't shrink the file
            # TODO what to do in this case when reading?
buf = uncompressed
compressed_size = uncompressed_size
else:
compressed_size = len(buf)
fmt = 'II'
fileobj.write(struct.pack(fmt, compressed_size, uncompressed_size))
fileobj.write(buf)
else:
raise ValueError('unknown DATA type')
# we can't close because if it's stringio buf then we can't get value after
def point_cloud_to_path(pc, fname):
    # the header is written as encoded bytes, so the file must be opened in binary mode
    with open(fname, 'wb') as f:
        point_cloud_to_fileobj(pc, f)
def point_cloud_to_buffer(pc, data_compression=None):
    fileobj = sio.BytesIO()
point_cloud_to_fileobj(pc, fileobj, data_compression)
return fileobj.getvalue()
def save_point_cloud(pc, fname):
""" save pointcloud to fname in ascii format
"""
with open(fname, 'wb') as f:
point_cloud_to_fileobj(pc, f, 'ascii')
def save_point_cloud_bin(pc, fname):
""" save pointcloud to fname in binary format
"""
with open(fname, 'wb') as f:
point_cloud_to_fileobj(pc, f, 'binary')
def save_point_cloud_bin_compressed(pc, fname):
with open(fname, 'wb') as f:
point_cloud_to_fileobj(pc, f, 'binary_compressed')
def save_xyz_label(pc, fname, use_default_lbl=False):
""" save a simple (x y z label) pointcloud, ignoring all other features.
label is initialized to 1000, for jptview.
"""
md = pc.get_metadata()
if not use_default_lbl and ('label' not in md['fields']):
raise Exception('label is not a field in this point cloud')
with open(fname, 'w') as f:
        for i in range(pc.points):
x, y, z = ['%.4f' % d for d in (
pc.pc_data['x'][i], pc.pc_data['y'][i], pc.pc_data['z'][i]
)]
lbl = '1000' if use_default_lbl else pc.pc_data['label'][i]
f.write(' '.join((x, y, z, lbl))+'\n')
def save_xyz_intensity_label(pc, fname, use_default_lbl=False):
md = pc.get_metadata()
if not use_default_lbl and ('label' not in md['fields']):
raise Exception('label is not a field in this point cloud')
if 'intensity' not in md['fields']:
raise Exception('intensity is not a field in this point cloud')
with open(fname, 'w') as f:
        for i in range(pc.points):
x, y, z = ['%.4f' % d for d in (
pc.pc_data['x'][i], pc.pc_data['y'][i], pc.pc_data['z'][i]
)]
intensity = '%.4f' % pc.pc_data['intensity'][i]
lbl = '1000' if use_default_lbl else pc.pc_data['label'][i]
f.write(' '.join((x, y, z, intensity, lbl))+'\n')
def save_txt(pc, fname, header=True):
""" TODO support multi-count fields
"""
with open(fname, 'w') as f:
if header:
header_lst = []
for field_name, cnt in zip(pc.fields, pc.count):
if cnt == 1:
header_lst.append(field_name)
else:
                    for c in range(cnt):
header_lst.append('%s_%04d' % (field_name, c))
f.write(' '.join(header_lst)+'\n')
fmtstr = build_ascii_fmtstr(pc)
np.savetxt(f, pc.pc_data, fmt=fmtstr)
def update_field(pc, field, pc_data):
""" updates field in-place.
"""
pc.pc_data[field] = pc_data
return pc
def add_fields(pc, metadata, pc_data):
""" builds copy of pointcloud with extra fields
multi-count fields are sketchy
"""
if len(set(metadata['fields']).intersection(set(pc.fields))) > 0:
raise Exception("Fields with that name exist.")
if pc.points != len(pc_data):
raise Exception("Mismatch in number of points.")
new_metadata = pc.get_metadata()
new_metadata['fields'].extend(metadata['fields'])
new_metadata['count'].extend(metadata['count'])
new_metadata['size'].extend(metadata['size'])
new_metadata['type'].extend(metadata['type'])
# parse metadata to add
# TODO factor this
fieldnames, typenames = [], []
for f, c, t, s in zip(metadata['fields'],
metadata['count'],
metadata['type'],
metadata['size']):
np_type = pcd_type_to_numpy_type[(t, s)]
if c == 1:
fieldnames.append(f)
typenames.append(np_type)
else:
            fieldnames.extend(['%s_%04d' % (f, i) for i in range(c)])
typenames.extend([np_type]*c)
dtype = list(zip(fieldnames, typenames))
# new dtype. could be inferred?
new_dtype = [(f, pc.pc_data.dtype[f])
for f in pc.pc_data.dtype.names] + dtype
new_data = np.empty(len(pc.pc_data), new_dtype)
for n in pc.pc_data.dtype.names:
new_data[n] = pc.pc_data[n]
for n, n_tmp in zip(fieldnames, pc_data.dtype.names):
new_data[n] = pc_data[n_tmp]
# TODO maybe just all the metadata in the dtype.
# TODO maybe use composite structured arrays for fields with count > 1
newpc = PointCloud(new_metadata, new_data)
return newpc
def cat_point_clouds(pc1, pc2):
if len(pc1.fields) != len(pc2.fields):
raise ValueError("Pointclouds must have same fields")
new_metadata = pc1.get_metadata()
new_data = np.concatenate((pc1.pc_data, pc2.pc_data))
# TODO this only makes sense for unstructured pc?
new_metadata['width'] = pc1.width+pc2.width
new_metadata['points'] = pc1.points+pc2.points
pc3 = PointCloud(new_metadata, new_data)
return pc3
def make_xyz_point_cloud(xyz, metadata=None):
""" Make a pointcloud object from xyz array.
xyz array is cast to float32.
"""
md = {'version': .7,
'fields': ['x', 'y', 'z'],
'size': [4, 4, 4],
'type': ['F', 'F', 'F'],
'count': [1, 1, 1],
'width': len(xyz),
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': len(xyz),
'data': 'binary'}
if metadata is not None:
md.update(metadata)
xyz = xyz.astype(np.float32)
pc_data = xyz.view(np.dtype([('x', np.float32),
('y', np.float32),
('z', np.float32)]))
# pc_data = np.rec.fromarrays([xyz[:,0], xyz[:,1], xyz[:,2]], dtype=dt)
# data = np.rec.fromarrays([xyz.T], dtype=dt)
pc = PointCloud(md, pc_data)
return pc
def make_xyz_rgb_point_cloud(xyz_rgb, metadata=None):
""" Make a pointcloud object from xyz array.
xyz array is assumed to be float32.
rgb is assumed to be encoded as float32 according to pcl conventions.
"""
md = {'version': .7,
'fields': ['x', 'y', 'z', 'rgb'],
'count': [1, 1, 1, 1],
'width': len(xyz_rgb),
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': len(xyz_rgb),
'type': ['F', 'F', 'F', 'F'],
'size': [4, 4, 4, 4],
'data': 'binary'}
if xyz_rgb.dtype != np.float32:
raise ValueError('array must be float32')
if metadata is not None:
md.update(metadata)
pc_data = xyz_rgb.view(np.dtype([('x', np.float32),
('y', np.float32),
('z', np.float32),
('rgb', np.float32)])).squeeze()
# pc_data = np.rec.fromarrays([xyz[:,0], xyz[:,1], xyz[:,2]], dtype=dt)
# data = np.rec.fromarrays([xyz.T], dtype=dt)
pc = PointCloud(md, pc_data)
return pc
def encode_rgb_for_pcl(rgb):
""" Input is Nx3 uint8 array with RGB values.
Output is Nx1 float32 array with bit-packed RGB, for PCL.
"""
assert(rgb.dtype == np.uint8)
assert(rgb.ndim == 2)
assert(rgb.shape[1] == 3)
rgb = rgb.astype(np.uint32)
rgb = np.array((rgb[:, 0] << 16) | (rgb[:, 1] << 8) | (rgb[:, 2] << 0),
dtype=np.uint32)
rgb.dtype = np.float32
return rgb
def decode_rgb_from_pcl(rgb):
rgb = rgb.copy()
rgb.dtype = np.uint32
r = np.asarray((rgb >> 16) & 255, dtype=np.uint8)
g = np.asarray((rgb >> 8) & 255, dtype=np.uint8)
b = np.asarray(rgb & 255, dtype=np.uint8)
rgb_arr = np.zeros((len(rgb), 3), dtype=np.uint8)
rgb_arr[:, 0] = r
rgb_arr[:, 1] = g
rgb_arr[:, 2] = b
return rgb_arr
def make_xyz_label_point_cloud(xyzl, label_type='f'):
""" TODO i labels? """
md = {'version': .7,
'fields': ['x', 'y', 'z', 'label'],
'count': [1, 1, 1, 1],
'width': len(xyzl),
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': len(xyzl),
'data': 'ASCII'}
if label_type.lower() == 'f':
md['size'] = [4, 4, 4, 4]
md['type'] = ['F', 'F', 'F', 'F']
elif label_type.lower() == 'u':
md['size'] = [4, 4, 4, 1]
md['type'] = ['F', 'F', 'F', 'U']
else:
raise ValueError('label type must be F or U')
# TODO use .view()
xyzl = xyzl.astype(np.float32)
dt = np.dtype([('x', np.float32), ('y', np.float32), ('z', np.float32),
('label', np.float32)])
pc_data = np.rec.fromarrays([xyzl[:, 0], xyzl[:, 1], xyzl[:, 2],
xyzl[:, 3]], dtype=dt)
pc = PointCloud(md, pc_data)
return pc
class PointCloud(object):
def __init__(self, metadata, pc_data):
self.metadata_keys = metadata.keys()
self.__dict__.update(metadata)
self.pc_data = pc_data
self.check_sanity()
def get_metadata(self):
""" returns copy of metadata """
metadata = {}
for k in self.metadata_keys:
metadata[k] = copy.copy(getattr(self, k))
return metadata
def check_sanity(self):
# pdb.set_trace()
md = self.get_metadata()
assert(_metadata_is_consistent(md))
assert(len(self.pc_data) == self.points)
assert(self.width*self.height == self.points)
assert(len(self.fields) == len(self.count))
assert(len(self.fields) == len(self.type))
def save(self, fname):
self.save_pcd(fname, 'ascii')
def save_pcd(self, fname, compression=None, **kwargs):
if 'data_compression' in kwargs:
            warnings.warn("the 'data_compression' keyword is deprecated;"
                          " use 'compression' instead")
            compression = kwargs['data_compression']
with open(fname, 'wb') as f:
point_cloud_to_fileobj(self, f, compression)
def save_pcd_to_fileobj(self, fileobj, compression=None, **kwargs):
if 'data_compression' in kwargs:
            warnings.warn("the 'data_compression' keyword is deprecated;"
                          " use 'compression' instead")
            compression = kwargs['data_compression']
point_cloud_to_fileobj(self, fileobj, compression)
def save_pcd_to_buffer(self, compression=None, **kwargs):
if 'data_compression' in kwargs:
            warnings.warn("the 'data_compression' keyword is deprecated;"
                          " use 'compression' instead")
            compression = kwargs['data_compression']
return point_cloud_to_buffer(self, compression)
def save_txt(self, fname):
save_txt(self, fname)
def save_xyz_label(self, fname, **kwargs):
save_xyz_label(self, fname, **kwargs)
def save_xyz_intensity_label(self, fname, **kwargs):
save_xyz_intensity_label(self, fname, **kwargs)
def copy(self):
new_pc_data = np.copy(self.pc_data)
new_metadata = self.get_metadata()
return PointCloud(new_metadata, new_pc_data)
def to_msg(self):
if not HAS_SENSOR_MSGS:
raise NotImplementedError('ROS sensor_msgs not found')
# TODO is there some metadata we want to attach?
return numpy_pc2.array_to_pointcloud2(self.pc_data)
@staticmethod
def from_path(fname):
return point_cloud_from_path(fname)
@staticmethod
def from_fileobj(fileobj):
return point_cloud_from_fileobj(fileobj)
@staticmethod
def from_buffer(buf):
return point_cloud_from_buffer(buf)
@staticmethod
def from_array(arr):
""" create a PointCloud object from an array.
"""
pc_data = arr.copy()
md = {'version': .7,
'fields': [],
'size': [],
'count': [],
'width': 0,
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': 0,
'type': [],
'data': 'binary_compressed'}
md['fields'] = pc_data.dtype.names
for field in md['fields']:
type_, size_ =\
numpy_type_to_pcd_type[pc_data.dtype.fields[field][0]]
md['type'].append(type_)
md['size'].append(size_)
# TODO handle multicount
md['count'].append(1)
md['width'] = len(pc_data)
md['points'] = len(pc_data)
pc = PointCloud(md, pc_data)
return pc
@staticmethod
def from_msg(msg, squeeze=True):
""" from pointcloud2 msg
squeeze: fix when clouds get 1 as first dim
"""
if not HAS_SENSOR_MSGS:
raise NotImplementedError('ROS sensor_msgs not found')
md = {'version': .7,
'fields': [],
'size': [],
'count': [],
'width': 0,
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': 0,
'type': [],
'data': 'binary_compressed'}
for field in msg.fields:
md['fields'].append(field.name)
t, s = pc2_type_to_pcd_type[field.datatype]
md['type'].append(t)
md['size'].append(s)
# TODO handle multicount correctly
if field.count > 1:
warnings.warn('fields with count > 1 are not well tested')
md['count'].append(field.count)
pc_data = np.squeeze(numpy_pc2.pointcloud2_to_array(msg))
md['width'] = len(pc_data)
md['points'] = len(pc_data)
pc = PointCloud(md, pc_data)
return pc
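# Usage sketch (added for illustration, not part of the original module):
# build a small cloud from a float32 array, write it out, and read it back.
if __name__ == '__main__':
    demo_xyz = np.random.rand(100, 3).astype(np.float32)
    demo_pc = make_xyz_point_cloud(demo_xyz)
    demo_pc.save_pcd('demo_cloud.pcd', compression='binary')
    loaded = point_cloud_from_path('demo_cloud.pcd')
    assert loaded.points == 100
    print('round-trip ok, fields:', loaded.fields)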
|
PypiClean
|
/aliyun-python-sdk-vpc-3.0.45.tar.gz/aliyun-python-sdk-vpc-3.0.45/aliyunsdkvpc/request/v20160428/CreateVbrHaRequest.py
|
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class CreateVbrHaRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateVbrHa','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_VbrId(self): # String
return self.get_query_params().get('VbrId')
def set_VbrId(self, VbrId): # String
self.add_query_param('VbrId', VbrId)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_PeerVbrId(self): # String
return self.get_query_params().get('PeerVbrId')
def set_PeerVbrId(self, PeerVbrId): # String
self.add_query_param('PeerVbrId', PeerVbrId)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
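# Usage sketch (illustrative; not part of the generated request class). The
# client setup follows the usual aliyun-python-sdk-core pattern; the access
# keys, region and VBR IDs below are placeholders:
#
#   from aliyunsdkcore.client import AcsClient
#
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = CreateVbrHaRequest()
#   request.set_VbrId('vbr-xxxxxxxx')
#   request.set_PeerVbrId('vbr-yyyyyyyy')
#   request.set_Name('vbr-ha-demo')
#   response = client.do_action_with_exception(request)
#   print(response)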
|
PypiClean
|
/doc_curation-0.1.16-py3-none-any.whl/doc_curation/md/content_processor/details_helper.py
|
import copy
import logging
import os
import textwrap
import doc_curation.md.content_processor.line_helper
import regex
from doc_curation.md.file import MdFile
from indic_transliteration import sanscript
from doc_curation.md import content_processor
from bs4 import BeautifulSoup, NavigableString
class Detail(object):
def __init__(self, type, content):
self.type = type
self.content = content
def to_md_html(self, attributes_str=None):
if self.type is None:
title = "Misc Detail"
logging.warning(f"Unknown detail type for: {self.content}")
else:
title = self.type
if attributes_str is None:
if self.type in ["विश्वास-प्रस्तुतिः", "मूलम् (वचनम्)"]:
attributes_str = "open"
else:
attributes_str = ""
if attributes_str.strip() != "":
attributes_str = " " + attributes_str
return f"<details{attributes_str}><summary>{title}</summary>\n\n{self.content.strip()}\n</details>"
def to_soup(self):
return BeautifulSoup(self.to_md_html(), 'html.parser')
@classmethod
def from_soup_tag(cls, detail_tag):
title = detail_tag.select_one("summary").text.strip()
detail_text = "".join([x.text for x in list(detail_tag.children)[1:]]).strip()
return Detail(type=title, content=detail_text)
def interleave_from_file(md_file, source_file, dest_pattern=r"[^\d०-९೦-೯]([\d०-९೦-೯]+) *॥.*(?=\n|$)", source_pattern=r"(?<=\n|^)([\d०-९೦-೯]+).+\n", detail_title="English", dry_run=False):
(_, dest_content) = md_file.read()
if callable(source_file):
source_file = source_file(md_file.file_path)
if not os.path.exists(source_file):
logging.warning("Source %s does not exist!", source_file)
return
logging.info("Interleaving content from %s into %s", source_file, md_file.file_path)
source_md = MdFile(file_path=source_file)
(_, source_content) = source_md.read()
dest_matches = list(regex.finditer(dest_pattern, dest_content))
source_matches = list(regex.finditer(source_pattern, source_content))
source_match_map = {}
for source_match in source_matches:
index_str = sanscript.transliterate(source_match.group(1), _to=sanscript.IAST)
if index_str.isnumeric():
source_match_map[int(index_str)] = source_match
else:
logging.warning("Could not get index for: %s", source_match.group())
for dest_match in dest_matches:
index_str = sanscript.transliterate(dest_match.group(1), _to=sanscript.IAST)
if not index_str.isnumeric():
logging.warning("Could not get index for: %s", dest_match.group())
continue
index = int(index_str)
if index not in source_match_map:
logging.warning("Could not get index %d in source: %s", index, dest_match.group())
continue
detail_html = textwrap.dedent(
"""
<details><summary>%s</summary>
%s
</details>
"""
) % (detail_title, source_match_map[index].group())
dest_content = dest_content.replace(dest_match.group(), "%s\n%s" % (dest_match.group(), detail_html))
source_content = source_content.replace(source_match_map[index].group(), "")
md_file.replace_content_metadata(new_content=dest_content, dry_run=dry_run)
source_md.replace_content_metadata(new_content=source_content, dry_run=dry_run)
def transform_details_with_soup(content, metadata, transformer, title=None, *args, **kwargs):
# Stray usage of < can fool the soup parser. Hence the below.
if "details" not in content:
return content
soup = content_processor._soup_from_content(content=content, metadata=metadata)
if soup is None:
return content
details = soup.select("body>details")
for detail_tag in details:
detail = Detail.from_soup_tag(detail_tag=detail_tag)
if title is None or title == detail.type:
transformer(detail_tag, *args, **kwargs)
detail_tag.insert_after("\n")
return content_processor._make_content_from_soup(soup=soup)
def extract_details_from_file(md_file):
[metadata, content] = md_file.read()
# Stray usage of < can fool the soup parser. Hence the below.
if "details" not in content:
return []
soup = content_processor._soup_from_content(content=content, metadata=metadata)
details = soup.select("body>details", recursive=False)
return details
def insert_after_detail(content, metadata, title, new_element):
# Stray usage of < can fool the soup parser. Hence the below.
if "details" not in content or title not in content:
return content
soup = content_processor._soup_from_content(content=content, metadata=metadata)
if soup is None:
return content
if isinstance(new_element, str):
new_element = BeautifulSoup(new_element, 'html.parser')
details = soup.select("details")
for detail in details:
if detail.select_one("summary").text.strip() == title:
detail.insert_after("\n")
detail.insert_after(new_element)
detail.insert_after("\n")
detail.insert_after("\n")
return content_processor._make_content_from_soup(soup=soup)
def get_details(content, metadata, title):
# Stray usage of < can fool the soup parser. Hence the below.
if "details" not in content:
return []
soup = content_processor._soup_from_content(content=content, metadata=metadata)
if soup is None:
return []
details = soup.select("details")
result = []
for detail_tag in details:
detail = Detail.from_soup_tag(detail_tag=detail_tag)
if detail.type == title:
result.append((detail_tag, detail))
return result
def get_detail(content, metadata, title):
details = get_details(content=content, metadata=metadata, title=title)
if len(details) == 0:
return (None, None)
else:
return details[0]
def get_detail_content(content, metadata, titles):
result = ""
for title in titles:
details = get_details(content=content, metadata=metadata, title=title)
for detail_tuple in details:
(tag, detail) = detail_tuple
result = f"{result}\n\n{detail.content}"
return result
def rearrange_details(content, metadata, titles, *args, **kwargs):
# UNTESTED
# Stray usage of < can fool the soup parser. Hence the below.
if "details" not in content:
return content
soup = content_processor._soup_from_content(content=content, metadata=metadata)
if soup is None:
return content
details = soup.select("details")
final_details = []
title_to_detail = {detail.select_one("summary").text: detail for detail in details}
for title in titles:
if title in title_to_detail:
final_details.append(copy.copy(title_to_detail[title]))
    for index, detail in enumerate(details):
detail.insert_after("\n")
detail.insert_after(final_details[index])
detail.decompose()
return content_processor._make_content_from_soup(soup=soup)
def detail_content_replacer_soup(detail_tag, replacement):
summary = detail_tag.select_one("summary")
detail = Detail.from_soup_tag(detail_tag=detail_tag)
for x in summary.find_next_siblings():
x.extract()
for x in detail_tag.contents:
if isinstance(x, NavigableString):
x.extract()
if callable(replacement):
replacement = replacement(detail.content)
summary.insert_after(f"\n\n{replacement}\n")
def vishvAsa_sanskrit_transformer(detail_tag):
if detail_tag.select_one("summary").text != "विश्वास-प्रस्तुतिः":
return
for x in detail_tag.contents:
if isinstance(x, NavigableString):
x.replace_with(doc_curation.md.content_processor.line_helper.rehyphenate_sanskrit_line_endings(x))
def shlokas_to_muula_viprastuti_details(content, pattern=None):
if "विश्वास-प्रस्तुतिः" in content:
return content
if pattern is None:
from doc_curation.utils import patterns
pattern = patterns.PATTERN_2LINE_SHLOKA
def detail_maker(match):
shloka = match.group()
detail_vishvaasa = Detail(type="विश्वास-प्रस्तुतिः", content=shloka)
detail_muula = Detail(type="मूलम्", content=shloka)
return f"\n{detail_vishvaasa.to_md_html()}\n\n{detail_muula.to_md_html()}"
content = regex.sub(pattern, detail_maker, content)
return content
def wrap_into_detail(content, title):
content_out = content.strip()
if content_out == "":
return content
return Detail(type=title, content=content.strip()).to_md_html()
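# Usage sketch (illustrative, not part of the original module): wrap some
# markdown into a collapsible details block and parse it back. The title below
# is a placeholder.
if __name__ == '__main__':
    html = wrap_into_detail("Some *markdown* body.", title="Notes")
    print(html)
    tag = BeautifulSoup(html, 'html.parser').select_one("details")
    parsed = Detail.from_soup_tag(detail_tag=tag)
    assert parsed.type == "Notes" and "markdown" in parsed.content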
|
PypiClean
|
/spotify-playlist-exporter-0.0.3.tar.gz/spotify-playlist-exporter-0.0.3/src/spotify_grab_current_playlists/spotify_grab_current_playlists.py
|
import spotipy
from spotipy.oauth2 import SpotifyOAuth
import pandas as pd
import datetime
import time
import argparse
import sys
#from cred import client_id, client_secret, redirect_uri, username
def import_credentials():
try:
from cred_my import client_id, client_secret, redirect_uri, username
except ImportError:
try:
from cred import client_id, client_secret, redirect_uri, username
except ImportError:
print('No credentials file found.')
sys.exit(1)
return client_id, client_secret, redirect_uri, username
def get_playlist_tracks(sp,username,playlist_id):
'''
sp... spotipy API client instance
username... spotify username
playlist_id... playlist id (not name)
'''
playlist = sp.user_playlist(user=username,playlist_id=playlist_id) # dict with keys 'tracks'
track_list = playlist['tracks'] #dict with key 'items', which values are the tracks
tracks_list_items=track_list['items']
while track_list['next']:
print('next page')
track_list=sp.next(track_list) # use the sp (the client) to go to next page, weird however why this needs be done on ['tracks'] level
tracks_list_items.extend(track_list['items'])
# tracks_list_items is a list of dicts. Each dict is a track.
# item in the list has keys: dict_keys(['added_at', 'added_by', 'is_local', 'primary_color', 'track', 'video_thumbnail'])
# item['track'] in the list has keys: dict_keys(['album', 'artists', 'available_markets', 'disc_number', 'duration_ms', 'episode', 'explicit', 'external_ids', 'external_urls', 'href', 'id', 'is_local', 'name', 'popularity', 'preview_url', 'track', 'track_number', 'type', 'uri'])
# item['track']['name'] is the tracks name
# item['track']['artists'] can be a list of dicts of contributors
return tracks_list_items
def get_playlist(sp, username, list_name):
    '''Return the first playlist of the user whose name matches list_name, or None.'''
    playlists = sp.user_playlists(username)
    for playlist in playlists['items']:
        if playlist['name'] == list_name:
            return playlist
    return None
def create_api_session(scope,client_id,client_secret, redirect_uri):
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri, scope=scope))
return sp
def export_spotify_playlists(username,sp):
# sp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri, scope=scope))
playlists = sp.user_playlists(username)
# Create a dictionary to store the playlist titles and their tracks
all_playlists = {}
for playlist in playlists["items"]:
if playlist["owner"]["id"] == username:
playlist_title = playlist["name"]
print(playlist_title)
playlist_tracks_short = []
playlist_tracks = get_playlist_tracks(sp=sp,username=username,playlist_id=playlist['id'])
for item in playlist_tracks:
# print('\n')
# print(item)
# print(item['track']['name'])
track_name = item['track']['name']
artist_name = item["track"]["artists"][0]["name"]
playlist_tracks_short.append((track_name,artist_name))
all_playlists[playlist_title] = playlist_tracks_short
# print(all_playlists)
return all_playlists
def playlists_to_dataframe(all_playlists, to_clipboard=False, savefile="playlists.xlsx"):
    # Initialize an empty list to store the rows of the DataFrame
    rows = []
    # Iterate over each playlist in the all_playlists dictionary
    for playlist_title, playlist_tracks in all_playlists.items():
        # Each track is stored as a (track_name, artist_name) tuple
        for track in playlist_tracks:
            title, artist = track
            # Append a new row to the list of rows
            rows.append({"Playlist": playlist_title, "Artist": artist, "Title": title})
    # Convert the list of rows into a pandas DataFrame
    df = pd.DataFrame(rows)
    if to_clipboard:
        df.to_clipboard()
    if savefile is not None:
        df.to_excel('./out/' + savefile, index=False)
    return df
def recently_played_to_dataframe(all_tracks, to_clipboard=False, savefile="recently_played.xlsx"):
    # Initialize an empty list to store the rows of the DataFrame
    rows = []
    # Each track is stored as a (track_name, artist_name) tuple
    for track in all_tracks:
        title, artist = track
        # Append a new row to the list of rows
        rows.append({"Artist": artist, "Title": title})
    # Convert the list of rows into a pandas DataFrame
    df = pd.DataFrame(rows)
    if to_clipboard:
        df.to_clipboard()
    if savefile is not None:
        df.to_excel('./out/' + savefile, index=False)
    return df
def recently_played(sp, timestamp):
    '''
    Problem: only tracks that have been played to the end show up in this list.
    '''
    # Get the user's recently played tracks, paging through every result page
    # (the original version skipped the first page and only kept the last one)
    number_tracks = 50
    results = sp.current_user_recently_played(limit=number_tracks, after=timestamp)
    tracks_list = []
    while True:
        for item in results['items']:
            track = item['track']
            tracks_list.append((track['name'], track['artists'][0]['name']))
        if not results['next']:
            break
        print('next page of {}'.format(number_tracks))
        results = sp.next(results)
    return tracks_list
def datestring_to_timestamp(datestring):
# Convert input date to datetime object
dt = datetime.datetime.strptime(datestring, '%Y-%m-%d')
# Convert datetime object to Unix timestamp in milliseconds
unix_timestamp = int(time.mktime(dt.timetuple()))*1000
return unix_timestamp
def now_as_string():
now = datetime.datetime.now()
print('Now is: {}'.format(now.strftime("%y%m%d_%H%M%S")))
return now.strftime("%y%m%d_%H%M%S")
def main(playlists_file='playlists', recently_file='recently_played', date_string='2023-01-01'):
date_timestamp = datestring_to_timestamp(date_string)
now_string = now_as_string()
client_id, client_secret, redirect_uri, username = import_credentials()
# Get and save playlists
scope = "playlist-read-private"
sp = create_api_session(scope=scope,client_id=client_id,client_secret=client_secret,redirect_uri=redirect_uri)
all_playlists = export_spotify_playlists(username,sp)
df_playlists = playlists_to_dataframe(all_playlists,to_clipboard = False, savefile="{}_{}.xlsx".format(now_string,playlists_file))
# Get and save recently played (since date_string in yyyy-mm-dd)
scope = "user-read-recently-played"
sp = create_api_session(scope=scope,client_id=client_id,client_secret=client_secret,redirect_uri=redirect_uri)
recently_list=recently_played(sp,date_timestamp)
df_recently = recently_played_to_dataframe(recently_list,to_clipboard = False, savefile="{}_{}.xlsx".format(now_string,recently_file))
return df_playlists, df_recently
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Export Spotify playlists and recently played tracks to Excel files')
    parser.add_argument('--playlistsfile', type=str, default='playlists', help='output file stem for the playlists export')
    parser.add_argument('--recentlyfile', type=str, default='recently_played', help='output file stem for the recently-played export')
    parser.add_argument('--datestring', type=str, default='2023-01-01', help='fetch recently played tracks after this date (yyyy-mm-dd)')
args = parser.parse_args()
print(args)
playlist_file=args.playlistsfile
recently_file=args.recentlyfile
date_string=args.datestring
main(playlist_file, recently_file, date_string)
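# Example invocation (illustrative; assumes a cred.py or cred_my.py providing
# client_id, client_secret, redirect_uri and username, plus an existing ./out/
# directory for the Excel exports):
#
#   python spotify_grab_current_playlists.py --playlistsfile playlists \
#       --recentlyfile recently_played --datestring 2023-01-01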
|
PypiClean
|
/love_course_2016_2019-2023.3.1.0-py3-none-any.whl/LoveCourse20162019/docs/chris/chris『红丸会员』:高阶框架打造03吵架中如何掌控框架.md
|
# chris『红丸会员』: Advanced Frame Building 03: How to Control the Frame in an Argument
Hello everyone, I'm Liang Shu. Today is the third lesson on advanced frames, and the topic is arguments between partners. The main point I want to share today: an argument is not scary; an argument with no frame inside it is what's scary.

Through some concrete case scenarios, we will see how advanced frames are applied in practice. First, let me throw out a question: in daily life, do we argue a lot with our other half, or with the people close to us, family, friends, colleagues, across all kinds of relationships? Two different individuals: you have your ideas, I have my considerations, and the same problem triggers us from different angles. As people say, what couple never argues? But why is it that some couples argue and their relationship actually gets better and better, while others argue and the relationship slowly goes cold? A good argument and a bad argument are not the same thing, and almost every argument contains a frame problem. In this lesson we will talk about the correct use of frames in an argument, so that a fight does not shake the foundations of your relationship. First, let's watch a short video.

[In the clip, a couple has a blazing row over a small everyday misunderstanding: she has been tracking his location, he demands to know why she didn't simply call him, and each insists the other is the one in the wrong.]

In this video, the man and the woman had a big fight over a minor everyday misunderstanding. Judging by the frame principles we learned in lesson two, it is quite clear: both sides are in a hard frame, because each is standing on their own position, defending their own interests and feelings. In this scene, neither side's attractiveness drops, but both sides' availability declines to different degrees.

We all know that attractiveness does not trump everything. Especially once you and your partner have passed the pursuit stage and moved into a long-term relationship or even marriage, sustained high attractiveness with low availability will push the relationship into a cold, frozen period. That is why in daily life we often see a couple finish a fight, go cold on each other for two days, and then one of them goes back to coax the other. This is the most basic frame usage: a hard frame during the fight, a soft frame when making up. In this alternation of soft and hard, the relationship can take different directions.

Now the key point of this lesson: pay attention to four different situations.

First: a hard frame during the fight, and still a hard frame afterwards, waiting for the other side to soften and make peace first. This way your self-esteem is greatly satisfied, but be careful: if your self-esteem is satisfied, it means your partner's is not. Being willing to put down their pride first and come to coax you is a sign of more love and more tolerance. At that moment you should give positive feedback right away; otherwise, once this hardens into a habit over time, your relationship will face serious instability.

Second: a hard frame during the fight, a soft frame afterwards. Either you coax the other person at a suitable moment, or you simply go back to normal as if the fight never happened. Whether your soft frame can be genuinely felt depends on your real inner state. A qualified soft frame has one basic premise: you have already calmed down, thought things through, and truly want to make up and repair the post-argument state, rather than apologizing reluctantly, doing it for the sake of doing it. Watch your own intentions; this is what people in daily life call "attitude". Some people's apologies let us feel sincere regret, while other people's apologies feel perfunctory, and the more they apologize that way, the angrier the other person gets.

Third: a soft frame during the fight, a hard frame afterwards. This is also very common, and it often comes from the habit of avoiding conflict and appeasing people in the moment. Once the matter has passed, they carry on exactly as before, which makes you crazier and angrier each time. This habit has a fatal flaw: the starting point of their softness is to escape the problem, not to solve it. The soft frame they show is a fake soft frame, and a relationship like this does not move forward; it gets worse. Actually, let me share this: you really do not need to be so afraid of arguing. An argument is just a form of communication, only with higher, more negative energy. But generally it at least guarantees one thing: the other person is genuinely expressing what is inside, venting the emotional garbage accumulated over some period or some issue. This kind of communication and periodic venting is very necessary. As long as you hold a little tolerance toward the other person and make some adjustments, it is fine.

Fourth, the one we see most often: a soft frame during the fight, and still a soft frame afterwards. This is the relationship style that most easily slides onto the supplicating track. It does not take long before she senses your weakness and spinelessness; your attractiveness drains away steadily, and quickly. Even if you notice and react, her hormonal system will gradually shut down toward you, and that process is not controlled by the logical system of her brain. So in daily life one thing often appears: she is in pain too. On one hand she knows "this man treats me well", but on the other hand, "I just cannot love you anymore". A relationship like this is torture for both sides, and generally speaking, after such a breakup there is no need, and no way, to go back.

To sum up: among these four ways of arguing, the first very easily turns into cold aloofness, and the relationship freezes to death. The fourth, the most common, very easily turns into supplication, and the relationship is smothered to death. The third is in essence fake communication: the communication is ineffective, the problem is temporarily covered up, but it keeps growing in the dark, and in the end the relationship dies of illness. So only the second is worth our study, thought, and use in daily life. As for the exact ratio of soft and hard frames inside it, Liang Shu will keep explaining in detail in the advanced frame course.

See you next lesson.
|
PypiClean
|
/rocon_client_sdk_py-0.5.15-py3-none-any.whl/rocon_client_sdk_py/core_logic/trees/tasks_heart_beat.py
|
import py_trees
import asyncio
import pydash
from overrides import overrides
from .task import AsyncTask
from rocon_client_sdk_py.utils.util import *
from rocon_client_sdk_py.const import *
class TaskUpdateWorker(AsyncTask):
def __init__(self, name="updateWorker"):
super(TaskUpdateWorker, self).__init__(name)
self.t_last = None
def __del__(self):
pass
@overrides
def setup(self):
pass
@overrides
def initialise(self):
self.rocon_logger.debug("initialise >>", module_keyword=BT_KEYWORD)
if self.async_task_status == py_trees.common.Status.RUNNING:
            self.rocon_logger.debug('update worker task is still running', module_keyword=BT_KEYWORD)
status = self.context.blackboard.get('status')
if status == 'busy':
self.async_task_status = py_trees.common.Status.SUCCESS
return
worker_content = self.context.blackboard.get_worker_content()
last_updated_at = pydash.get(worker_content, 'updated_at')
last_updated_ms = 0
if last_updated_at:
last_updated_ms = get_time_milliseconds(last_updated_at)
else:
            # TODO: if 'updated_at' was never recorded, re-check whether any other issue is hiding here
pass
        if last_updated_ms != 0 and self.check_valid_interval(DEFAULT_UPDATE_WORKER_INTERVAL_MS, last_updated_ms):
            # the update interval has elapsed
self.async_task_status = py_trees.common.Status.RUNNING
try:
coro_future = asyncio.run_coroutine_threadsafe(self._do_work(), self.context.event_loop)
result = coro_future.result()
except Exception as err:
self.rocon_logger.error('Exception occurred', exception=err)
self.async_task_status = py_trees.common.Status.FAILURE
err.with_traceback()
else:
            # not yet due; treat as SUCCESS
self.async_task_status = py_trees.common.Status.SUCCESS
async def _do_work(self):
        '''
        Periodically pushes the worker_content recorded on the blackboard to the concert server.
        Not exposed to SDK users; the SDK user is only obliged to record data in the worker_content format.
        :return:
        '''
if self.context.blackboard.get_worker_content_update() is None:
self.context.blackboard.set_worker_content({'updated_at': current_datetime_utc_iso_format()})
try:
            # TODO: sync_worker is called here periodically, but other routines also call it as needed;
            # manage the calls through a queue and update based on the latest data
result = await self.context.blackboard.sync_worker()
except Exception as err:
self.rocon_logger.error('Exception occurred', exception=err)
self.async_task_status = py_trees.common.Status.FAILURE
err.with_traceback()
return
self.async_task_status = py_trees.common.Status.SUCCESS
@overrides
def update(self):
self.rocon_logger.debug('update >> {}'.format(self.async_task_status), module_keyword=BT_KEYWORD)
return self.async_task_status
@overrides
def terminate(self, new_status):
self.rocon_logger.debug("terminate >>", module_keyword=BT_KEYWORD)
class TaskUpdateTask(AsyncTask):
def __init__(self, name="updateTask"):
super(TaskUpdateTask, self).__init__(name)
@overrides
def setup(self):
pass
@overrides
def initialise(self):
self.rocon_logger.debug("initialise >>", module_keyword=BT_KEYWORD)
if self.async_task_status == py_trees.common.Status.RUNNING:
return
        if self.context.blackboard.get('status') == 'busy':
self.async_task_status = py_trees.common.Status.SUCCESS
return
last_updated_ms = self.context.blackboard.get('task_updated') or 0
        if self.check_valid_interval(DEFAULT_UPDATE_TASK_INTERVAL_MS, last_updated_ms):
            # the update interval has elapsed
self.async_task_status = py_trees.common.Status.RUNNING
try:
coro_future = asyncio.run_coroutine_threadsafe(self._do_work(), self.context.event_loop)
result = coro_future.result()
except Exception as err:
self.rocon_logger.error('Exception occurred', exception=err)
err.with_traceback()
else:
            # not yet due; treat as SUCCESS
self.async_task_status = py_trees.common.Status.SUCCESS
async def _do_work(self):
        '''
        Fetches task information from the concert server at a fixed interval and records it on the blackboard.
        Not exposed to SDK users.
        :return:
        '''
task_body = self.context.blackboard.get('task')
worker = self.context.blackboard.get('worker')
id = worker['id']
try:
task_body['worker'] = id
result = await self.context.api_task.upsert(task_body)
            '''
            2020.05.14
            Task update policy: only upload task data created inside the robot.
            The result returned by the server after an upsert contains extra data beyond this
            robot's own, so re-uploading that extra data on every periodic update must be avoided:
            self.context.blackboard.set('task', result)
            task_body must be updated whenever the robot's task changes.
            '''
except Exception as err:
self.rocon_logger.error('Exception occurred', exception=err)
self.async_task_status = py_trees.common.Status.FAILURE
err.with_traceback()
return
        current_time_ms = get_time_milliseconds(current_datetime_utc_iso_format())
        self.context.blackboard.set('task_updated', current_time_ms)
self.async_task_status = py_trees.common.Status.SUCCESS
@overrides
def update(self):
self.rocon_logger.debug('update >> {}'.format(self.async_task_status), module_keyword=BT_KEYWORD)
return self.async_task_status
@overrides
def terminate(self, new_status):
self.rocon_logger.debug("terminate >>", module_keyword=BT_KEYWORD)
|
PypiClean
|
/Py_FS-0.2.1-py3-none-any.whl/Py_FS/wrapper/nature_inspired/MA.py
|
import numpy as np
import time
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import datasets
from Py_FS.wrapper.nature_inspired._utilities import Solution, Data, initialize, sort_agents, display, compute_fitness, Conv_plot
from Py_FS.wrapper.nature_inspired._transfer_functions import get_trans_function
def MA(num_agents, max_iter, train_data, train_label, obj_function=compute_fitness, trans_function_shape='s', prob_mut=0.2, save_conv_graph=False):
# Mayfly Algorithm
############################### Parameters ####################################
# #
# num_agents: number of mayflies #
# max_iter: maximum number of generations #
# train_data: training samples of data #
# train_label: class labels for the training samples #
# obj_function: the function to maximize while doing feature selection #
# prob_mut: probability of mutation #
# trans_function_shape: shape of the transfer function used #
# save_conv_graph: boolean value for saving convergence graph #
# #
###############################################################################
short_name = 'MA'
agent_name = 'Mayfly'
train_data, train_label = np.array(train_data), np.array(train_label)
num_features = train_data.shape[1]
trans_function = get_trans_function(trans_function_shape)
# setting up the objectives
weight_acc = None
    if obj_function == compute_fitness:
        weight_acc = float(input('Weight for the classification accuracy [0-1]: '))
obj = (obj_function, weight_acc)
compute_accuracy = (compute_fitness, 1) # compute_accuracy is just compute_fitness with accuracy weight as 1
# control parameters
a1 = 1
a2 = 1.5
d = 0.1
fl = 0.1
g = 0.8
beta = 2
delta = 0.9
# initialize position and velocities of male and female mayflies' and Leader (the agent with the max fitness)
male_pos = initialize(num_agents, num_features)
female_pos = initialize(num_agents, num_features)
male_vel = np.random.uniform(low = -1, high = 1, size = (num_agents, num_features))
female_vel = np.random.uniform(low = -1, high = 1, size = (num_agents, num_features))
male_fitness = np.zeros((num_agents))
male_accuracy = np.zeros(num_agents)
female_fitness = np.zeros((num_agents))
Leader_agent = np.zeros((num_features))
Leader_fitness = float("-inf")
Leader_accuracy = float("-inf")
male_personal_best = np.zeros((num_agents, num_features))
male_offspring = np.zeros((num_agents, num_features))
female_offspring = np.zeros((num_agents, num_features))
vmax_male = np.zeros((num_features))
vmax_female = np.zeros((num_features))
# initialize convergence curves
convergence_curve = {}
convergence_curve['fitness'] = np.zeros(max_iter)
# initialize data class
data = Data()
    val_size = float(input('Enter the percentage of data wanted for validation [0, 100]: '))/100
data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(train_data, train_label, stratify=train_label, test_size=val_size)
# create a solution object
solution = Solution()
solution.num_agents = num_agents
solution.max_iter = max_iter
solution.num_features = num_features
solution.obj_function = obj_function
# rank initial population
male_pos, male_fitness = sort_agents(male_pos, obj, data)
female_pos, female_fitness = sort_agents(female_pos, obj, data)
# start timer
start_time = time.time()
# main loop
for iter_no in range(max_iter):
print('\n================================================================================')
print(' Iteration - {}'.format(iter_no+1))
print('================================================================================\n')
#updating velocity limits
vmax_male, vmax_female = update_max_velocity(male_pos, female_pos)
for agent in range(num_agents):
#updating Leader fitness and personal best fitnesses
if male_fitness[agent] > Leader_fitness:
Leader_fitness = male_fitness[agent]
Leader_agent = male_pos[agent]
if male_fitness[agent] > obj_function(male_personal_best[agent], data.train_X, data.val_X, data.train_Y, data.val_Y):
male_personal_best[agent] = male_pos[agent]
#update velocities of male and female mayflies
male_vel[agent], female_vel[agent] = update_velocity(male_pos[agent], female_pos[agent], male_vel[agent], female_vel[agent], Leader_agent, male_personal_best[agent], a1, a2, d, fl, g, beta, agent, data, obj_function)
#check boundary condition of velocities of male and female mayflies
male_vel[agent], female_vel[agent] = check_velocity_limits(male_vel[agent], female_vel[agent], vmax_male, vmax_female)
#applying transfer functions to update positions of male and female mayflies
#the update is done based on their respective velocity values
for j in range(num_features):
trans_value = trans_function(male_vel[agent][j])
# transfer-function values lie in [0, 1], so threshold against a uniform
# draw (as the female update below does); np.random.normal(0, 1) here
# looks like a slip
if trans_value > np.random.random():
male_pos[agent][j]=1
else:
male_pos[agent][j]=0
trans_value = trans_function(female_vel[agent][j])
if trans_value > np.random.random():
female_pos[agent][j]=1
else:
female_pos[agent][j]=0
#sorting
male_pos, male_fitness = sort_agents(male_pos, obj, data)
female_pos, female_fitness = sort_agents(female_pos, obj, data)
for agent in range(num_agents):
#generation of offsprings by crossover and mutation between male and female parent mayflies
male_offspring[agent], female_offspring[agent] = cross_mut(male_pos[agent], female_pos[agent],prob_mut)
#comparing parents and offsprings and replacing parents wherever necessary
male_pos = compare_and_replace(male_pos, male_offspring, male_fitness, data, obj)
female_pos = compare_and_replace(female_pos, female_offspring, female_fitness, data, obj)
#updating fitness values
male_pos, male_fitness = sort_agents(male_pos, obj, data)
female_pos, female_fitness = sort_agents(female_pos, obj, data)
#updating values of nuptial dance
d = d * delta
fl = fl * delta
#update final information
display(male_pos, male_fitness, agent_name)
if(male_fitness[0] > Leader_fitness):
Leader_agent = male_pos[0].copy()
Leader_fitness = male_fitness[0].copy()
convergence_curve['fitness'][iter_no] = np.mean(male_fitness)
# compute final accuracy
Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy, data)
male_pos, male_accuracy = sort_agents(male_pos, compute_accuracy, data)
print('\n================================================================================')
print(' Final Result ')
print('================================================================================\n')
print('Leader ' + agent_name + ' Dimension : {}'.format(int(np.sum(Leader_agent))))
print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
print('Leader ' + agent_name + ' Classification Accuracy : {}'.format(Leader_accuracy))
print('\n================================================================================\n')
# stop timer
end_time = time.time()
exec_time = end_time - start_time
# plot convergence graph
fig, axes = Conv_plot(convergence_curve)
if(save_conv_graph):
plt.savefig('convergence_graph_'+ short_name + '.jpg')
plt.show()
# update attributes of solution
solution.best_agent = Leader_agent
solution.best_fitness = Leader_fitness
solution.best_accuracy = Leader_accuracy
solution.convergence_curve = convergence_curve
solution.final_population = male_pos
solution.final_fitness = male_fitness
solution.final_accuracy = male_accuracy
solution.execution_time = exec_time
return solution
def update_max_velocity(male, female):
size, length = male.shape
agent1 = []
agent2 = []
r = np.random.normal(0,1 , size=(length))
for j in range(length):
r[j] *= 2
agent1.append((male[0][j]-male[size-1][j])*r[j])
agent2.append((female[0][j]-female[size-1][j])*r[j])
return (agent1, agent2)
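# A reference sketch of the velocity rules implemented in update_velocity
# below (symbols as in the code: g = inertia, a1/a2 = attraction coefficients,
# b = beta, rp/rg = distances to the personal best and the Leader):
#
#   male:   v_j <- g*v_j + a1*exp(-b*rp^2)*(pbest_j - x_j) + a2*exp(-b*rg^2)*(Leader_j - x_j)
#   female: v_j <- g*v_j + a2*exp(-b*rmf^2)*(m_j - f_j)   if the male is at least as fit
#           v_j <- g*v_j + fl*U(-1, 1)                    otherwise
#
# The top-ranked male (i == 0) instead performs the nuptial dance:
#   v_j <- v_j + d*U(-1, 1)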
def update_velocity(m_pos, f_pos, m_vel, f_vel, Leader_agent, pbest, a1, a2, d, fl, g, b, i, data, obj_function):
tot_features = len(m_pos)
agent1 = np.zeros((tot_features))
agent2 = np.zeros((tot_features))
if i==0:
for j in range(tot_features):
agent1[j] = m_vel[j]+d*np.random.uniform(-1,1)
else:
sum = 0
for j in range(tot_features):
sum = sum+(m_pos[j]-Leader_agent[j])*(m_pos[j]-Leader_agent[j])
rg = np.sqrt(sum)
sum = 0
for j in range(tot_features):
sum = sum+(m_pos[j]-pbest[j])*(m_pos[j]-pbest[j])
rp = np.sqrt(sum)
for j in range(tot_features):
agent1[j] = g*m_vel[j]+a1*np.exp(-b*rp*rp)*(pbest[j]-m_pos[j])+a2*np.exp(-b*rg*rg)*(Leader_agent[j]-m_pos[j])
if obj_function(m_pos, data.train_X, data.val_X, data.train_Y, data.val_Y) >= obj_function(f_pos, data.train_X, data.val_X, data.train_Y, data.val_Y):
sum = 0
for j in range(tot_features):
sum = sum+(m_pos[j]-f_pos[j])*(m_pos[j]-f_pos[j])
# the male-female distance must be fully accumulated before the velocity
# update, mirroring the rg/rp computations above
rmf = np.sqrt(sum)
for j in range(tot_features):
agent2[j] = g*f_vel[j]+a2*np.exp(-b*rmf*rmf)*(m_pos[j]-f_pos[j])
else:
for j in range(tot_features):
agent2[j] = g*f_vel[j]+fl*np.random.uniform(-1,1)
return (agent1, agent2)
def check_velocity_limits(m_vel, f_vel, vmax_m, vmax_f):
tot_features = len(m_vel)
for j in range(tot_features):
m_vel[j] = np.minimum(m_vel[j], vmax_m[j])
m_vel[j] = np.maximum(m_vel[j], -vmax_m[j])
f_vel[j] = np.minimum(f_vel[j], vmax_f[j])
f_vel[j] = np.maximum(f_vel[j], -vmax_f[j])
return (m_vel, f_vel)
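# Note: the element-wise clamping above is equivalent to the vectorized form
#   m_vel = np.clip(m_vel, -vmax_m, vmax_m)
#   f_vel = np.clip(f_vel, -vmax_f, vmax_f)
# which is generally preferred for NumPy arrays.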
def cross_mut(m_pos, f_pos,prob_mut):
tot_features = len(m_pos)
offspring1 = np.zeros((tot_features))
offspring2 = np.zeros((tot_features))
# partition defines the midpoint of the crossover
partition = np.random.randint(tot_features//4, (3*tot_features)//4 + 1)
# starting crossover
for i in range(partition):
offspring1[i] = m_pos[i]
offspring2[i] = f_pos[i]
for i in range(partition, tot_features):
offspring1[i] = f_pos[i]
offspring2[i] = m_pos[i]
# crossover ended
# starting mutation
if np.random.random() <= prob_mut:
percent = 0.2
numChange = int(tot_features*percent)
# np.random.randint's upper bound is exclusive, so pass tot_features
# (tot_features-1 would never let the last feature mutate)
pos = np.random.randint(0, tot_features, numChange)
for j in pos:
offspring1[j] = 1-offspring1[j]
pos = np.random.randint(0, tot_features, numChange)
for j in pos:
offspring2[j] = 1-offspring2[j]
# mutation ended
if np.random.random() >= 0.5:
return (offspring1, offspring2)
else:
return (offspring2, offspring1)
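# Example (sketch): with m_pos = [1, 1, 1, 1], f_pos = [0, 0, 0, 0] and
# partition = 2, single-point crossover gives offspring1 = [1, 1, 0, 0] and
# offspring2 = [0, 0, 1, 1] before mutation; the final coin flip returns the
# pair in random order.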
def compare_and_replace(pos, off, fit, data, obj):
agents, features = pos.shape
newfit = np.zeros((agents))
temp_pos = np.zeros((agents, features))
pos, fit = sort_agents(pos, obj, data)
# finding fitnesses of offsprings
off, newfit = sort_agents(off, obj, data)
i=0
j=0
cnt=0
# merging offsprings and parents and finding the next generation of mayflies
while(cnt < agents):
if fit[i] > newfit[j]:
temp_pos[cnt] = pos[i].copy()
i+=1
else:
temp_pos[cnt] = off[j].copy()  # was off[i]: j is the index into the offspring array
j+=1
cnt+=1
return temp_pos
if __name__ == '__main__':
data = datasets.load_digits()
MA(20, 30, data.data, data.target, save_conv_graph=True)
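# The returned Solution stores the best agent as a binary feature mask; a
# minimal sketch (hypothetical variable names) for recovering the selected
# feature indices:
#
#     sol = MA(20, 30, data.data, data.target)
#     selected_features = np.where(sol.best_agent == 1)[0]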
/airbyte-cdk-0.51.10.tar.gz/airbyte-cdk-0.51.10/airbyte_cdk/sources/utils/schema_helpers.py
import importlib
import json
import os
import pkgutil
from typing import Any, ClassVar, Dict, List, Mapping, MutableMapping, Optional, Tuple
import jsonref
from airbyte_cdk.models import ConnectorSpecification, FailureType
from airbyte_cdk.utils.traced_exception import AirbyteTracedException
from jsonschema import RefResolver, validate
from jsonschema.exceptions import ValidationError
from pydantic import BaseModel, Field
class JsonFileLoader:
"""
Custom json file loader to resolve references to resources located in "shared" directory.
We need this for compatibility with existing schemas because all of them have references
pointing to shared_schema.json file instead of shared/shared_schema.json
"""
def __init__(self, uri_base: str, shared: str):
self.shared = shared
self.uri_base = uri_base
def __call__(self, uri: str) -> Dict[str, Any]:
uri = uri.replace(self.uri_base, f"{self.uri_base}/{self.shared}/")
with open(uri) as f:
data = json.load(f)
if isinstance(data, dict):
return data
else:
raise ValueError(f"Expected to read a dictionary from {uri}. Got: {data}")
def resolve_ref_links(obj: Any) -> Any:
"""
Scan resolved schema and convert jsonref.JsonRef object to JSON serializable dict.
:param obj - jsonschema object with ref field resolved.
:return JSON serializable object with references without external dependencies.
"""
if isinstance(obj, jsonref.JsonRef):
obj = resolve_ref_links(obj.__subject__)
# Omit existing definitions for external resource since
# we don't need them anymore.
if isinstance(obj, dict):
obj.pop("definitions", None)
return obj
else:
raise ValueError(f"Expected obj to be a dict. Got {obj}")
elif isinstance(obj, dict):
return {k: resolve_ref_links(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [resolve_ref_links(item) for item in obj]
else:
return obj
def _expand_refs(schema: Any, ref_resolver: Optional[RefResolver] = None) -> None:
"""Internal function to iterate over schema and replace all occurrences of $ref with their definitions. Recursive.
:param schema: schema that will be patched
:param ref_resolver: resolver to get definition from $ref; if None is passed it will be instantiated
"""
ref_resolver = ref_resolver or RefResolver.from_schema(schema)
if isinstance(schema, MutableMapping):
if "$ref" in schema:
ref_url = schema.pop("$ref")
_, definition = ref_resolver.resolve(ref_url)
_expand_refs(definition, ref_resolver=ref_resolver) # expand refs in definitions as well
schema.update(definition)
else:
for key, value in schema.items():
_expand_refs(value, ref_resolver=ref_resolver)
elif isinstance(schema, List):
for value in schema:
_expand_refs(value, ref_resolver=ref_resolver)
def expand_refs(schema: Any) -> None:
"""Iterate over schema and replace all occurrences of $ref with their definitions.
:param schema: schema that will be patched
"""
_expand_refs(schema)
schema.pop("definitions", None) # remove definitions created by $ref
def rename_key(schema: Any, old_key: str, new_key: str) -> None:
"""Iterate over nested dictionary and replace one key with another. Used to replace anyOf with oneOf. Recursive."
:param schema: schema that will be patched
:param old_key: name of the key to replace
:param new_key: new name of the key
"""
if not isinstance(schema, MutableMapping):
return
for key, value in schema.items():
rename_key(value, old_key, new_key)
if old_key in schema:
schema[new_key] = schema.pop(old_key)
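# Example (sketch), matching the anyOf -> oneOf use case from the docstring:
#
#     schema = {"anyOf": [{"type": "string"}, {"type": "null"}]}
#     rename_key(schema, "anyOf", "oneOf")
#     # schema == {"oneOf": [{"type": "string"}, {"type": "null"}]}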
class ResourceSchemaLoader:
"""JSONSchema loader from package resources"""
def __init__(self, package_name: str):
self.package_name = package_name
def get_schema(self, name: str) -> dict[str, Any]:
"""
This method retrieves a JSON schema from the schemas/ folder.
The expected file structure is to have all top-level schemas (corresponding to streams) in the "schemas/" folder, with any shared $refs
living inside the "schemas/shared/" folder. For example:
schemas/shared/<shared_definition>.json
schemas/<name>.json # contains a $ref to shared_definition
schemas/<name2>.json # contains a $ref to shared_definition
"""
schema_filename = f"schemas/{name}.json"
raw_file = pkgutil.get_data(self.package_name, schema_filename)
if not raw_file:
raise IOError(f"Cannot find file {schema_filename}")
try:
raw_schema = json.loads(raw_file)
except ValueError as err:
raise RuntimeError(f"Invalid JSON file format for file {schema_filename}") from err
return self._resolve_schema_references(raw_schema)
def _resolve_schema_references(self, raw_schema: dict[str, Any]) -> dict[str, Any]:
"""
Resolve links to external references and move them to the local "definitions" map.
:param raw_schema jsonschema to lookup for external links.
:return JSON serializable object with references without external dependencies.
"""
package = importlib.import_module(self.package_name)
if package.__file__:
base = os.path.dirname(package.__file__) + "/"
else:
raise ValueError(f"Package {package} does not have a valid __file__ field")
resolved = jsonref.JsonRef.replace_refs(raw_schema, loader=JsonFileLoader(base, "schemas/shared"), base_uri=base)
resolved = resolve_ref_links(resolved)
if isinstance(resolved, dict):
return resolved
else:
raise ValueError(f"Expected resolved to be a dict. Got {resolved}")
def check_config_against_spec_or_exit(config: Mapping[str, Any], spec: ConnectorSpecification) -> None:
"""
Check config object against spec. If the config does not conform to the spec,
throws an exception with the validation error description.
:param config - config loaded from file specified over command line
:param spec - spec object generated by connector
"""
spec_schema = spec.connectionSpecification
try:
validate(instance=config, schema=spec_schema)
except ValidationError as validation_error:
raise AirbyteTracedException(
message="Config validation error: " + validation_error.message,
internal_message=validation_error.message,
failure_type=FailureType.config_error,
) from None # required to prevent logging config secrets from the ValidationError's stacktrace
class InternalConfig(BaseModel):
KEYWORDS: ClassVar[set[str]] = {"_limit", "_page_size"}
limit: int = Field(None, alias="_limit")
page_size: int = Field(None, alias="_page_size")
def dict(self, *args: Any, **kwargs: Any) -> dict[str, Any]:
kwargs["by_alias"] = True
kwargs["exclude_unset"] = True
return super().dict(*args, **kwargs)
def is_limit_reached(self, records_counter: int) -> bool:
"""
Check if record count reached limit set by internal config.
:param records_counter - number of records already read
:return True if limit reached, False otherwise
"""
return bool(self.limit and records_counter >= self.limit)
def split_config(config: Mapping[str, Any]) -> Tuple[dict[str, Any], InternalConfig]:
"""
Break config map object into 2 instances: first is a dict with user defined
configuration and second is internal config that contains private keys for
acceptance test configuration.
:param config - Dict object that has been loaded from config file.
:return tuple of user defined config dict with filtered out internal
parameters and connector acceptance test internal config object.
"""
main_config = {}
internal_config = {}
for k, v in config.items():
if k in InternalConfig.KEYWORDS:
internal_config[k] = v
else:
main_config[k] = v
return main_config, InternalConfig.parse_obj(internal_config)
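# Example (sketch): keys listed in InternalConfig.KEYWORDS are split out of
# the user-facing config.
#
#     main, internal = split_config({"start_date": "2021-01-01", "_limit": 10})
#     # main == {"start_date": "2021-01-01"} and internal.limit == 10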
/azure-mgmt-resource-23.1.0b1.zip/azure-mgmt-resource-23.1.0b1/azure/mgmt/resource/resources/v2019_07_01/aio/operations/_operations.py
from io import IOBase
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import (
build_deployment_operations_get_at_management_group_scope_request,
build_deployment_operations_get_at_scope_request,
build_deployment_operations_get_at_subscription_scope_request,
build_deployment_operations_get_at_tenant_scope_request,
build_deployment_operations_get_request,
build_deployment_operations_list_at_management_group_scope_request,
build_deployment_operations_list_at_scope_request,
build_deployment_operations_list_at_subscription_scope_request,
build_deployment_operations_list_at_tenant_scope_request,
build_deployment_operations_list_request,
build_deployments_calculate_template_hash_request,
build_deployments_cancel_at_management_group_scope_request,
build_deployments_cancel_at_scope_request,
build_deployments_cancel_at_subscription_scope_request,
build_deployments_cancel_at_tenant_scope_request,
build_deployments_cancel_request,
build_deployments_check_existence_at_management_group_scope_request,
build_deployments_check_existence_at_scope_request,
build_deployments_check_existence_at_subscription_scope_request,
build_deployments_check_existence_at_tenant_scope_request,
build_deployments_check_existence_request,
build_deployments_create_or_update_at_management_group_scope_request,
build_deployments_create_or_update_at_scope_request,
build_deployments_create_or_update_at_subscription_scope_request,
build_deployments_create_or_update_at_tenant_scope_request,
build_deployments_create_or_update_request,
build_deployments_delete_at_management_group_scope_request,
build_deployments_delete_at_scope_request,
build_deployments_delete_at_subscription_scope_request,
build_deployments_delete_at_tenant_scope_request,
build_deployments_delete_request,
build_deployments_export_template_at_management_group_scope_request,
build_deployments_export_template_at_scope_request,
build_deployments_export_template_at_subscription_scope_request,
build_deployments_export_template_at_tenant_scope_request,
build_deployments_export_template_request,
build_deployments_get_at_management_group_scope_request,
build_deployments_get_at_scope_request,
build_deployments_get_at_subscription_scope_request,
build_deployments_get_at_tenant_scope_request,
build_deployments_get_request,
build_deployments_list_at_management_group_scope_request,
build_deployments_list_at_scope_request,
build_deployments_list_at_subscription_scope_request,
build_deployments_list_at_tenant_scope_request,
build_deployments_list_by_resource_group_request,
build_deployments_validate_at_management_group_scope_request,
build_deployments_validate_at_scope_request,
build_deployments_validate_at_subscription_scope_request,
build_deployments_validate_at_tenant_scope_request,
build_deployments_validate_request,
build_deployments_what_if_at_subscription_scope_request,
build_deployments_what_if_request,
build_operations_list_request,
build_providers_get_at_tenant_scope_request,
build_providers_get_request,
build_providers_list_at_tenant_scope_request,
build_providers_list_request,
build_providers_register_request,
build_providers_unregister_request,
build_resource_groups_check_existence_request,
build_resource_groups_create_or_update_request,
build_resource_groups_delete_request,
build_resource_groups_export_template_request,
build_resource_groups_get_request,
build_resource_groups_list_request,
build_resource_groups_update_request,
build_resources_check_existence_by_id_request,
build_resources_check_existence_request,
build_resources_create_or_update_by_id_request,
build_resources_create_or_update_request,
build_resources_delete_by_id_request,
build_resources_delete_request,
build_resources_get_by_id_request,
build_resources_get_request,
build_resources_list_by_resource_group_request,
build_resources_list_request,
build_resources_move_resources_request,
build_resources_update_by_id_request,
build_resources_update_request,
build_resources_validate_move_resources_request,
build_tags_create_or_update_request,
build_tags_create_or_update_value_request,
build_tags_delete_request,
build_tags_delete_value_request,
build_tags_list_request,
)
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else:
from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.resource.resources.v2019_07_01.aio.ResourceManagementClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.Operation"]:
"""Lists all of the available Microsoft.Resources REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_operations_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.Resources/operations"}
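# Usage sketch (assumes an authenticated aio ResourceManagementClient named
# `client`, accessed through its `operations` attribute as described in the
# class docstring):
#
#     async for op in client.operations.list():
#         print(op.name)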
class DeploymentsOperations: # pylint: disable=too-many-public-methods
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.resource.resources.v2019_07_01.aio.ResourceManagementClient`'s
:attr:`deployments` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
async def _delete_at_scope_initial( # pylint: disable=inconsistent-return-statements
self, scope: str, deployment_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_delete_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_scope_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_scope_initial.metadata = {"url": "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_delete_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_at_scope_initial( # type: ignore
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete_at_scope.metadata = {"url": "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"}
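# Usage sketch (hypothetical scope and deployment name; `client` is an
# authenticated aio ResourceManagementClient):
#
#     poller = await client.deployments.begin_delete_at_scope(
#         scope="subscriptions/<sub-id>", deployment_name="my-deployment"
#     )
#     await poller.result()  # resolves once the history entry is deleted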
@distributed_trace_async
async def check_existence_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> bool:
"""Checks whether the deployment exists.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_check_existence_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_scope.metadata = {"url": "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"}
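# Usage sketch: the service responds 204 (exists) or 404 (does not exist),
# which this method maps to True/False.
#
#     exists = await client.deployments.check_existence_at_scope(
#         scope="subscriptions/<sub-id>", deployment_name="my-deployment"
#     )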
async def _create_or_update_at_scope_initial(
self, scope: str, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> _models.DeploymentExtended:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Deployment")
request = build_deployments_create_or_update_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_at_scope_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_at_scope_initial.metadata = {
"url": "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
@overload
async def begin_create_or_update_at_scope(
self,
scope: str,
deployment_name: str,
parameters: _models.Deployment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at a given scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update_at_scope(
self, scope: str, deployment_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at a given scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update_at_scope(
self, scope: str, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at a given scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Is either a Deployment type
or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_at_scope_initial(
scope=scope,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update_at_scope.metadata = {
"url": "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
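# Usage sketch (assumes the Deployment and DeploymentProperties models from
# this package's models module; the template body is a placeholder):
#
#     from azure.mgmt.resource.resources.v2019_07_01.models import (
#         Deployment,
#         DeploymentProperties,
#     )
#     poller = await client.deployments.begin_create_or_update_at_scope(
#         scope, "my-deployment",
#         Deployment(properties=DeploymentProperties(mode="Incremental", template={})),
#     )
#     extended = await poller.result()  # DeploymentExtended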
@distributed_trace_async
async def get_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> _models.DeploymentExtended:
"""Gets a deployment.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
request = build_deployments_get_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_scope.metadata = {"url": "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def cancel_at_scope( # pylint: disable=inconsistent-return-statements
self, scope: str, deployment_name: str, **kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_cancel_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_scope.metadata = {"url": "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"}
@overload
async def validate_at_scope(
self,
scope: str,
deployment_name: str,
parameters: _models.Deployment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def validate_at_scope(
self, scope: str, deployment_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def validate_at_scope(
self, scope: str, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Is either a Deployment type or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentValidateResult] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Deployment")
request = build_deployments_validate_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.validate_at_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DeploymentValidateResult", pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize("DeploymentValidateResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
validate_at_scope.metadata = {"url": "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"}
@distributed_trace_async
async def export_template_at_scope(
self, scope: str, deployment_name: str, **kwargs: Any
) -> _models.DeploymentExportResult:
"""Exports the template used for specified deployment.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExportResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentExportResult] = kwargs.pop("cls", None)
request = build_deployments_export_template_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentExportResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_scope.metadata = {
"url": "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"
}
@distributed_trace
def list_at_scope(
self, scope: str, filter: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.DeploymentExtended"]:
"""Get all the deployments at the given scope.
:param scope: The scope of a deployment. Required.
:type scope: str
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentExtended or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_scope_request(
scope=scope,
filter=filter,
top=top,
api_version=api_version,
template_url=self.list_at_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_at_scope.metadata = {"url": "/{scope}/providers/Microsoft.Resources/deployments/"}
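# Usage sketch, filtering as described in the docstring:
#
#     async for dep in client.deployments.list_at_scope(
#         scope, filter="provisioningState eq 'Failed'", top=10
#     ):
#         print(dep.name)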
async def _delete_at_tenant_scope_initial( # pylint: disable=inconsistent-return-statements
self, deployment_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_delete_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_tenant_scope_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_tenant_scope_initial.metadata = {"url": "/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_delete_at_tenant_scope(self, deployment_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_at_tenant_scope_initial( # type: ignore
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete_at_tenant_scope.metadata = {"url": "/providers/Microsoft.Resources/deployments/{deploymentName}"}
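# Illustrative usage sketch (added commentary; "example-deployment" is a
# made-up name and `client` is the hypothetical async client sketched above):
#
#     poller = await client.deployments.begin_delete_at_tenant_scope("example-deployment")
#     await poller.result()  # returns None once the service reports the terminal 204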
@distributed_trace_async
async def check_existence_at_tenant_scope(self, deployment_name: str, **kwargs: Any) -> bool:
"""Checks whether the deployment exists.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_check_existence_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_tenant_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_tenant_scope.metadata = {"url": "/providers/Microsoft.Resources/deployments/{deploymentName}"}
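# Illustrative usage sketch (added commentary; same hypothetical `client`):
#
#     exists = await client.deployments.check_existence_at_tenant_scope("example-deployment")
#     # True corresponds to the 204 response, False to the 404 response.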
async def _create_or_update_at_tenant_scope_initial(
self, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> _models.DeploymentExtended:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Deployment")
request = build_deployments_create_or_update_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_at_tenant_scope_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_at_tenant_scope_initial.metadata = {
"url": "/providers/Microsoft.Resources/deployments/{deploymentName}"
}
@overload
async def begin_create_or_update_at_tenant_scope(
self,
deployment_name: str,
parameters: _models.Deployment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at tenant scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update_at_tenant_scope(
self, deployment_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at tenant scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update_at_tenant_scope(
self, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at tenant scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Is either a Deployment type
or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_at_tenant_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update_at_tenant_scope.metadata = {
"url": "/providers/Microsoft.Resources/deployments/{deploymentName}"
}
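# Illustrative usage sketch (added commentary). The template below is a
# minimal empty ARM template; the names and location are assumptions, and the
# service expects `location` on the Deployment model for tenant-scope
# deployments.
#
#     from azure.mgmt.resource.resources.v2019_07_01.models import (
#         Deployment, DeploymentMode, DeploymentProperties,
#     )
#
#     deployment = Deployment(
#         location="eastus",
#         properties=DeploymentProperties(
#             mode=DeploymentMode.INCREMENTAL,
#             template={
#                 "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
#                 "contentVersion": "1.0.0.0",
#                 "resources": [],
#             },
#         ),
#     )
#     poller = await client.deployments.begin_create_or_update_at_tenant_scope(
#         "example-deployment", deployment
#     )
#     extended = await poller.result()  # a DeploymentExtended instance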
@distributed_trace_async
async def get_at_tenant_scope(self, deployment_name: str, **kwargs: Any) -> _models.DeploymentExtended:
"""Gets a deployment.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
request = build_deployments_get_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_tenant_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {"url": "/providers/Microsoft.Resources/deployments/{deploymentName}"}
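# Illustrative usage sketch (added commentary; same hypothetical `client`):
#
#     extended = await client.deployments.get_at_tenant_scope("example-deployment")
#     print(extended.properties.provisioning_state)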
@distributed_trace_async
async def cancel_at_tenant_scope( # pylint: disable=inconsistent-return-statements
self, deployment_name: str, **kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_cancel_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_tenant_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_tenant_scope.metadata = {"url": "/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"}
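# Illustrative usage sketch (added commentary). Per the docstring above,
# cancellation only succeeds while provisioningState is Accepted or Running;
# the deployment name is made up.
#
#     await client.deployments.cancel_at_tenant_scope("example-deployment")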
@overload
async def validate_at_tenant_scope(
self,
deployment_name: str,
parameters: _models.Deployment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def validate_at_tenant_scope(
self, deployment_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def validate_at_tenant_scope(
self, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Is either a Deployment type or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentValidateResult] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Deployment")
request = build_deployments_validate_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.validate_at_tenant_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DeploymentValidateResult", pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize("DeploymentValidateResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
validate_at_tenant_scope.metadata = {"url": "/providers/Microsoft.Resources/deployments/{deploymentName}/validate"}
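# Illustrative usage sketch (added commentary; reuses the hypothetical
# `deployment` object from the create-or-update sketch above). A 400 response
# still deserializes into DeploymentValidateResult, so inspect `error`:
#
#     result = await client.deployments.validate_at_tenant_scope(
#         "example-deployment", deployment
#     )
#     if result.error:
#         print(result.error.code, result.error.message)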
@distributed_trace_async
async def export_template_at_tenant_scope(
self, deployment_name: str, **kwargs: Any
) -> _models.DeploymentExportResult:
"""Exports the template used for specified deployment.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExportResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentExportResult] = kwargs.pop("cls", None)
request = build_deployments_export_template_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_tenant_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentExportResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_tenant_scope.metadata = {
"url": "/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"
}
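# Illustrative usage sketch (added commentary; same hypothetical `client`):
#
#     exported = await client.deployments.export_template_at_tenant_scope("example-deployment")
#     print(exported.template)  # the raw template used for the deployment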
@distributed_trace
def list_at_tenant_scope(
self, filter: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.DeploymentExtended"]:
"""Get all the deployments at the tenant scope.
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentExtended or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_tenant_scope_request(
filter=filter,
top=top,
api_version=api_version,
template_url=self.list_at_tenant_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_at_tenant_scope.metadata = {"url": "/providers/Microsoft.Resources/deployments/"}
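# Illustrative usage sketch (added commentary). The filter string follows the
# `$filter=provisioningState eq '{state}'` form described in the docstring:
#
#     async for deployment in client.deployments.list_at_tenant_scope(
#         filter="provisioningState eq 'Failed'", top=10
#     ):
#         print(deployment.name)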
async def _delete_at_management_group_scope_initial( # pylint: disable=inconsistent-return-statements
self, group_id: str, deployment_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_delete_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_management_group_scope_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_management_group_scope_initial.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
@distributed_trace_async
async def begin_delete_at_management_group_scope(
self, group_id: str, deployment_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_at_management_group_scope_initial( # type: ignore
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete_at_management_group_scope.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
@distributed_trace_async
async def check_existence_at_management_group_scope(
self, group_id: str, deployment_name: str, **kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_check_existence_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_management_group_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_management_group_scope.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
async def _create_or_update_at_management_group_scope_initial(
self, group_id: str, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> _models.DeploymentExtended:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Deployment")
request = build_deployments_create_or_update_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_at_management_group_scope_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_at_management_group_scope_initial.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
@overload
async def begin_create_or_update_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
parameters: _models.Deployment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at management group scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at management group scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update_at_management_group_scope(
self, group_id: str, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at management group scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Is either a Deployment type
or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_at_management_group_scope_initial(
group_id=group_id,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update_at_management_group_scope.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
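# Illustrative usage sketch (added commentary; "example-mg" and the
# `deployment` object from the tenant-scope sketch are assumptions):
#
#     poller = await client.deployments.begin_create_or_update_at_management_group_scope(
#         "example-mg", "example-deployment", deployment
#     )
#     extended = await poller.result()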
@distributed_trace_async
async def get_at_management_group_scope(
self, group_id: str, deployment_name: str, **kwargs: Any
) -> _models.DeploymentExtended:
"""Gets a deployment.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
request = build_deployments_get_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_management_group_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_management_group_scope.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
@distributed_trace_async
async def cancel_at_management_group_scope( # pylint: disable=inconsistent-return-statements
self, group_id: str, deployment_name: str, **kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_cancel_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_management_group_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_management_group_scope.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"
}
@overload
async def validate_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
parameters: _models.Deployment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def validate_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def validate_at_management_group_scope(
self, group_id: str, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Is either a Deployment type or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentValidateResult] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Deployment")
request = build_deployments_validate_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.validate_at_management_group_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DeploymentValidateResult", pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize("DeploymentValidateResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
validate_at_management_group_scope.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"
}
@distributed_trace_async
async def export_template_at_management_group_scope(
self, group_id: str, deployment_name: str, **kwargs: Any
) -> _models.DeploymentExportResult:
"""Exports the template used for specified deployment.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExportResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentExportResult] = kwargs.pop("cls", None)
request = build_deployments_export_template_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_management_group_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentExportResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_management_group_scope.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"
}
@distributed_trace
def list_at_management_group_scope(
self, group_id: str, filter: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.DeploymentExtended"]:
"""Get all the deployments for a management group.
:param group_id: The management group ID. Required.
:type group_id: str
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentExtended or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_management_group_scope_request(
group_id=group_id,
filter=filter,
top=top,
api_version=api_version,
template_url=self.list_at_management_group_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_at_management_group_scope.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/"
}
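# Usage sketch (illustrative): the returned AsyncItemPaged is consumed with
# `async for`; the filter expression and `top` value below are placeholders.
#
#     async for deployment in client.deployments.list_at_management_group_scope(
#         group_id="my-mgmt-group", filter="provisioningState eq 'Succeeded'", top=10
#     ):
#         print(deployment.name)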
async def _delete_at_subscription_scope_initial( # pylint: disable=inconsistent-return-statements
self, deployment_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_delete_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_at_subscription_scope_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_subscription_scope_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
@distributed_trace_async
async def begin_delete_at_subscription_scope(self, deployment_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_at_subscription_scope_initial( # type: ignore
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete_at_subscription_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
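# Usage sketch (illustrative): the poller wraps the 202 -> 204 Location-header
# polling described in the docstring above.
#
#     poller = await client.deployments.begin_delete_at_subscription_scope("my-deployment")
#     await poller.result()  # resolves to None once deletion completes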
@distributed_trace_async
async def check_existence_at_subscription_scope(self, deployment_name: str, **kwargs: Any) -> bool:
"""Checks whether the deployment exists.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_check_existence_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence_at_subscription_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_subscription_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
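# Usage sketch (illustrative): a 204 response maps to True, a 404 to False.
#
#     exists = await client.deployments.check_existence_at_subscription_scope("my-deployment")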
async def _create_or_update_at_subscription_scope_initial(
self, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> _models.DeploymentExtended:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Deployment")
request = build_deployments_create_or_update_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_at_subscription_scope_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_at_subscription_scope_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
@overload
async def begin_create_or_update_at_subscription_scope(
self,
deployment_name: str,
parameters: _models.Deployment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at subscription scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update_at_subscription_scope(
self, deployment_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at subscription scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update_at_subscription_scope(
self, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources at subscription scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Is either a Deployment type
or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_at_subscription_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update_at_subscription_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
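# Usage sketch (illustrative): `template_body` and `parameter_values` are assumed to
# be plain dicts loaded elsewhere; subscription-scope deployments also take a
# location on the Deployment model in this api-version.
#
#     from azure.mgmt.resource.resources.v2019_07_01.models import (
#         Deployment, DeploymentProperties, DeploymentMode,
#     )
#
#     deployment = Deployment(
#         location="eastus",
#         properties=DeploymentProperties(
#             mode=DeploymentMode.INCREMENTAL,
#             template=template_body,
#             parameters=parameter_values,
#         ),
#     )
#     poller = await client.deployments.begin_create_or_update_at_subscription_scope(
#         "my-deployment", deployment
#     )
#     extended = await poller.result()  # DeploymentExtended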
@distributed_trace_async
async def get_at_subscription_scope(self, deployment_name: str, **kwargs: Any) -> _models.DeploymentExtended:
"""Gets a deployment.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
request = build_deployments_get_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_at_subscription_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
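# Usage sketch (illustrative):
#
#     deployment = await client.deployments.get_at_subscription_scope("my-deployment")
#     print(deployment.properties.provisioning_state)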
@distributed_trace_async
async def cancel_at_subscription_scope( # pylint: disable=inconsistent-return-statements
self, deployment_name: str, **kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_cancel_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.cancel_at_subscription_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_subscription_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"
}
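# Usage sketch (illustrative): succeeds with None on a 204; per the docstring above,
# only deployments in the Accepted or Running state can be canceled.
#
#     await client.deployments.cancel_at_subscription_scope("my-deployment")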
@overload
async def validate_at_subscription_scope(
self,
deployment_name: str,
parameters: _models.Deployment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def validate_at_subscription_scope(
self, deployment_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def validate_at_subscription_scope(
self, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Is either a Deployment type or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentValidateResult] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Deployment")
request = build_deployments_validate_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.validate_at_subscription_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DeploymentValidateResult", pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize("DeploymentValidateResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
validate_at_subscription_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"
}
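# Usage sketch (illustrative): note that validation failures come back as a
# deserialized DeploymentValidateResult on a 400 rather than as an exception.
#
#     result = await client.deployments.validate_at_subscription_scope("my-deployment", deployment)
#     if result.error:
#         print(result.error.message)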
async def _what_if_at_subscription_scope_initial(
self, deployment_name: str, parameters: Union[_models.DeploymentWhatIf, IO], **kwargs: Any
) -> Optional[_models.WhatIfOperationResult]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.WhatIfOperationResult]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "DeploymentWhatIf")
request = build_deployments_what_if_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._what_if_at_subscription_scope_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("WhatIfOperationResult", pipeline_response)
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("str", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_what_if_at_subscription_scope_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"
}
@overload
async def begin_what_if_at_subscription_scope(
self,
deployment_name: str,
parameters: _models.DeploymentWhatIf,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.WhatIfOperationResult]:
"""Returns changes that will be made by the deployment if executed at the scope of the
subscription.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters for the What If operation. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentWhatIf
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WhatIfOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.WhatIfOperationResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_what_if_at_subscription_scope(
self, deployment_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[_models.WhatIfOperationResult]:
"""Returns changes that will be made by the deployment if executed at the scope of the
subscription.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters for the What If operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WhatIfOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.WhatIfOperationResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_what_if_at_subscription_scope(
self, deployment_name: str, parameters: Union[_models.DeploymentWhatIf, IO], **kwargs: Any
) -> AsyncLROPoller[_models.WhatIfOperationResult]:
"""Returns changes that will be made by the deployment if executed at the scope of the
subscription.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters for the What If operation. Is either a DeploymentWhatIf type or
an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentWhatIf or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WhatIfOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.WhatIfOperationResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.WhatIfOperationResult] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._what_if_at_subscription_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("WhatIfOperationResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_what_if_at_subscription_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"
}
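# Usage sketch (illustrative): `what_if` is assumed to be a DeploymentWhatIf model
# built the same way as a Deployment; the poller completes via the Location header
# (final-state-via "location", as configured above).
#
#     poller = await client.deployments.begin_what_if_at_subscription_scope("my-deployment", what_if)
#     result = await poller.result()  # WhatIfOperationResult
#     for change in result.changes or []:
#         print(change.resource_id, change.change_type)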
@distributed_trace_async
async def export_template_at_subscription_scope(
self, deployment_name: str, **kwargs: Any
) -> _models.DeploymentExportResult:
"""Exports the template used for specified deployment.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExportResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentExportResult] = kwargs.pop("cls", None)
request = build_deployments_export_template_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.export_template_at_subscription_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentExportResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_subscription_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"
}
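# Usage sketch (illustrative):
#
#     result = await client.deployments.export_template_at_subscription_scope("my-deployment")
#     print(result.template)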
@distributed_trace
def list_at_subscription_scope(
self, filter: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.DeploymentExtended"]:
"""Get all the deployments for a subscription.
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentExtended or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_subscription_scope_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
api_version=api_version,
template_url=self.list_at_subscription_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_at_subscription_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/"
}
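# Usage sketch (illustrative): both `filter` and `top` are optional.
#
#     async for deployment in client.deployments.list_at_subscription_scope(top=5):
#         print(deployment.name)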
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, deployment_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_delete_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
@distributed_trace_async
async def begin_delete(self, resource_group_name: str, deployment_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. Deleting a template deployment does
not affect the state of the resource group. This is an asynchronous operation that returns a
status of 202 until the template deployment is successfully deleted. The Location response
header contains the URI that is used to obtain the status of the process. While the process is
running, a call to the URI in the Location header returns a status of 202. When the process
finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param resource_group_name: The name of the resource group with the deployment to delete. The
name is case insensitive. Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
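# Usage sketch (illustrative): the resource-group-scoped operations from here on
# mirror the subscription-scoped ones, with a leading resource_group_name argument.
#
#     poller = await client.deployments.begin_delete("my-rg", "my-deployment")
#     await poller.result()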
@distributed_trace_async
async def check_existence(self, resource_group_name: str, deployment_name: str, **kwargs: Any) -> bool:
"""Checks whether the deployment exists.
:param resource_group_name: The name of the resource group with the deployment to check. The
name is case insensitive. Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_check_existence_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
async def _create_or_update_initial(
self, resource_group_name: str, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> _models.DeploymentExtended:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Deployment")
request = build_deployments_create_or_update_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
deployment_name: str,
parameters: _models.Deployment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources to a resource group.
You can provide the template and parameters directly in the request or link to JSON files.
:param resource_group_name: The name of the resource group to deploy the resources to. The name
is case insensitive. The resource group must already exist. Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
deployment_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources to a resource group.
You can provide the template and parameters directly in the request or link to JSON files.
:param resource_group_name: The name of the resource group to deploy the resources to. The name
is case insensitive. The resource group must already exist. Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self, resource_group_name: str, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> AsyncLROPoller[_models.DeploymentExtended]:
"""Deploys resources to a resource group.
You can provide the template and parameters directly in the request or link to JSON files.
:param resource_group_name: The name of the resource group to deploy the resources to. The name
is case insensitive. The resource group must already exist. Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation. Is either a Deployment type
or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
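# Usage sketch (illustrative, not part of the generated surface): start the
# deployment and wait for the terminal state. ``client`` is assumed to be an
# authenticated ResourceManagementClient; the names and the ``deployment``
# model instance are hypothetical.
#
#     poller = await client.deployments.begin_create_or_update(
#         resource_group_name="example-rg",
#         deployment_name="example-deployment",
#         parameters=deployment,  # Deployment model or a JSON byte stream (IO)
#     )
#     extended = await poller.result()  # DeploymentExtended once provisioning ends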
@distributed_trace_async
async def get(self, resource_group_name: str, deployment_name: str, **kwargs: Any) -> _models.DeploymentExtended:
"""Gets a deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentExtended] = kwargs.pop("cls", None)
request = build_deployments_get_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentExtended", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"
}
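# Usage sketch (hypothetical names; assumes an authenticated ``client``):
#
#     extended = await client.deployments.get("example-rg", "example-deployment")
#     print(extended.properties.provisioning_state)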
@distributed_trace_async
async def cancel( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, deployment_name: str, **kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resource group
partially deployed.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_deployments_cancel_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.cancel.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"
}
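# Usage sketch: cancel succeeds with HTTP 204 and returns None, so there is
# nothing to inspect on success (hypothetical names, authenticated ``client``):
#
#     await client.deployments.cancel("example-rg", "example-deployment")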
@overload
async def validate(
self,
resource_group_name: str,
deployment_name: str,
parameters: _models.Deployment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager..
:param resource_group_name: The name of the resource group the template will be deployed to.
The name is case insensitive. Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def validate(
self,
resource_group_name: str,
deployment_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager..
:param resource_group_name: The name of the resource group the template will be deployed to.
The name is case insensitive. Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def validate(
self, resource_group_name: str, deployment_name: str, parameters: Union[_models.Deployment, IO], **kwargs: Any
) -> _models.DeploymentValidateResult:
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager..
:param resource_group_name: The name of the resource group the template will be deployed to.
The name is case insensitive. Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: Parameters to validate. Is either a Deployment type or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.Deployment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentValidateResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentValidateResult] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Deployment")
request = build_deployments_validate_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.validate.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
# HTTP 200 (valid) and HTTP 400 (invalid) both carry a DeploymentValidateResult body.
deserialized = self._deserialize("DeploymentValidateResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
validate.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"
}
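# Usage sketch: validation returns a DeploymentValidateResult for both HTTP
# 200 (valid) and HTTP 400 (invalid); check ``error`` to tell them apart
# (hypothetical names, authenticated ``client``):
#
#     result = await client.deployments.validate(
#         "example-rg", "example-deployment", parameters=deployment
#     )
#     if result.error:
#         print(result.error.message)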
async def _what_if_initial(
self,
resource_group_name: str,
deployment_name: str,
parameters: Union[_models.DeploymentWhatIf, IO],
**kwargs: Any
) -> Optional[_models.WhatIfOperationResult]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.WhatIfOperationResult]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "DeploymentWhatIf")
request = build_deployments_what_if_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._what_if_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("WhatIfOperationResult", pipeline_response)
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("str", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_what_if_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"
}
@overload
async def begin_what_if(
self,
resource_group_name: str,
deployment_name: str,
parameters: _models.DeploymentWhatIf,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.WhatIfOperationResult]:
"""Returns changes that will be made by the deployment if executed at the scope of the resource
group.
:param resource_group_name: The name of the resource group the template will be deployed to.
The name is case insensitive. Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: The deployment What-If parameters. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentWhatIf
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WhatIfOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.WhatIfOperationResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_what_if(
self,
resource_group_name: str,
deployment_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.WhatIfOperationResult]:
"""Returns changes that will be made by the deployment if executed at the scope of the resource
group.
:param resource_group_name: The name of the resource group the template will be deployed to.
The name is case insensitive. Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: The deployment What-If parameters. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WhatIfOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.WhatIfOperationResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_what_if(
self,
resource_group_name: str,
deployment_name: str,
parameters: Union[_models.DeploymentWhatIf, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.WhatIfOperationResult]:
"""Returns changes that will be made by the deployment if executed at the scope of the resource
group.
:param resource_group_name: The name of the resource group the template will be deployed to.
The name is case insensitive. Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param parameters: The deployment What-If parameters. Is either a DeploymentWhatIf type or an
IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentWhatIf or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WhatIfOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.WhatIfOperationResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.WhatIfOperationResult] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._what_if_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("WhatIfOperationResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_what_if.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"
}
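# Usage sketch: What-If is a long-running operation that finishes via the
# Location header (see the ``final-state-via`` option above). Names and the
# ``what_if_body`` model are hypothetical; ``client`` is assumed authenticated:
#
#     poller = await client.deployments.begin_what_if(
#         "example-rg", "example-deployment", parameters=what_if_body
#     )
#     what_if = await poller.result()  # WhatIfOperationResult
#     for change in what_if.changes or []:
#         print(change.resource_id, change.change_type)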
@distributed_trace_async
async def export_template(
self, resource_group_name: str, deployment_name: str, **kwargs: Any
) -> _models.DeploymentExportResult:
"""Exports the template used for specified deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExportResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentExportResult] = kwargs.pop("cls", None)
request = build_deployments_export_template_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.export_template.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentExportResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"
}
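# Usage sketch (hypothetical names, authenticated ``client``):
#
#     exported = await client.deployments.export_template("example-rg", "example-deployment")
#     template_json = exported.template  # the template body as a JSON object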
@distributed_trace
def list_by_resource_group(
self, resource_group_name: str, filter: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.DeploymentExtended"]:
"""Get all the deployments for a resource group.
:param resource_group_name: The name of the resource group with the deployments to get. The
name is case insensitive. Required.
:type resource_group_name: str
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentExtended or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentExtended]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/"
}
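# Usage sketch: the pager fetches pages lazily, so iterate with ``async for``.
# The ``filter`` value is the raw OData expression without the ``$filter=``
# prefix (hypothetical names, authenticated ``client``):
#
#     async for deployment in client.deployments.list_by_resource_group(
#         "example-rg", filter="provisioningState eq 'Failed'", top=10
#     ):
#         print(deployment.name)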
@distributed_trace_async
async def calculate_template_hash(self, template: JSON, **kwargs: Any) -> _models.TemplateHashResult:
"""Calculate the hash of the given template.
:param template: The template for which to calculate the hash. Required.
:type template: JSON
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TemplateHashResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.TemplateHashResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
cls: ClsType[_models.TemplateHashResult] = kwargs.pop("cls", None)
_json = self._serialize.body(template, "object")
request = build_deployments_calculate_template_hash_request(
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.calculate_template_hash.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("TemplateHashResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
calculate_template_hash.metadata = {"url": "/providers/Microsoft.Resources/calculateTemplateHash"}
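# Usage sketch: the template is sent as a plain JSON object (placeholder
# content; assumes an authenticated ``client``):
#
#     template = {"$schema": "...", "contentVersion": "1.0.0.0", "resources": []}
#     hashed = await client.deployments.calculate_template_hash(template)
#     print(hashed.template_hash)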
class ProvidersOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.resource.resources.v2019_07_01.aio.ResourceManagementClient`'s
:attr:`providers` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def unregister(self, resource_provider_namespace: str, **kwargs: Any) -> _models.Provider:
"""Unregisters a subscription from a resource provider.
:param resource_provider_namespace: The namespace of the resource provider to unregister.
Required.
:type resource_provider_namespace: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.Provider
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.Provider] = kwargs.pop("cls", None)
request = build_providers_unregister_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.unregister.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Provider", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unregister.metadata = {"url": "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister"}
@distributed_trace_async
async def register(self, resource_provider_namespace: str, **kwargs: Any) -> _models.Provider:
"""Registers a subscription with a resource provider.
:param resource_provider_namespace: The namespace of the resource provider to register.
Required.
:type resource_provider_namespace: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.Provider
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.Provider] = kwargs.pop("cls", None)
request = build_providers_register_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.register.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Provider", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
register.metadata = {"url": "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register"}
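# Usage sketch: registration is eventually consistent, so the returned
# Provider may still report ``registration_state == "Registering"``
# (hypothetical namespace, authenticated ``client``):
#
#     provider = await client.providers.register("Microsoft.Network")
#     print(provider.registration_state)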
@distributed_trace
def list(
self, top: Optional[int] = None, expand: Optional[str] = None, **kwargs: Any
) -> AsyncIterable["_models.Provider"]:
"""Gets all resource providers for a subscription.
:param top: The number of results to return. If null is passed, returns all providers. Default
value is None.
:type top: int
:param expand: The properties to include in the results. For example, use &$expand=metadata in
the query string to retrieve resource provider metadata. To include property aliases in
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Provider or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.Provider]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.ProviderListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_providers_list_request(
subscription_id=self._config.subscription_id,
top=top,
expand=expand,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers"}
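# Usage sketch: pass ``expand="resourceTypes/aliases"`` to pull alias metadata
# into each page (assumes an authenticated ``client``):
#
#     async for provider in client.providers.list(expand="resourceTypes/aliases"):
#         print(provider.namespace, provider.registration_state)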
@distributed_trace
def list_at_tenant_scope(
self, top: Optional[int] = None, expand: Optional[str] = None, **kwargs: Any
) -> AsyncIterable["_models.Provider"]:
"""Gets all resource providers for the tenant.
:param top: The number of results to return. If null is passed, returns all providers. Default
value is None.
:type top: int
:param expand: The properties to include in the results. For example, use &$expand=metadata in
the query string to retrieve resource provider metadata. To include property aliases in
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Provider or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.Provider]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.ProviderListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_providers_list_at_tenant_scope_request(
top=top,
expand=expand,
api_version=api_version,
template_url=self.list_at_tenant_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_at_tenant_scope.metadata = {"url": "/providers"}
@distributed_trace_async
async def get(
self, resource_provider_namespace: str, expand: Optional[str] = None, **kwargs: Any
) -> _models.Provider:
"""Gets the specified resource provider.
:param resource_provider_namespace: The namespace of the resource provider. Required.
:type resource_provider_namespace: str
:param expand: The $expand query parameter. For example, to include property aliases in
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.Provider
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.Provider] = kwargs.pop("cls", None)
request = build_providers_get_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Provider", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}"}
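# Usage sketch (hypothetical namespace, authenticated ``client``):
#
#     provider = await client.providers.get("Microsoft.Storage")
#     for rt in provider.resource_types or []:
#         print(rt.resource_type, rt.api_versions)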
@distributed_trace_async
async def get_at_tenant_scope(
self, resource_provider_namespace: str, expand: Optional[str] = None, **kwargs: Any
) -> _models.Provider:
"""Gets the specified resource provider at the tenant level.
:param resource_provider_namespace: The namespace of the resource provider. Required.
:type resource_provider_namespace: str
:param expand: The $expand query parameter. For example, to include property aliases in
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.Provider
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.Provider] = kwargs.pop("cls", None)
request = build_providers_get_at_tenant_scope_request(
resource_provider_namespace=resource_provider_namespace,
expand=expand,
api_version=api_version,
template_url=self.get_at_tenant_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Provider", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {"url": "/providers/{resourceProviderNamespace}"}
class ResourcesOperations: # pylint: disable=too-many-public-methods
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.resource.resources.v2019_07_01.aio.ResourceManagementClient`'s
:attr:`resources` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
expand: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.GenericResourceExpanded"]:
"""Get all the resources for a resource group.
:param resource_group_name: The resource group with the resources to get. Required.
:type resource_group_name: str
:param filter: The filter to apply on the operation.:code:`<br>`:code:`<br>`The properties you
can use for eq (equals) or ne (not equals) are: location, resourceType, name, resourceGroup,
identity, identity/principalId, plan, plan/publisher, plan/product, plan/name, plan/version,
and plan/promotionCode.:code:`<br>`:code:`<br>`For example, to filter by a resource type, use:
$filter=resourceType eq 'Microsoft.Network/virtualNetworks':code:`<br>`:code:`<br>`You can use
substringof(value, property) in the filter. The properties you can use for substring are: name
and resourceGroup.:code:`<br>`:code:`<br>`For example, to get all resources with 'demo'
anywhere in the name, use: $filter=substringof('demo', name):code:`<br>`:code:`<br>`You can
link more than one substringof together by adding and/or operators.:code:`<br>`:code:`<br>`You
can filter by tag names and values. For example, to filter for a tag name and value, use
$filter=tagName eq 'tag1' and tagValue eq 'Value1':code:`<br>`:code:`<br>`You can use some
properties together when filtering. The combinations you can use are: substringof and/or
resourceType, plan and plan/publisher and plan/name, identity and identity/principalId. Default
value is None.
:type filter: str
:param expand: Comma-separated list of additional properties to be included in the response.
Valid values include ``createdTime``\ , ``changedTime`` and ``provisioningState``. For example,
``$expand=createdTime,changedTime``. Default value is None.
:type expand: str
:param top: The number of results to return. If null is passed, returns all resources. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GenericResourceExpanded or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResourceExpanded]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.ResourceListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_resources_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
expand=expand,
top=top,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources"
}
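# Usage sketch: the ``filter`` string is the raw OData expression described
# above (hypothetical names, authenticated ``client``):
#
#     async for res in client.resources.list_by_resource_group(
#         "example-rg",
#         filter="resourceType eq 'Microsoft.Network/virtualNetworks'",
#         expand="createdTime,changedTime",
#     ):
#         print(res.name, res.created_time)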
async def _move_resources_initial( # pylint: disable=inconsistent-return-statements
self, source_resource_group_name: str, parameters: Union[_models.ResourcesMoveInfo, IO], **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ResourcesMoveInfo")
request = build_resources_move_resources_request(
source_resource_group_name=source_resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._move_resources_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_move_resources_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"
}
@overload
async def begin_move_resources(
self,
source_resource_group_name: str,
parameters: _models.ResourcesMoveInfo,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Moves resources from one resource group to another resource group.
The resources to move must be in the same source resource group. The target resource group may
be in a different subscription. When moving resources, both the source group and the target
group are locked for the duration of the operation. Write and delete operations are blocked on
the groups until the move completes.
:param source_resource_group_name: The name of the resource group containing the resources to
move. Required.
:type source_resource_group_name: str
:param parameters: Parameters for moving resources. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourcesMoveInfo
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_move_resources(
self, source_resource_group_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[None]:
"""Moves resources from one resource group to another resource group.
The resources to move must be in the same source resource group. The target resource group may
be in a different subscription. When moving resources, both the source group and the target
group are locked for the duration of the operation. Write and delete operations are blocked on
the groups until the move completes.
:param source_resource_group_name: The name of the resource group containing the resources to
move. Required.
:type source_resource_group_name: str
:param parameters: Parameters for moving resources. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_move_resources(
self, source_resource_group_name: str, parameters: Union[_models.ResourcesMoveInfo, IO], **kwargs: Any
) -> AsyncLROPoller[None]:
"""Moves resources from one resource group to another resource group.
The resources to move must be in the same source resource group. The target resource group may
be in a different subscription. When moving resources, both the source group and the target
group are locked for the duration of the operation. Write and delete operations are blocked on
the groups until the move completes.
:param source_resource_group_name: The name of the resource group containing the resources to
move. Required.
:type source_resource_group_name: str
:param parameters: Parameters for moving resources. Is either a ResourcesMoveInfo type or an IO
type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourcesMoveInfo or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._move_resources_initial( # type: ignore
source_resource_group_name=source_resource_group_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_move_resources.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"
}
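    # Usage sketch (not part of the generated code): ``client`` is assumed to be
    # an authenticated ResourceManagementClient from the ``aio`` namespace, and
    # the resource IDs are placeholders.
    #
    #     move_info = ResourcesMoveInfo(
    #         resources=[web_app_id, app_plan_id],
    #         target_resource_group=target_rg_id,
    #     )
    #     poller = await client.resources.begin_move_resources(
    #         source_resource_group_name="source-rg",
    #         parameters=move_info,
    #     )
    #     await poller.result()  # both groups stay locked until this completes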
async def _validate_move_resources_initial( # pylint: disable=inconsistent-return-statements
self, source_resource_group_name: str, parameters: Union[_models.ResourcesMoveInfo, IO], **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ResourcesMoveInfo")
request = build_resources_validate_move_resources_request(
source_resource_group_name=source_resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._validate_move_resources_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_validate_move_resources_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources"
}
@overload
async def begin_validate_move_resources(
self,
source_resource_group_name: str,
parameters: _models.ResourcesMoveInfo,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Validates whether resources can be moved from one resource group to another resource group.
This operation checks whether the specified resources can be moved to the target. The resources
to move must be in the same source resource group. The target resource group may be in a
different subscription. If validation succeeds, it returns HTTP response code 204 (no content).
If validation fails, it returns HTTP response code 409 (Conflict) with an error message.
Retrieve the URL in the Location header value to check the result of the long-running
operation.
:param source_resource_group_name: The name of the resource group containing the resources to
validate for move. Required.
:type source_resource_group_name: str
:param parameters: Parameters for moving resources. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourcesMoveInfo
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_validate_move_resources(
self, source_resource_group_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[None]:
"""Validates whether resources can be moved from one resource group to another resource group.
This operation checks whether the specified resources can be moved to the target. The resources
to move must be in the same source resource group. The target resource group may be in a
different subscription. If validation succeeds, it returns HTTP response code 204 (no content).
If validation fails, it returns HTTP response code 409 (Conflict) with an error message.
Retrieve the URL in the Location header value to check the result of the long-running
operation.
:param source_resource_group_name: The name of the resource group containing the resources to
validate for move. Required.
:type source_resource_group_name: str
:param parameters: Parameters for moving resources. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_validate_move_resources(
self, source_resource_group_name: str, parameters: Union[_models.ResourcesMoveInfo, IO], **kwargs: Any
) -> AsyncLROPoller[None]:
"""Validates whether resources can be moved from one resource group to another resource group.
This operation checks whether the specified resources can be moved to the target. The resources
to move must be in the same source resource group. The target resource group may be in a
different subscription. If validation succeeds, it returns HTTP response code 204 (no content).
If validation fails, it returns HTTP response code 409 (Conflict) with an error message.
Retrieve the URL in the Location header value to check the result of the long-running
operation.
:param source_resource_group_name: The name of the resource group containing the resources to
validate for move. Required.
:type source_resource_group_name: str
        :param parameters: Parameters for moving resources. Is either a ResourcesMoveInfo type or an
         IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourcesMoveInfo or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._validate_move_resources_initial( # type: ignore
source_resource_group_name=source_resource_group_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_validate_move_resources.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources"
}
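    # Usage sketch (assumed names as above): a successful validation resolves to
    # None; a failed one surfaces as HttpResponseError (409 Conflict) when the
    # operation outcome is awaited.
    #
    #     poller = await client.resources.begin_validate_move_resources(
    #         source_resource_group_name="source-rg",
    #         parameters=ResourcesMoveInfo(
    #             resources=[web_app_id],
    #             target_resource_group=target_rg_id,
    #         ),
    #     )
    #     try:
    #         await poller.result()
    #     except HttpResponseError as exc:
    #         print("move would fail:", exc.message)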
@distributed_trace
def list(
self, filter: Optional[str] = None, expand: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.GenericResourceExpanded"]:
"""Get all the resources in a subscription.
:param filter: The filter to apply on the operation.:code:`<br>`:code:`<br>`The properties you
can use for eq (equals) or ne (not equals) are: location, resourceType, name, resourceGroup,
identity, identity/principalId, plan, plan/publisher, plan/product, plan/name, plan/version,
and plan/promotionCode.:code:`<br>`:code:`<br>`For example, to filter by a resource type, use:
$filter=resourceType eq 'Microsoft.Network/virtualNetworks':code:`<br>`:code:`<br>`You can use
substringof(value, property) in the filter. The properties you can use for substring are: name
and resourceGroup.:code:`<br>`:code:`<br>`For example, to get all resources with 'demo'
anywhere in the name, use: $filter=substringof('demo', name):code:`<br>`:code:`<br>`You can
link more than one substringof together by adding and/or operators.:code:`<br>`:code:`<br>`You
can filter by tag names and values. For example, to filter for a tag name and value, use
$filter=tagName eq 'tag1' and tagValue eq 'Value1':code:`<br>`:code:`<br>`You can use some
properties together when filtering. The combinations you can use are: substringof and/or
resourceType, plan and plan/publisher and plan/name, identity and identity/principalId. Default
value is None.
:type filter: str
:param expand: Comma-separated list of additional properties to be included in the response.
Valid values include ``createdTime``\ , ``changedTime`` and ``provisioningState``. For example,
``$expand=createdTime,changedTime``. Default value is None.
:type expand: str
:param top: The number of results to return. If null is passed, returns all resources. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either GenericResourceExpanded or the result of
         cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResourceExpanded]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.ResourceListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_resources_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
expand=expand,
top=top,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resources"}
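    # Usage sketch (assumed client name): the returned AsyncItemPaged is consumed
    # with ``async for``; page requests are issued lazily as iteration advances.
    #
    #     async for resource in client.resources.list(
    #         filter="resourceType eq 'Microsoft.Network/virtualNetworks'",
    #         expand="createdTime,changedTime",
    #     ):
    #         print(resource.id, resource.created_time)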
@distributed_trace_async
async def check_existence(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> bool:
"""Checks whether a resource exists.
:param resource_group_name: The name of the resource group containing the resource to check.
The name is case insensitive. Required.
:type resource_group_name: str
:param resource_provider_namespace: The resource provider of the resource to check. Required.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity. Required.
:type parent_resource_path: str
:param resource_type: The resource type. Required.
:type resource_type: str
:param resource_name: The name of the resource to check whether it exists. Required.
:type resource_name: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_resources_check_existence_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
        # Allowed codes are 204 (exists) and 404 (missing); only 204 is in the
        # 2xx range, so this maps existence onto the returned bool.
        return 200 <= response.status_code <= 299
check_existence.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"
}
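    # Usage sketch (assumed names; the api_version placeholder must match the
    # resource provider): a HEAD-style check that never raises for 204/404.
    #
    #     exists = await client.resources.check_existence(
    #         resource_group_name="my-rg",
    #         resource_provider_namespace="Microsoft.Storage",
    #         parent_resource_path="",
    #         resource_type="storageAccounts",
    #         resource_name="mystorageacct",
    #         api_version="2019-06-01",
    #     )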
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_resources_delete_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"
}
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a resource.
:param resource_group_name: The name of the resource group that contains the resource to
delete. The name is case insensitive. Required.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider. Required.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity. Required.
:type parent_resource_path: str
:param resource_type: The resource type. Required.
:type resource_type: str
:param resource_name: The name of the resource to delete. Required.
:type resource_name: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"
}
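    # Usage sketch (assumed names): deletion is a long-running operation; the
    # poller resolves to None once the service reports completion.
    #
    #     poller = await client.resources.begin_delete(
    #         resource_group_name="my-rg",
    #         resource_provider_namespace="Microsoft.Storage",
    #         parent_resource_path="",
    #         resource_type="storageAccounts",
    #         resource_name="mystorageacct",
    #         api_version="2019-06-01",
    #     )
    #     await poller.result()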
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: Union[_models.GenericResource, IO],
**kwargs: Any
) -> Optional[_models.GenericResource]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.GenericResource]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "GenericResource")
request = build_resources_create_or_update_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("GenericResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("GenericResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"
}
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: _models.GenericResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Creates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive. Required.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider. Required.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity. Required.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to create. Required.
:type resource_type: str
:param resource_name: The name of the resource to create. Required.
:type resource_name: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:param parameters: Parameters for creating or updating the resource. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Creates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive. Required.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider. Required.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity. Required.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to create. Required.
:type resource_type: str
:param resource_name: The name of the resource to create. Required.
:type resource_name: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:param parameters: Parameters for creating or updating the resource. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: Union[_models.GenericResource, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Creates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive. Required.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider. Required.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity. Required.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to create. Required.
:type resource_type: str
:param resource_name: The name of the resource to create. Required.
:type resource_name: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
        :param parameters: Parameters for creating or updating the resource. Is either a
         GenericResource type or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.GenericResource] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("GenericResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"
}
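    # Usage sketch (assumed names): ``parameters`` may be a GenericResource model
    # or a raw IO/bytes payload; the poller resolves to the deserialized
    # GenericResource.
    #
    #     poller = await client.resources.begin_create_or_update(
    #         resource_group_name="my-rg",
    #         resource_provider_namespace="Microsoft.Network",
    #         parent_resource_path="",
    #         resource_type="virtualNetworks",
    #         resource_name="my-vnet",
    #         api_version="2019-11-01",
    #         parameters=GenericResource(
    #             location="westus",
    #             properties={"addressSpace": {"addressPrefixes": ["10.0.0.0/16"]}},
    #         ),
    #     )
    #     vnet = await poller.result()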
async def _update_initial(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: Union[_models.GenericResource, IO],
**kwargs: Any
) -> Optional[_models.GenericResource]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.GenericResource]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "GenericResource")
request = build_resources_update_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("GenericResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"
}
@overload
async def begin_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: _models.GenericResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Updates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive. Required.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider. Required.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity. Required.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to update. Required.
:type resource_type: str
:param resource_name: The name of the resource to update. Required.
:type resource_name: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:param parameters: Parameters for updating the resource. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Updates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive. Required.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider. Required.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity. Required.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to update. Required.
:type resource_type: str
:param resource_name: The name of the resource to update. Required.
:type resource_name: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:param parameters: Parameters for updating the resource. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: Union[_models.GenericResource, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Updates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive. Required.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider. Required.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity. Required.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to update. Required.
:type resource_type: str
:param resource_name: The name of the resource to update. Required.
:type resource_name: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
        :param parameters: Parameters for updating the resource. Is either a GenericResource type or
         an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.GenericResource] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("GenericResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"
}
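    # Usage sketch (assumed names): update is a PATCH, so only the supplied
    # fields change; here only the tags are replaced.
    #
    #     poller = await client.resources.begin_update(
    #         resource_group_name="my-rg",
    #         resource_provider_namespace="Microsoft.Network",
    #         parent_resource_path="",
    #         resource_type="virtualNetworks",
    #         resource_name="my-vnet",
    #         api_version="2019-11-01",
    #         parameters=GenericResource(tags={"env": "dev"}),
    #     )
    #     updated = await poller.result()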
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> _models.GenericResource:
"""Gets a resource.
:param resource_group_name: The name of the resource group containing the resource to get. The
name is case insensitive. Required.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider. Required.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity. Required.
:type parent_resource_path: str
:param resource_type: The resource type of the resource. Required.
:type resource_type: str
:param resource_name: The name of the resource to get. Required.
:type resource_name: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GenericResource or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[_models.GenericResource] = kwargs.pop("cls", None)
request = build_resources_get_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("GenericResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"
}
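    # Usage sketch (assumed names):
    #
    #     vnet = await client.resources.get(
    #         resource_group_name="my-rg",
    #         resource_provider_namespace="Microsoft.Network",
    #         parent_resource_path="",
    #         resource_type="virtualNetworks",
    #         resource_name="my-vnet",
    #         api_version="2019-11-01",
    #     )
    #     print(vnet.location, vnet.tags)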
@distributed_trace_async
async def check_existence_by_id(self, resource_id: str, api_version: str, **kwargs: Any) -> bool:
"""Checks by ID whether a resource exists.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
Required.
:type resource_id: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_resources_check_existence_by_id_request(
resource_id=resource_id,
api_version=api_version,
template_url=self.check_existence_by_id.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
        # As in check_existence: 204 (exists) -> True, 404 (missing) -> False.
        return 200 <= response.status_code <= 299
check_existence_by_id.metadata = {"url": "/{resourceId}"}
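    # Usage sketch (assumed names): the *_by_id variants take the fully qualified
    # resource ID instead of the group/type/name components.
    #
    #     exists = await client.resources.check_existence_by_id(
    #         resource_id=vnet_id,  # e.g. "/subscriptions/.../virtualNetworks/my-vnet"
    #         api_version="2019-11-01",
    #     )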
async def _delete_by_id_initial( # pylint: disable=inconsistent-return-statements
self, resource_id: str, api_version: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_resources_delete_by_id_request(
resource_id=resource_id,
api_version=api_version,
template_url=self._delete_by_id_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_by_id_initial.metadata = {"url": "/{resourceId}"}
@distributed_trace_async
async def begin_delete_by_id(self, resource_id: str, api_version: str, **kwargs: Any) -> AsyncLROPoller[None]:
"""Deletes a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
Required.
:type resource_id: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_by_id_initial( # type: ignore
resource_id=resource_id,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete_by_id.metadata = {"url": "/{resourceId}"}
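    # Usage sketch (assumed names):
    #
    #     poller = await client.resources.begin_delete_by_id(
    #         resource_id=vnet_id,  # fully qualified ID, e.g. taken from list()
    #         api_version="2019-11-01",
    #     )
    #     await poller.result()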
async def _create_or_update_by_id_initial(
self, resource_id: str, api_version: str, parameters: Union[_models.GenericResource, IO], **kwargs: Any
) -> Optional[_models.GenericResource]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.GenericResource]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "GenericResource")
request = build_resources_create_or_update_by_id_request(
resource_id=resource_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_by_id_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("GenericResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("GenericResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_by_id_initial.metadata = {"url": "/{resourceId}"}
@overload
async def begin_create_or_update_by_id(
self,
resource_id: str,
api_version: str,
parameters: _models.GenericResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Create a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
Required.
:type resource_id: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:param parameters: Create or update resource parameters. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update_by_id(
self,
resource_id: str,
api_version: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Create a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
Required.
:type resource_id: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:param parameters: Create or update resource parameters. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False to
 disable polling for this operation, or pass in your own initialized polling object for a
 custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update_by_id(
self, resource_id: str, api_version: str, parameters: Union[_models.GenericResource, IO], **kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Create a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
Required.
:type resource_id: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:param parameters: Create or update resource parameters. Is either a GenericResource type or an
 IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False to
 disable polling for this operation, or pass in your own initialized polling object for a
 custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.GenericResource] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_by_id_initial(
resource_id=resource_id,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("GenericResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update_by_id.metadata = {"url": "/{resourceId}"}
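# Usage sketch for begin_create_or_update_by_id (illustrative only, not part of the
# generated client). The subscription ID, resource ID, and resource body below are
# placeholders; the credential setup assumes the azure-identity package is installed.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.resource.resources.v2019_07_01.aio import ResourceManagementClient
#   from azure.mgmt.resource.resources.v2019_07_01.models import GenericResource
#
#   async with ResourceManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#       poller = await client.resources.begin_create_or_update_by_id(
#           resource_id="<fully-qualified-resource-id>",
#           api_version="2019-07-01",
#           parameters=GenericResource(location="eastus"),
#       )
#       resource = await poller.result()  # GenericResource once the LRO completes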
async def _update_by_id_initial(
self, resource_id: str, api_version: str, parameters: Union[_models.GenericResource, IO], **kwargs: Any
) -> Optional[_models.GenericResource]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.GenericResource]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "GenericResource")
request = build_resources_update_by_id_request(
resource_id=resource_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_by_id_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("GenericResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_by_id_initial.metadata = {"url": "/{resourceId}"}
@overload
async def begin_update_by_id(
self,
resource_id: str,
api_version: str,
parameters: _models.GenericResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Updates a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
Required.
:type resource_id: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:param parameters: Update resource parameters. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False to
 disable polling for this operation, or pass in your own initialized polling object for a
 custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update_by_id(
self,
resource_id: str,
api_version: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Updates a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
Required.
:type resource_id: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:param parameters: Update resource parameters. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False to
 disable polling for this operation, or pass in your own initialized polling object for a
 custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update_by_id(
self, resource_id: str, api_version: str, parameters: Union[_models.GenericResource, IO], **kwargs: Any
) -> AsyncLROPoller[_models.GenericResource]:
"""Updates a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
Required.
:type resource_id: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:param parameters: Update resource parameters. Is either a GenericResource type or an IO type.
Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False to
 disable polling for this operation, or pass in your own initialized polling object for a
 custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.GenericResource] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._update_by_id_initial(
resource_id=resource_id,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("GenericResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update_by_id.metadata = {"url": "/{resourceId}"}
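# Usage sketch for begin_update_by_id (illustrative; all names are placeholders). This
# reuses the client/model imports from the sketch above; the LRO pattern is the same:
# await the method to get the poller, then await poller.result().
#
#   poller = await client.resources.begin_update_by_id(
#       resource_id="<fully-qualified-resource-id>",
#       api_version="2019-07-01",
#       parameters=GenericResource(tags={"env": "dev"}),
#   )
#   updated = await poller.result()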
@distributed_trace_async
async def get_by_id(self, resource_id: str, api_version: str, **kwargs: Any) -> _models.GenericResource:
"""Gets a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
Required.
:type resource_id: str
:param api_version: The API version to use for the operation. Required.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GenericResource or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.GenericResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[_models.GenericResource] = kwargs.pop("cls", None)
request = build_resources_get_by_id_request(
resource_id=resource_id,
api_version=api_version,
template_url=self.get_by_id.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("GenericResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {"url": "/{resourceId}"}
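# Usage sketch for get_by_id (illustrative; the resource ID is a placeholder):
#
#   resource = await client.resources.get_by_id(
#       "<fully-qualified-resource-id>", api_version="2019-07-01"
#   )
#   print(resource.name, resource.location)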
class ResourceGroupsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.resource.resources.v2019_07_01.aio.ResourceManagementClient`'s
:attr:`resource_groups` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def check_existence(self, resource_group_name: str, **kwargs: Any) -> bool:
"""Checks whether a resource group exists.
:param resource_group_name: The name of the resource group to check. The name is case
insensitive. Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_resource_groups_check_existence_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
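# Usage sketch for check_existence (illustrative). Per the status handling above, a
# 204 from the service maps to True and a 404 to False, so the call itself does not
# raise for a missing group:
#
#   exists = await client.resource_groups.check_existence("my-resource-group")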
@overload
async def create_or_update(
self,
resource_group_name: str,
parameters: _models.ResourceGroup,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ResourceGroup:
"""Creates or updates a resource group.
:param resource_group_name: The name of the resource group to create or update. Can include
alphanumeric characters, underscores, parentheses, hyphens, and periods (except at the end),
as well as Unicode characters that match the allowed-characters pattern. Required.
:type resource_group_name: str
:param parameters: Parameters supplied to create or update a resource group. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroup
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self, resource_group_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> _models.ResourceGroup:
"""Creates or updates a resource group.
:param resource_group_name: The name of the resource group to create or update. Can include
alphanumeric characters, underscores, parentheses, hyphens, and periods (except at the end),
as well as Unicode characters that match the allowed-characters pattern. Required.
:type resource_group_name: str
:param parameters: Parameters supplied to create or update a resource group. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self, resource_group_name: str, parameters: Union[_models.ResourceGroup, IO], **kwargs: Any
) -> _models.ResourceGroup:
"""Creates or updates a resource group.
:param resource_group_name: The name of the resource group to create or update. Can include
alphanumeric characters, underscores, parentheses, hyphens, and periods (except at the end),
as well as Unicode characters that match the allowed-characters pattern. Required.
:type resource_group_name: str
:param parameters: Parameters supplied to create or update a resource group. Is either a
ResourceGroup type or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroup or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ResourceGroup] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ResourceGroup")
request = build_resource_groups_create_or_update_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("ResourceGroup", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("ResourceGroup", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
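# Usage sketch for create_or_update (illustrative; the group name and location are
# placeholders). The typed ResourceGroup model is shown; per the overloads above, a
# raw IO body with an explicit content_type is the alternative.
#
#   from azure.mgmt.resource.resources.v2019_07_01.models import ResourceGroup
#
#   group = await client.resource_groups.create_or_update(
#       "my-resource-group", ResourceGroup(location="eastus")
#   )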
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_resource_groups_delete_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
@distributed_trace_async
async def begin_delete(self, resource_group_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
"""Deletes a resource group.
When you delete a resource group, all of its resources are also deleted. Deleting a resource
group deletes all of its template deployments and currently stored operations.
:param resource_group_name: The name of the resource group to delete. The name is case
insensitive. Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False to
 disable polling for this operation, or pass in your own initialized polling object for a
 custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
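# Usage sketch for begin_delete (illustrative). Deletion is long-running; awaiting
# result() blocks until the group and everything in it is gone:
#
#   poller = await client.resource_groups.begin_delete("my-resource-group")
#   await poller.result()  # returns None on success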
@distributed_trace_async
async def get(self, resource_group_name: str, **kwargs: Any) -> _models.ResourceGroup:
"""Gets a resource group.
:param resource_group_name: The name of the resource group to get. The name is case
insensitive. Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.ResourceGroup] = kwargs.pop("cls", None)
request = build_resource_groups_get_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ResourceGroup", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
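# Usage sketch for get (illustrative):
#
#   group = await client.resource_groups.get("my-resource-group")
#   print(group.location, group.tags)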
@overload
async def update(
self,
resource_group_name: str,
parameters: _models.ResourceGroupPatchable,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ResourceGroup:
"""Updates a resource group.
Resource groups can be updated through a simple PATCH operation to a group address. The format
of the request is the same as that for creating a resource group. If a field is unspecified,
the current value is retained.
:param resource_group_name: The name of the resource group to update. The name is case
insensitive. Required.
:type resource_group_name: str
:param parameters: Parameters supplied to update a resource group. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroupPatchable
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self, resource_group_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> _models.ResourceGroup:
"""Updates a resource group.
Resource groups can be updated through a simple PATCH operation to a group address. The format
of the request is the same as that for creating a resource group. If a field is unspecified,
the current value is retained.
:param resource_group_name: The name of the resource group to update. The name is case
insensitive. Required.
:type resource_group_name: str
:param parameters: Parameters supplied to update a resource group. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self, resource_group_name: str, parameters: Union[_models.ResourceGroupPatchable, IO], **kwargs: Any
) -> _models.ResourceGroup:
"""Updates a resource group.
Resource groups can be updated through a simple PATCH operation to a group address. The format
of the request is the same as that for creating a resource group. If a field is unspecified,
the current value is retained.
:param resource_group_name: The name of the resource group to update. The name is case
insensitive. Required.
:type resource_group_name: str
:param parameters: Parameters supplied to update a resource group. Is either a
ResourceGroupPatchable type or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroupPatchable or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ResourceGroup] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ResourceGroupPatchable")
request = build_resource_groups_update_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ResourceGroup", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
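# Usage sketch for update (illustrative). As the docstring notes, only the fields set
# on ResourceGroupPatchable are PATCHed; unspecified fields keep their current values:
#
#   from azure.mgmt.resource.resources.v2019_07_01.models import ResourceGroupPatchable
#
#   group = await client.resource_groups.update(
#       "my-resource-group", ResourceGroupPatchable(tags={"env": "dev"})
#   )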
@overload
async def export_template(
self,
resource_group_name: str,
parameters: _models.ExportTemplateRequest,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ResourceGroupExportResult:
"""Captures the specified resource group as a template.
:param resource_group_name: The name of the resource group to export as a template. Required.
:type resource_group_name: str
:param parameters: Parameters for exporting the template. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.ExportTemplateRequest
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroupExportResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroupExportResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def export_template(
self, resource_group_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> _models.ResourceGroupExportResult:
"""Captures the specified resource group as a template.
:param resource_group_name: The name of the resource group to export as a template. Required.
:type resource_group_name: str
:param parameters: Parameters for exporting the template. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroupExportResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroupExportResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def export_template(
self, resource_group_name: str, parameters: Union[_models.ExportTemplateRequest, IO], **kwargs: Any
) -> _models.ResourceGroupExportResult:
"""Captures the specified resource group as a template.
:param resource_group_name: The name of the resource group to export as a template. Required.
:type resource_group_name: str
:param parameters: Parameters for exporting the template. Is either an ExportTemplateRequest
type or an IO type. Required.
:type parameters: ~azure.mgmt.resource.resources.v2019_07_01.models.ExportTemplateRequest or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroupExportResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroupExportResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ResourceGroupExportResult] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ExportTemplateRequest")
request = build_resource_groups_export_template_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.export_template.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ResourceGroupExportResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate"
}
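# Usage sketch for export_template (illustrative). ExportTemplateRequest carries the
# resource IDs to export; the "*" wildcard for all resources in the group is an
# assumption taken from the ARM export-template contract, not from this module:
#
#   from azure.mgmt.resource.resources.v2019_07_01.models import ExportTemplateRequest
#
#   result = await client.resource_groups.export_template(
#       "my-resource-group", ExportTemplateRequest(resources=["*"])
#   )
#   template = result.template  # dict form of the generated ARM template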
@distributed_trace
def list(
self, filter: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.ResourceGroup"]:
"""Gets all the resource groups for a subscription.
:param filter: The filter to apply on the operation. You can filter by tag names and values.
 For example, to filter for a tag name and value, use $filter=tagName eq 'tag1' and tagValue eq
 'Value1'. Default value is None.
:type filter: str
:param top: The number of results to return. If null is passed, returns all resource groups.
Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either ResourceGroup or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.ResourceGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.ResourceGroupListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_resource_groups_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups"}
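# Usage sketch for list (illustrative). The return value is an AsyncItemPaged, so
# iterate with async for; the filter string follows the $filter grammar quoted in the
# docstring above:
#
#   async for group in client.resource_groups.list(
#       filter="tagName eq 'env' and tagValue eq 'dev'"
#   ):
#       print(group.name)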
class TagsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.resource.resources.v2019_07_01.aio.ResourceManagementClient`'s
:attr:`tags` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def delete_value( # pylint: disable=inconsistent-return-statements
self, tag_name: str, tag_value: str, **kwargs: Any
) -> None:
"""Deletes a tag value.
:param tag_name: The name of the tag. Required.
:type tag_name: str
:param tag_value: The value of the tag to delete. Required.
:type tag_value: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_tags_delete_value_request(
tag_name=tag_name,
tag_value=tag_value,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete_value.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_value.metadata = {"url": "/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"}
@distributed_trace_async
async def create_or_update_value(self, tag_name: str, tag_value: str, **kwargs: Any) -> _models.TagValue:
"""Creates a tag value. The name of the tag must already exist.
:param tag_name: The name of the tag. Required.
:type tag_name: str
:param tag_value: The value of the tag to create. Required.
:type tag_value: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagValue or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.TagValue
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.TagValue] = kwargs.pop("cls", None)
request = build_tags_create_or_update_value_request(
tag_name=tag_name,
tag_value=tag_value,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.create_or_update_value.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("TagValue", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("TagValue", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
create_or_update_value.metadata = {"url": "/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"}
@distributed_trace_async
async def create_or_update(self, tag_name: str, **kwargs: Any) -> _models.TagDetails:
"""Creates a tag in the subscription.
The tag name can have a maximum of 512 characters and is case insensitive. Tag names created by
Azure have prefixes of microsoft, azure, or windows. You cannot create tags with one of these
prefixes.
:param tag_name: The name of the tag to create. Required.
:type tag_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagDetails or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.TagDetails
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.TagDetails] = kwargs.pop("cls", None)
request = build_tags_create_or_update_request(
tag_name=tag_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("TagDetails", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("TagDetails", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/tagNames/{tagName}"}
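# Usage sketch for the two tag-creation calls (illustrative). As the docstrings note,
# the tag name must exist before a value can be attached to it:
#
#   tag = await client.tags.create_or_update("env")                 # TagDetails
#   value = await client.tags.create_or_update_value("env", "dev")  # TagValue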
@distributed_trace_async
async def delete(self, tag_name: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements
"""Deletes a tag from the subscription.
You must remove all values from a resource tag before you can delete it.
:param tag_name: The name of the tag. Required.
:type tag_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_tags_delete_request(
tag_name=tag_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/tagNames/{tagName}"}
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.TagDetails"]:
"""Gets the names and values of all resource tags that are defined in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either TagDetails or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.TagDetails]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.TagsListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_tags_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("TagsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/tagNames"}
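# Usage sketch for tags list (illustrative). Each page element is a TagDetails
# carrying the tag name, usage count, and known values:
#
#   async for tag in client.tags.list():
#       print(tag.tag_name, [v.tag_value for v in tag.values])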
class DeploymentOperationsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.resource.resources.v2019_07_01.aio.ResourceManagementClient`'s
:attr:`deployment_operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get_at_scope(
self, scope: str, deployment_name: str, operation_id: str, **kwargs: Any
) -> _models.DeploymentOperation:
"""Gets a deployments operation.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param operation_id: The ID of the operation to get. Required.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentOperation
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentOperation] = kwargs.pop("cls", None)
request = build_deployment_operations_get_at_scope_request(
scope=scope,
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentOperation", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_scope.metadata = {
"url": "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"
}
@distributed_trace
def list_at_scope(
self, scope: str, deployment_name: str, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.DeploymentOperation"]:
"""Gets all deployments operations for a deployment.
:param scope: The scope of a deployment. Required.
:type scope: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentOperation or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentOperation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentOperationsListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_scope_request(
scope=scope,
deployment_name=deployment_name,
top=top,
api_version=api_version,
template_url=self.list_at_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_at_scope.metadata = {"url": "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"}
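# A paging sketch for list_at_scope, under the same assumptions as the
# get_at_scope example above: the method itself is synchronous and returns an
# AsyncItemPaged, so results are consumed with `async for`.
#
#   async def list_operations(client: "ResourceManagementClient") -> None:
#       async for operation in client.deployment_operations.list_at_scope(
#           scope="subscriptions/<subscription-id>",
#           deployment_name="example-deployment",
#           top=10,
#       ):
#           print(operation.operation_id)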
@distributed_trace_async
async def get_at_tenant_scope(
self, deployment_name: str, operation_id: str, **kwargs: Any
) -> _models.DeploymentOperation:
"""Gets a deployments operation.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param operation_id: The ID of the operation to get. Required.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentOperation
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentOperation] = kwargs.pop("cls", None)
request = build_deployment_operations_get_at_tenant_scope_request(
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_tenant_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentOperation", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {
"url": "/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"
}
@distributed_trace
def list_at_tenant_scope(
self, deployment_name: str, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.DeploymentOperation"]:
"""Gets all deployments operations for a deployment.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentOperation or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentOperation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentOperationsListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_tenant_scope_request(
deployment_name=deployment_name,
top=top,
api_version=api_version,
template_url=self.list_at_tenant_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_at_tenant_scope.metadata = {"url": "/providers/Microsoft.Resources/deployments/{deploymentName}/operations"}
@distributed_trace_async
async def get_at_management_group_scope(
self, group_id: str, deployment_name: str, operation_id: str, **kwargs: Any
) -> _models.DeploymentOperation:
"""Gets a deployments operation.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param operation_id: The ID of the operation to get. Required.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentOperation
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentOperation] = kwargs.pop("cls", None)
request = build_deployment_operations_get_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_management_group_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentOperation", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_management_group_scope.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"
}
@distributed_trace
def list_at_management_group_scope(
self, group_id: str, deployment_name: str, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.DeploymentOperation"]:
"""Gets all deployments operations for a deployment.
:param group_id: The management group ID. Required.
:type group_id: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentOperation or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentOperation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentOperationsListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
top=top,
api_version=api_version,
template_url=self.list_at_management_group_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_at_management_group_scope.metadata = {
"url": "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"
}
@distributed_trace_async
async def get_at_subscription_scope(
self, deployment_name: str, operation_id: str, **kwargs: Any
) -> _models.DeploymentOperation:
"""Gets a deployments operation.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param operation_id: The ID of the operation to get. Required.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentOperation
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentOperation] = kwargs.pop("cls", None)
request = build_deployment_operations_get_at_subscription_scope_request(
deployment_name=deployment_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_at_subscription_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentOperation", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"
}
@distributed_trace
def list_at_subscription_scope(
self, deployment_name: str, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.DeploymentOperation"]:
"""Gets all deployments operations for a deployment.
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentOperation or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentOperation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentOperationsListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
top=top,
api_version=api_version,
template_url=self.list_at_subscription_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_at_subscription_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"
}
@distributed_trace_async
async def get(
self, resource_group_name: str, deployment_name: str, operation_id: str, **kwargs: Any
) -> _models.DeploymentOperation:
"""Gets a deployments operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param operation_id: The ID of the operation to get. Required.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentOperation
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentOperation] = kwargs.pop("cls", None)
request = build_deployment_operations_get_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentOperation", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}"
}
@distributed_trace
def list(
self, resource_group_name: str, deployment_name: str, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.DeploymentOperation"]:
"""Gets all deployments operations for a deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param deployment_name: The name of the deployment. Required.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentOperation or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_07_01.models.DeploymentOperation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))
cls: ClsType[_models.DeploymentOperationsListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
top=top,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations"
}
|
PypiClean
|
/protobuf_to_pydantic-0.1.7.4-py3-none-any.whl/protobuf_to_pydantic/contrib/proto_parser.py
|
import json
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from lark import Lark, Token, Transformer, Tree
from lark.tree import ParseTree # type: ignore
BNF = r"""
OCTALDIGIT: "0..7"
IDENT: ( "_" )* LETTER ( LETTER | DECIMALDIGIT | "_" )*
FULLIDENT: IDENT ( "." IDENT )*
MESSAGENAME: IDENT
ENUMNAME: IDENT
FIELDNAME: IDENT
ONEOFNAME: IDENT
MAPNAME: IDENT
SERVICENAME: IDENT
TAGNAME: IDENT
TAGVALUE: IDENT
RPCNAME: IDENT
MESSAGETYPE: [ "." ] ( IDENT "." )* MESSAGENAME
ENUMTYPE: [ "." ] ( IDENT "." )* ENUMNAME
INTLIT : DECIMALLIT | OCTALLIT | HEXLIT
DECIMALLIT: ( "1".."9" ) ( DECIMALDIGIT )*
OCTALLIT : "0" ( OCTALDIGIT )*
HEXLIT : "0" ( "x" | "X" ) HEXDIGIT ( HEXDIGIT )*
FLOATLIT: ( DECIMALS "." [ DECIMALS ] [ EXPONENT ] | DECIMALS EXPONENT | "." DECIMALS [ EXPONENT ] ) | "inf" | "nan"
DECIMALS : DECIMALDIGIT ( DECIMALDIGIT )*
EXPONENT : ( "e" | "E" ) [ "+" | "-" ] DECIMALS
BOOLLIT: "true" | "false"
STRLIT: ( "'" ( CHARVALUE )* "'" ) | ( "\"" ( CHARVALUE )* "\"" )
CHARVALUE: HEXESCAPE | OCTESCAPE | CHARESCAPE | /[^\0\n\\]/
HEXESCAPE: "\\" ( "x" | "X" ) HEXDIGIT HEXDIGIT
OCTESCAPE: "\\" OCTALDIGIT OCTALDIGIT OCTALDIGIT
CHARESCAPE: "\\" ( "a" | "b" | "f" | "n" | "r" | "t" | "v" | "\\" | "'" | "\"" )
QUOTE: "'" | "\""
EMPTYSTATEMENT: ";"
CONSTANT: FULLIDENT | ( [ "-" | "+" ] INTLIT ) | ( [ "-" | "+" ] FLOATLIT ) | STRLIT | BOOLLIT
syntax: "syntax" "=" QUOTE "proto3" QUOTE ";"
import: "import" [ "weak" | "public" ] STRLIT ";"
package: "package" FULLIDENT ";"
option: "option" OPTIONNAME "=" CONSTANT ";"
OPTIONNAME: ( IDENT | "(" FULLIDENT ")" ) ( "." IDENT )*
TYPE: "double" | "float" | "int32" | "int64" | "uint32" | "uint64" | "sint32" | "sint64" | "fixed32"
| "fixed64" | "sfixed32" | "sfixed64" | "bool" | "string" | "bytes" | MESSAGETYPE | ENUMTYPE
FIELDNUMBER: INTLIT
field: [ comments ] TYPE FIELDNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] TAIL
fieldoptions: fieldoption ( "," fieldoption )*
fieldoption: OPTIONNAME "=" CONSTANT
repeatedfield: [ comments ] "repeated" field
oneof: "oneof" ONEOFNAME "{" ( oneoffield | EMPTYSTATEMENT )* "}"
oneoffield: TYPE FIELDNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] ";"
mapfield: [ comments ] "map" "<" KEYTYPE "," TYPE ">" MAPNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] TAIL
KEYTYPE: "int32" | "int64" | "uint32" | "uint64" | "sint32" | "sint64" | "fixed32" | "fixed64" | "sfixed32"
| "sfixed64" | "bool" | "string"
reserved: "reserved" ( ranges | fieldnames ) ";"
ranges: range ( "," range )*
range: INTLIT [ "to" ( INTLIT | "max" ) ]
fieldnames: FIELDNAME ( "," FIELDNAME )*
enum: [ comments ] "enum" ENUMNAME enumbody
enumbody: "{" ( enumfield | EMPTYSTATEMENT )* "}"
enumfield: [ COMMENTS ] IDENT "=" INTLIT [ "[" enumvalueoption ( "," enumvalueoption )* "]" ] TAIL
enumvalueoption: OPTIONNAME "=" CONSTANT
message: [ comments ] "message" MESSAGENAME messagebody
messagebody: "{" ( repeatedfield | field | enum | message | option | oneof | mapfield | reserved | EMPTYSTATEMENT )* "}"
googleoption: "option" "(google.api.http)" "=" "{" [ "post:" CONSTANT [ "body:" CONSTANT ] ] "}" ";"
service: [ comments ] "service" SERVICENAME "{" ( option | rpc | EMPTYSTATEMENT )* "}"
rpc: [ comments ] "rpc" RPCNAME "(" [ "stream" ] MESSAGETYPE ")" "returns" "(" [ "stream" ] MESSAGETYPE ")" \
( ( "{" ( googleoption | option | EMPTYSTATEMENT )* "}" ) | ";" )
proto: [ comments ] syntax ( import | package | option | topleveldef | EMPTYSTATEMENT )*
topleveldef: message | enum | service | comments
TAIL: ";" [/[\s|\t]/] [ COMMENT ]
COMMENT: "//" /.*/ [ "\n" ]
comments: COMMENT ( COMMENT )*
COMMENTS: COMMENT ( COMMENT )*
%import common.HEXDIGIT
%import common.DIGIT -> DECIMALDIGIT
%import common.LETTER
%import common.WS
%import common.NEWLINE
%ignore WS
"""
@dataclass
class Comment(object):
content: str
tags: Dict[str, Any]
@dataclass
class Field(object):
comment: Comment
type: str
key_type: str
val_type: str
name: str
number: int
@dataclass
class Enum(object):
comment: Comment
name: str
fields: Dict[str, Field]
@dataclass
class Message(object):
comment: Comment
name: str
fields: List[Field]
messages: Dict[str, "Message"]
enums: Dict[str, Enum]
@dataclass
class RpcFunc(object):
name: str
in_type: str
out_type: str
uri: str
@dataclass
class Service(object):
name: str
functions: List[RpcFunc]
@dataclass
class ProtoFile(object):
messages: Dict[str, Message]
enums: Dict[str, Enum]
services: Dict[str, Service]
imports: List[str]
options: Dict[str, str]
package: str
class ProtoTransformer(Transformer):
"""Converts syntax tree token into more easily usable namedtuple objects"""
@staticmethod
def message(tokens: list) -> Message:
"""Returns a Message namedtuple"""
comment: Comment = Comment("", {})
if len(tokens) < 3:
name_token, body = tokens
else:
comment, name_token, body = tokens
return Message(comment, name_token.value, *body)
@staticmethod
def messagebody(
items: List[Union[Message, Enum, Field]]
) -> Tuple[List[Field], Dict[str, Message], Dict[str, Enum]]:
"""Returns a tuple of message body namedtuples"""
messages: Dict[str, Message] = {}
enums: Dict[str, Enum] = {}
fields: List[Field] = []
for item in items:
if isinstance(item, Message):
messages[item.name] = item
elif isinstance(item, Enum):
enums[item.name] = item
elif isinstance(item, Field):
fields.append(item)
return fields, messages, enums
@staticmethod
def field(tokens: list) -> Field:
"""Returns a Field namedtuple"""
comment: Comment = Comment("", {})
type_token: Token = Token("TYPE", "")
field_name_token: Token = Token("FIELDNAME", "")
field_number_token: Token = Token("FIELDNUMBER", "")
for token in tokens:
if isinstance(token, Comment):
comment = token
elif isinstance(token, Token):
if token.type == "TYPE":
type_token = token
elif token.type == "FIELDNAME":
field_name_token = token
elif token.type == "FIELDNUMBER":
field_number_token = token
elif token.type == "COMMENT":
if not comment:
comment = Comment(token.value, {})
else:
comment.content += token.value
elif token.type == "TAIL" and "//" in token.value:
value = token.value.strip(";").strip()
if not comment:
comment = Comment(value, {})
else:
comment.content += value
return Field(
comment,
type_token.value,
type_token.value,
type_token.value,
field_name_token.value,
int(field_number_token.value),
)
@staticmethod
def repeatedfield(tokens: list) -> Field:
"""Returns a Field namedtuple"""
comment: Comment = Comment("", {})
if len(tokens) < 2:
field: Field = tokens[0]
else:
comment, field = tuple(tokens)
return Field(comment, "repeated", field.type, field.type, field.name, field.number)
@staticmethod
def mapfield(tokens: list) -> Field:
"""Returns a Field namedtuple"""
comment = Comment("", {})
val_type = Token("TYPE", "")
key_type = Token("KEYTYPE", "")
fieldname = Token("MAPNAME", "")
fieldnumber = Token("FIELDNUMBER", "")
for token in tokens:
if isinstance(token, Comment):
comment = token
elif isinstance(token, Token):
if token.type == "TYPE":
val_type = token
elif token.type == "KEYTYPE":
key_type = token
elif token.type == "MAPNAME":
fieldname = token
elif token.type == "FIELDNUMBER":
fieldnumber = token
elif token.type == "COMMENT":
comment = Comment(token.value, {})
return Field(comment, "map", key_type.value, val_type.value, fieldname.value, int(fieldnumber.value))
@staticmethod
def comments(tokens: list) -> Comment:
"""Returns a Tag namedtuple"""
comment: str = ""
tags: Dict[str, Any] = {}
for token in tokens:
comment += token
if token.find("@") < 0:
continue
kvs = token.strip(" /\n").split("@")
for kv in kvs:
kv = kv.strip(" /\n")
if not kv:
continue
tmp = kv.split("=")
key = tmp[0].strip(" /\n").lower()
if key.find(" ") >= 0:
continue
if len(tmp) > 1:
tags[key] = tmp[1].lower()
else:
tags[key] = True
return Comment(comment, tags)
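# Illustration of the tag syntax handled above (hypothetical input): since
# lark Tokens are str subclasses, plain strings behave the same way here.
#
#   ProtoTransformer.comments(["// @required @max_length=10\n"])
#   -> Comment(content='// @required @max_length=10\n',
#              tags={'required': True, 'max_length': '10'})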
@staticmethod
def enum(tokens: list) -> Enum:
"""Returns an Enum namedtuple"""
comment: Comment = Comment("", {})
if len(tokens) < 3:
name, fields = tokens
else:
comment, name, fields = tokens
return Enum(comment, name.value, fields)
@staticmethod
def enumbody(tokens: list) -> List[Field]:
"""Returns a sequence of enum identifiers"""
enumitems: List[Field] = []
for tree in tokens:
if tree.data != "enumfield":
continue
comment: Comment = Comment("", {})
name: Token = Token("IDENT", "")
value: Token = Token("INTLIT", "")
for token in tree.children:
if isinstance(token, Comment):
comment = token
elif isinstance(token, Token):
if token.type == "IDENT":
name = token
elif token.type == "INTLIT":
value = token
elif token.type == "COMMENTS":
comment = Comment(token.value, {})
enumitems.append(Field(comment, "enum", "enum", "enum", name.value, value.value))
return enumitems
@staticmethod
def service(tokens: list) -> Service:
"""Returns a Service namedtuple"""
functions: List[RpcFunc] = []
name: str = ""
for token in tokens:
if token is None:
continue
if not isinstance(token, Comment):
if isinstance(token, RpcFunc):
functions.append(token)
else:
name = token.value
return Service(name, functions)
@staticmethod
def rpc(tokens: List) -> RpcFunc:
"""Returns a RpcFunc namedtuple"""
uri: str = ""
name: Token = Token("RPCNAME", "")
in_type: Token = Token("MESSAGETYPE", "")
out_type: Token = Token("MESSAGETYPE", "")
for token in tokens:
if token is None:
continue
if isinstance(token, Token):
if token.type == "RPCNAME":
name = token
elif token.type == "MESSAGETYPE":
if in_type:
out_type = token
else:
in_type = token
elif not isinstance(token, Comment):
option_token = token
uri = option_token.children[0].value
return RpcFunc(name.value, in_type.value, out_type.value, uri.strip('"'))
def _recursive_to_dict(obj: Any) -> Dict[str, Any]:
_dict: Dict[str, Any] = {}
if hasattr(obj, "__dataclass_fields__"):
node: Dict[str, Any] = asdict(obj)
for item in node:
if isinstance(node[item], list): # Process as a list
_dict[item] = [_recursive_to_dict(x) for x in (node[item])]
elif isinstance(node[item], tuple): # Process as a NamedTuple
_dict[item] = _recursive_to_dict(node[item])
elif isinstance(node[item], dict):
for k in node[item]:
if isinstance(node[item][k], tuple):
node[item][k] = _recursive_to_dict(node[item][k])
_dict[item] = node[item]
else: # Process as a regular element
_dict[item] = node[item]
return _dict
def parse_from_file(file: str) -> Optional[ProtoFile]:
with open(file, "r") as f:
data = f.read()
if data:
return parse(data)
return None
def parse(data: str) -> ProtoFile:
parser: Lark = Lark(BNF, start="proto", parser="lalr")
tree: ParseTree = parser.parse(data)
trans_tree: Tree = ProtoTransformer().transform(tree)
enums: Dict[str, Enum] = {}
messages: Dict[str, Message] = {}
services: Dict[str, Service] = {}
imports: List[str] = []
options: Dict[str, str] = {}
package: str = ""
import_tree = trans_tree.find_data("import")
for tree in import_tree:
for child in tree.children:
imports.append(child.value.strip('"'))
option_tree = trans_tree.find_data("option")
for tree in option_tree:
options[tree.children[0]] = tree.children[1].strip('"')
package_tree = trans_tree.find_data("package")
for tree in package_tree:
package = tree.children[0]
top_data = trans_tree.find_data("topleveldef")
for top_level in top_data:
for child in top_level.children:
if isinstance(child, Message):
messages[child.name] = child
if isinstance(child, Enum):
enums[child.name] = child
if isinstance(child, Service):
services[child.name] = child
return ProtoFile(messages, enums, services, imports, options, package)
def serialize2json(data: str) -> str:
return json.dumps(_recursive_to_dict(parse(data)))
def serialize2json_from_file(file: str) -> Optional[str]:
with open(file, "r") as f:
data = f.read()
if data:
return json.dumps(_recursive_to_dict(parse(data)))
return None
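# A minimal end-to-end sketch of the public helpers above; the .proto source
# is a hypothetical snippet, assumed to satisfy the proto3 grammar in BNF.
if __name__ == "__main__":
    _DEMO_PROTO = """syntax = "proto3";
package demo;
message User {
  string name = 1; // @required
  int32 id = 2;
}
"""
    _parsed: ProtoFile = parse(_DEMO_PROTO)
    print(list(_parsed.messages))  # expected: ['User']
    print([f.name for f in _parsed.messages["User"].fields])  # expected: ['name', 'id']
    print(serialize2json(_DEMO_PROTO))  # the same parse, dumped as JSON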
|
PypiClean
|
/Skailar-framework-5.0.tar.gz/Skailar-framework-5.0/skailar/views/csrf.py
|
from pathlib import Path
from skailar.conf import settings
from skailar.http import HttpResponseForbidden
from skailar.template import Context, Engine, TemplateDoesNotExist, loader
from skailar.utils.translation import gettext as _
from skailar.utils.version import get_docs_version
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
# Only the text appearing with DEBUG=False is translated. Normal translation
tags cannot be used with these inline templates, as makemessages would not be
# able to discover the strings.
CSRF_FAILURE_TEMPLATE_NAME = "403_csrf.html"
def builtin_template_path(name):
"""
Return a path to a builtin template.
Avoid calling this function at the module level or in a class-definition
because __file__ may not exist, e.g. in frozen environments.
"""
return Path(__file__).parent / "templates" / name
def csrf_failure(request, reason="", template_name=CSRF_FAILURE_TEMPLATE_NAME):
"""
Default view used when a request fails CSRF protection.
"""
from skailar.middleware.csrf import REASON_NO_CSRF_COOKIE, REASON_NO_REFERER
c = {
"title": _("Forbidden"),
"main": _("CSRF verification failed. Request aborted."),
"reason": reason,
"no_referer": reason == REASON_NO_REFERER,
"no_referer1": _(
"You are seeing this message because this HTTPS site requires a "
"“Referer header” to be sent by your web browser, but none was "
"sent. This header is required for security reasons, to ensure "
"that your browser is not being hijacked by third parties."
),
"no_referer2": _(
"If you have configured your browser to disable “Referer” headers, "
"please re-enable them, at least for this site, or for HTTPS "
"connections, or for “same-origin” requests."
),
"no_referer3": _(
'If you are using the <meta name="referrer" '
'content="no-referrer"> tag or including the “Referrer-Policy: '
"no-referrer” header, please remove them. The CSRF protection "
"requires the “Referer” header to do strict referer checking. If "
"you’re concerned about privacy, use alternatives like "
'<a rel="noreferrer" …> for links to third-party sites.'
),
"no_cookie": reason == REASON_NO_CSRF_COOKIE,
"no_cookie1": _(
"You are seeing this message because this site requires a CSRF "
"cookie when submitting forms. This cookie is required for "
"security reasons, to ensure that your browser is not being "
"hijacked by third parties."
),
"no_cookie2": _(
"If you have configured your browser to disable cookies, please "
"re-enable them, at least for this site, or for “same-origin” "
"requests."
),
"DEBUG": settings.DEBUG,
"docs_version": get_docs_version(),
"more": _("More information is available with DEBUG=True."),
}
try:
t = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name == CSRF_FAILURE_TEMPLATE_NAME:
# If the default template doesn't exist, use the fallback template.
with builtin_template_path("csrf_403.html").open(encoding="utf-8") as fh:
t = Engine().from_string(fh.read())
c = Context(c)
else:
# Raise if a developer-specified template doesn't exist.
raise
return HttpResponseForbidden(t.render(c))
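# A configuration sketch, assuming Skailar mirrors Django's CSRF_FAILURE_VIEW
# setting; the project module path below is hypothetical.
#
#   # settings.py
#   CSRF_FAILURE_VIEW = "myproject.views.csrf_failure"
#
#   # myproject/views.py
#   from skailar.views.csrf import csrf_failure as default_csrf_failure
#
#   def csrf_failure(request, reason=""):
#       # Log or instrument the failure here, then fall back to the default view.
#       return default_csrf_failure(request, reason=reason)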
|
PypiClean
|
/dapp_runner-0.3.0-py3-none-any.whl/dapp_runner/_util.py
|
import asyncio
import socket
from asyncio import Task
from datetime import datetime, timezone
from typing import Any, Generator
import statemachine
from colors import yellow
from yapapi import Golem
from yapapi import __version__ as yapapi_version
from dapp_runner.singleton import SingletonMeta
class FreePortProvider(metaclass=SingletonMeta):
"""Provide free port to reserve by dapp-runner.
Usage of singleton prevents race condition on single dapp-runner instance
when reserving free ports.
This is temporary solution until issue https://github.com/golemfactory/yapapi/issues/1098
is resolved and proper fix can be implemented.
"""
_generator: Generator[int, None, None]
def __init__(self, range_start: int = 8080, range_end: int = 9090):
self._generator = self._free_port_generator(range_start, range_end)
def _free_port_generator(
self, range_start: int = 8080, range_end: int = 9090
) -> Generator[int, None, None]:
"""Yield the first available port on localhost within the specified range.
The range is inclusive on both sides (i.e. `range_end` will be included).
Raises `RuntimeError` when no free port could be found.
"""
return_port = None
for port in range(range_start, range_end + 1):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind(("", port))
return_port = port
except OSError:
pass
if return_port is not None:
yield return_port
return_port = None
raise RuntimeError(f"No free ports found. range_start={range_start}, range_end={range_end}")
def get_free_port(self) -> int:
"""Get next available port on localhost."""
return next(self._generator)
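# Usage sketch: SingletonMeta is assumed to cache the first instance, so every
# FreePortProvider() call shares one generator and no port is handed out twice.
# The range below is illustrative.
#
#   provider = FreePortProvider(range_start=8080, range_end=8090)
#   first = provider.get_free_port()
#   second = FreePortProvider().get_free_port()  # same instance, next free port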
def _print_env_info(golem: Golem):
print(
f"yapapi version: {yellow(yapapi_version)}\n"
f"Using subnet: {yellow(golem.subnet_tag)}, "
f"payment driver: {yellow(golem.payment_driver)}, "
f"and network: {yellow(golem.payment_network)}\n"
)
def utcnow() -> datetime:
"""Get a timezone-aware datetime for _now_."""
return datetime.now(tz=timezone.utc)
def utcnow_iso_str() -> str:
"""Get ISO formatted timezone-aware string for _now_."""
return utcnow().isoformat()
def json_encoder(obj: Any):
"""Handle additional object types for `json.dump*` encoding."""
if isinstance(obj, statemachine.State):
return obj.name
return obj
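# For example, serializing a payload that contains a statemachine.State only
# requires passing json_encoder as `default`; `machine` here is hypothetical.
#
#   import json
#   json.dumps({"state": machine.current_state}, default=json_encoder)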
async def cancel_and_await_tasks(*tasks: Task) -> None:
"""Cancel and await cleanup of provided tasks."""
# Mark all remaining tasks as cancelled at once
for task in tasks:
task.cancel()
# Give tasks a chance for cleanup by awaiting and
# expecting CancelledError (default asyncio behaviour)
for task in tasks:
try:
await task
except asyncio.CancelledError:
pass
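# A minimal driver sketch for cancel_and_await_tasks; the sleeping tasks are
# hypothetical stand-ins for the runner's background work.
if __name__ == "__main__":
    async def _demo() -> None:
        pending = [asyncio.create_task(asyncio.sleep(3600)) for _ in range(2)]
        await cancel_and_await_tasks(*pending)
        assert all(task.cancelled() for task in pending)
    asyncio.run(_demo())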
|
PypiClean
|
/django-cked-2.0.2.tar.gz/django-cked-2.0.2/src/cked/static/cked/ckeditor/lang/af.js
|
/*
Copyright (c) 2003-2021, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['af']={"editor":"Woordverwerker","editorPanel":"Woordverwerkerpaneel","common":{"editorHelp":"Druk op ALT 0 vir hulp","browseServer":"Blaai op bediener","url":"URL","protocol":"Protokol","upload":"Oplaai","uploadSubmit":"Stuur aan die bediener","image":"Beeld","form":"Vorm","checkbox":"Merkhokkie","radio":"Radioknoppie","textField":"Teksveld","textarea":"Teksarea","hiddenField":"Versteekteveld","button":"Knop","select":"Keuseveld","imageButton":"Beeldknop","notSet":"<geen instelling>","id":"Id","name":"Naam","langDir":"Skryfrigting","langDirLtr":"Links na regs (LTR)","langDirRtl":"Regs na links (RTL)","langCode":"Taalkode","longDescr":"Lang beskrywing URL","cssClass":"CSS klasse","advisoryTitle":"Aanbevole titel","cssStyle":"Styl","ok":"OK","cancel":"Kanselleer","close":"Sluit","preview":"Voorbeeld","resize":"Skalierung","generalTab":"Algemeen","advancedTab":"Gevorderd","validateNumberFailed":"Hierdie waarde is nie 'n nommer nie.","confirmNewPage":"Alle wysiginge sal verlore gaan. Is jy seker dat jy 'n nuwe bladsy wil laai?","confirmCancel":"Sommige opsies is gewysig. Is jy seker dat jy hierdie dialoogvenster wil sluit?","options":"Opsies","target":"Teiken","targetNew":"Nuwe venster (_blank)","targetTop":"Boonste venster (_top)","targetSelf":"Selfde venster (_self)","targetParent":"Oorspronklike venster (_parent)","langDirLTR":"Links na Regs (LTR)","langDirRTL":"Regs na Links (RTL)","styles":"Styl","cssClasses":"CSS klasse","width":"Breedte","height":"Hoogte","align":"Orienteerung","left":"Links","right":"Regs","center":"Middel","justify":"Eweredig","alignLeft":"Links oplyn","alignRight":"Regs oplyn","alignCenter":"Middel oplyn","alignTop":"Bo","alignMiddle":"Middel","alignBottom":"Onder","alignNone":"Geen","invalidValue":"Ongeldige waarde","invalidHeight":"Hoogte moet 'n getal wees","invalidWidth":"Breedte moet 'n getal wees.","invalidLength":"Die waarde vir die veld \"%1\" moet 'n posetiewe nommer wees met of sonder die meeteenheid (%2).","invalidCssLength":"Die waarde vir die \"%1\" veld moet 'n posetiewe getal wees met of sonder 'n geldige CSS eenheid (px, %, in, cm, mm, em, ex, pt, of pc).","invalidHtmlLength":"Die waarde vir die \"%1\" veld moet 'n posetiewe getal wees met of sonder 'n geldige HTML eenheid (px of %).","invalidInlineStyle":"Ongeldige CSS. Formaat is een of meer sleutel-wert paare, \"naam : wert\" met kommapunte gesky.","cssLengthTooltip":"Voeg 'n getal wert in pixel in, of 'n waarde met geldige CSS eenheid (px, %, in, cm, mm, em, ex, pt, of pc).","unavailable":"%1<span class=\"cke_accessibility\">, nie beskikbaar nie</span>","keyboard":{"8":"Backspace","13":"Enter","16":"Skuif","17":"Ctrl","18":"Alt","32":"Spasie","35":"Einde","36":"Tuis","46":"Verwyder","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Bevel"},"keyboardShortcut":"Sleutel kombenasie","optionDefault":"Verstek"},"about":{"copy":"Kopiereg © $1. Alle regte voorbehou.","dlgTitle":"Meer oor CKEditor 4","moreInfo":"Vir lisensie-informasie, besoek asb. ons webwerf:"},"basicstyles":{"bold":"Vet","italic":"Skuins","strike":"Deurgestreep","subscript":"Onderskrif","superscript":"Bo-skrif","underline":"Onderstreep"},"bidi":{"ltr":"Skryfrigting van links na regs","rtl":"Skryfrigting van regs na links"},"blockquote":{"toolbar":"Sitaatblok"},"notification":{"closed":"Notification closed."},"toolbar":{"toolbarCollapse":"Verklein werkbalk","toolbarExpand":"Vergroot werkbalk","toolbarGroups":{"document":"Dokument","clipboard":"Knipbord/Undo","editing":"Verander","forms":"Vorms","basicstyles":"Eenvoudige Styl","paragraph":"Paragraaf","links":"Skakels","insert":"Toevoeg","styles":"Style","colors":"Kleure","tools":"Gereedskap"},"toolbars":"Werkbalke"},"clipboard":{"copy":"Kopiëer","copyError":"U leser se sekuriteitsinstelling belet die kopiëringsaksie. Gebruik die sleutelbordkombinasie (Ctrl/Cmd+C).","cut":"Uitsnei","cutError":"U leser se sekuriteitsinstelling belet die outomatiese uitsnei-aksie. Gebruik die sleutelbordkombinasie (Ctrl/Cmd+X).","paste":"Byvoeg","pasteNotification":"Druk %1 om by te voeg. You leser ondersteun nie die toolbar knoppie of inoud kieslysie opsie nie. ","pasteArea":"Area byvoeg","pasteMsg":"Voeg jou inhoud in die gebied onder by en druk OK","fileFormatNotSupportedNotification":"This file format is not supported. You can try with one of the supported formats: ${formats}."},"colorbutton":{"auto":"Outomaties","bgColorTitle":"Agtergrondkleur","colors":{"000":"Swart","800000":"Meroen","8B4513":"Sjokoladebruin","2F4F4F":"Donkerleisteengrys","008080":"Blougroen","000080":"Vlootblou","4B0082":"Indigo","696969":"Donkergrys","B22222":"Rooibaksteen","A52A2A":"Bruin","DAA520":"Donkergeel","006400":"Donkergroen","40E0D0":"Turkoois","0000CD":"Middelblou","800080":"Pers","808080":"Grys","F00":"Rooi","FF8C00":"Donkeroranje","FFD700":"Goud","008000":"Groen","0FF":"Siaan","00F":"Blou","EE82EE":"Viooltjieblou","A9A9A9":"Donkergrys","FFA07A":"Ligsalm","FFA500":"Oranje","FFFF00":"Geel","00FF00":"Lemmetjie","AFEEEE":"Ligturkoois","ADD8E6":"Ligblou","DDA0DD":"Pruim","D3D3D3":"Liggrys","FFF0F5":"Linne","FAEBD7":"Ivoor","FFFFE0":"Liggeel","F0FFF0":"Heuningdou","F0FFFF":"Asuur","F0F8FF":"Ligte hemelsblou","E6E6FA":"Laventel","FFF":"Wit","1ABC9C":"Strong Cyan","2ECC71":"Emerald","3498DB":"Bright Blue","9B59B6":"Amethyst","4E5F70":"Grayish Blue","F1C40F":"Vivid Yellow","16A085":"Dark Cyan","27AE60":"Dark Emerald","2980B9":"Strong Blue","8E44AD":"Dark Violet","2C3E50":"Desaturated Blue","F39C12":"Orange","E67E22":"Carrot","E74C3C":"Pale Red","ECF0F1":"Bright Silver","95A5A6":"Light Grayish Cyan","DDD":"Light Gray","D35400":"Pumpkin","C0392B":"Strong Red","BDC3C7":"Silver","7F8C8D":"Grayish Cyan","999":"Dark Gray"},"more":"Meer Kleure...","panelTitle":"Kleure","textColorTitle":"Tekskleur"},"colordialog":{"clear":"Herstel","highlight":"Aktief","options":"Kleuropsies","selected":"Geselekteer","title":"Kies kleur"},"templates":{"button":"Sjablone","emptyListMsg":"(Geen sjablone gedefineer nie)","insertOption":"Vervang huidige inhoud","options":"Sjabloon opsies","selectPromptMsg":"Kies die sjabloon om te gebruik in die redigeerder (huidige inhoud gaan verlore):","title":"Inhoud Sjablone"},"contextmenu":{"options":"Konteks Spyskaart-opsies"},"copyformatting":{"label":"Copy Formatting","notification":{"copied":"Formatting copied","applied":"Formatting applied","canceled":"Formatting canceled","failed":"Formatting failed. You cannot apply styles without copying them first."}},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Aanbevole Titel","cssClassInputLabel":"CSS klasse","edit":"Wysig Div","inlineStyleInputLabel":"Inlyn Styl","langDirLTRLabel":"Links na regs (LTR)","langDirLabel":"Skryfrigting","langDirRTLLabel":"Regs na links (RTL)","languageCodeInputLabel":" Taalkode","remove":"Verwyder Div","styleSelectLabel":"Styl","title":"Skep Div houer","toolbar":"Skep Div houer"},"elementspath":{"eleLabel":"Elemente-pad","eleTitle":"%1 element"},"exportpdf":{"documentReady":"Document is ready!","error":"Error occurred.","processingDocument":"Processing PDF document...","toolbar":"Export to PDF"},"filetools":{"loadError":"Error occurred during file read.","networkError":"Network error occurred during file upload.","httpError404":"HTTP error occurred during file upload (404: File not found).","httpError403":"HTTP error occurred during file upload (403: Forbidden).","httpError":"HTTP error occurred during file upload (error status: %1).","noUrlError":"Upload URL is not defined.","responseError":"Incorrect server response."},"find":{"find":"Soek","findOptions":"Find Options","findWhat":"Soek na:","matchCase":"Hoof/kleinletter sensitief","matchCyclic":"Soek deurlopend","matchWord":"Hele woord moet voorkom","notFoundMsg":"Teks nie gevind nie.","replace":"Vervang","replaceAll":"Vervang alles","replaceSuccessMsg":"%1 voorkoms(te) vervang.","replaceWith":"Vervang met:","title":"Soek en vervang"},"font":{"fontSize":{"label":"Grootte","voiceLabel":"Fontgrootte","panelTitle":"Fontgrootte"},"label":"Font","panelTitle":"Fontnaam","voiceLabel":"Font"},"fakeobjects":{"anchor":"Anker","hiddenfield":"Verborge veld","iframe":"IFrame","unknown":"Onbekende objek"},"forms":{"button":{"title":"Knop eienskappe","text":"Teks (Waarde)","type":"Soort","typeBtn":"Knop","typeSbm":"Stuur","typeRst":"Maak leeg"},"checkboxAndRadio":{"checkboxTitle":"Merkhokkie eienskappe","radioTitle":"Radioknoppie eienskappe","value":"Waarde","selected":"Geselekteer","required":"Required"},"form":{"title":"Vorm eienskappe","menu":"Vorm eienskappe","action":"Aksie","method":"Metode","encoding":"Kodering"},"hidden":{"title":"Verborge veld eienskappe","name":"Naam","value":"Waarde"},"select":{"title":"Keuseveld eienskappe","selectInfo":"Info","opAvail":"Beskikbare opsies","value":"Waarde","size":"Grootte","lines":"Lyne","chkMulti":"Laat meer as een keuse toe","required":"Required","opText":"Teks","opValue":"Waarde","btnAdd":"Byvoeg","btnModify":"Wysig","btnUp":"Op","btnDown":"Af","btnSetValue":"Stel as geselekteerde waarde","btnDelete":"Verwyder"},"textarea":{"title":"Teks-area eienskappe","cols":"Kolomme","rows":"Rye"},"textfield":{"title":"Teksveld eienskappe","name":"Naam","value":"Waarde","charWidth":"Breedte (karakters)","maxChars":"Maksimum karakters","required":"Required","type":"Soort","typeText":"Teks","typePass":"Wagwoord","typeEmail":"Email","typeSearch":"Search","typeTel":"Telephone Number","typeUrl":"URL"}},"format":{"label":"Opmaak","panelTitle":"Opmaak","tag_address":"Adres","tag_div":"Normaal (DIV)","tag_h1":"Opskrif 1","tag_h2":"Opskrif 2","tag_h3":"Opskrif 3","tag_h4":"Opskrif 4","tag_h5":"Opskrif 5","tag_h6":"Opskrif 6","tag_p":"Normaal","tag_pre":"Opgemaak"},"horizontalrule":{"toolbar":"Horisontale lyn invoeg"},"iframe":{"border":"Wys rand van raam","noUrl":"Gee die iframe URL","scrolling":"Skuifbalke aan","title":"IFrame Eienskappe","toolbar":"IFrame","tabindex":"Remove from tabindex"},"image":{"alt":"Alternatiewe teks","border":"Rand","btnUpload":"Stuur na bediener","button2Img":"Wil u die geselekteerde afbeeldingsknop vervang met 'n eenvoudige afbeelding?","hSpace":"HSpasie","img2Button":"Wil u die geselekteerde afbeelding vervang met 'n afbeeldingsknop?","infoTab":"Afbeelding informasie","linkTab":"Skakel","lockRatio":"Vaste proporsie","menu":"Afbeelding eienskappe","resetSize":"Herstel grootte","title":"Afbeelding eienskappe","titleButton":"Afbeeldingsknop eienskappe","upload":"Oplaai","urlMissing":"Die URL na die afbeelding ontbreek.","vSpace":"VSpasie","validateBorder":"Rand moet 'n heelgetal wees.","validateHSpace":"HSpasie moet 'n heelgetal wees.","validateVSpace":"VSpasie moet 'n heelgetal wees."},"indent":{"indent":"Vergroot inspring","outdent":"Verklein inspring"},"smiley":{"options":"Lagbekkie opsies","title":"Voeg lagbekkie by","toolbar":"Lagbekkie"},"language":{"button":"Set language","remove":"Remove language"},"link":{"acccessKey":"Toegangsleutel","advanced":"Gevorderd","advisoryContentType":"Aanbevole inhoudstipe","advisoryTitle":"Aanbevole titel","anchor":{"toolbar":"Anker byvoeg/verander","menu":"Anker-eienskappe","title":"Anker-eienskappe","name":"Ankernaam","errorName":"Voltooi die ankernaam asseblief","remove":"Remove Anchor"},"anchorId":"Op element Id","anchorName":"Op ankernaam","charset":"Karakterstel van geskakelde bron","cssClasses":"CSS klasse","download":"Force Download","displayText":"Display Text","emailAddress":"E-posadres","emailBody":"Berig-inhoud","emailSubject":"Berig-onderwerp","id":"Id","info":"Skakel informasie","langCode":"Taalkode","langDir":"Skryfrigting","langDirLTR":"Links na regs (LTR)","langDirRTL":"Regs na links (RTL)","menu":"Wysig skakel","name":"Naam","noAnchors":"(Geen ankers beskikbaar in dokument)","noEmail":"Gee die e-posadres","noUrl":"Gee die skakel se URL","noTel":"Please type the phone number","other":"<ander>","phoneNumber":"Phone number","popupDependent":"Afhanklik (Netscape)","popupFeatures":"Eienskappe van opspringvenster","popupFullScreen":"Volskerm (IE)","popupLeft":"Posisie links","popupLocationBar":"Adresbalk","popupMenuBar":"Spyskaartbalk","popupResizable":"Herskaalbaar","popupScrollBars":"Skuifbalke","popupStatusBar":"Statusbalk","popupToolbar":"Werkbalk","popupTop":"Posisie bo","rel":"Relationship","selectAnchor":"Kies 'n anker","styles":"Styl","tabIndex":"Tab indeks","target":"Doel","targetFrame":"<raam>","targetFrameName":"Naam van doelraam","targetPopup":"<opspringvenster>","targetPopupName":"Naam van opspringvenster","title":"Skakel","toAnchor":"Anker in bladsy","toEmail":"E-pos","toUrl":"URL","toPhone":"Phone","toolbar":"Skakel invoeg/wysig","type":"Skakelsoort","unlink":"Verwyder skakel","upload":"Oplaai"},"list":{"bulletedlist":"Ongenommerde lys","numberedlist":"Genommerde lys"},"liststyle":{"bulletedTitle":"Eienskappe van ongenommerde lys","circle":"Sirkel","decimal":"Desimale syfers (1, 2, 3, ens.)","disc":"Skyf","lowerAlpha":"Kleinletters (a, b, c, d, e, ens.)","lowerRoman":"Romeinse kleinletters (i, ii, iii, iv, v, ens.)","none":"Geen","notset":"<nie ingestel nie>","numberedTitle":"Eienskappe van genommerde lys","square":"Vierkant","start":"Begin","type":"Tipe","upperAlpha":"Hoofletters (A, B, C, D, E, ens.)","upperRoman":"Romeinse hoofletters (I, II, III, IV, V, ens.)","validateStartNumber":"Beginnommer van lys moet 'n heelgetal wees."},"magicline":{"title":"Voeg paragraaf hier in"},"maximize":{"maximize":"Maksimaliseer","minimize":"Minimaliseer"},"newpage":{"toolbar":"Nuwe bladsy"},"pagebreak":{"alt":"Bladsy-einde","toolbar":"Bladsy-einde invoeg"},"pastetext":{"button":"Voeg by as eenvoudige teks","pasteNotification":"Druk %1 om by te voeg. Jou leser ondersteun nie byvoeg deur die toolbar knoppie of die konteks kieslys nie","title":"Voeg by as eenvoudige teks"},"pastefromword":{"confirmCleanup":"Die teks wat u wil byvoeg lyk asof dit uit Word gekopiëer is. Wil u dit eers skoonmaak voordat dit bygevoeg word?","error":"Die bygevoegte teks kon nie skoongemaak word nie, weens 'n interne fout","title":"Uit Word byvoeg","toolbar":"Uit Word byvoeg"},"preview":{"preview":"Voorbeeld"},"print":{"toolbar":"Druk"},"removeformat":{"toolbar":"Verwyder opmaak"},"save":{"toolbar":"Bewaar"},"selectall":{"toolbar":"Selekteer alles"},"showblocks":{"toolbar":"Toon blokke"},"sourcearea":{"toolbar":"Bron"},"specialchar":{"options":"Spesiale karakter-opsies","title":"Kies spesiale karakter","toolbar":"Voeg spesiaale karakter in"},"scayt":{"btn_about":"SCAYT info","btn_dictionaries":"Woordeboeke","btn_disable":"SCAYT af","btn_enable":"SCAYT aan","btn_langs":"Tale","btn_options":"Opsies","text_title":"Speltoets terwyl u tik"},"stylescombo":{"label":"Styl","panelTitle":"Vormaat style","panelTitle1":"Blok style","panelTitle2":"Inlyn style","panelTitle3":"Objek style"},"table":{"border":"Randbreedte","caption":"Naam","cell":{"menu":"Sel","insertBefore":"Voeg sel in voor","insertAfter":"Voeg sel in na","deleteCell":"Verwyder sel","merge":"Voeg selle saam","mergeRight":"Voeg saam na regs","mergeDown":"Voeg saam ondertoe","splitHorizontal":"Splits sel horisontaal","splitVertical":"Splits sel vertikaal","title":"Sel eienskappe","cellType":"Sel tipe","rowSpan":"Omspan rye","colSpan":"Omspan kolomme","wordWrap":"Woord terugloop","hAlign":"Horisontale oplyning","vAlign":"Vertikale oplyning","alignBaseline":"Basislyn","bgColor":"Agtergrondkleur","borderColor":"Randkleur","data":"Inhoud","header":"Opskrif","yes":"Ja","no":"Nee","invalidWidth":"Selbreedte moet 'n getal wees.","invalidHeight":"Selhoogte moet 'n getal wees.","invalidRowSpan":"Omspan rye moet 'n heelgetal wees.","invalidColSpan":"Omspan kolomme moet 'n heelgetal wees.","chooseColor":"Kies"},"cellPad":"Sel-spasie","cellSpace":"Sel-afstand","column":{"menu":"Kolom","insertBefore":"Voeg kolom in voor","insertAfter":"Voeg kolom in na","deleteColumn":"Verwyder kolom"},"columns":"Kolomme","deleteTable":"Verwyder tabel","headers":"Opskrifte","headersBoth":"Beide ","headersColumn":"Eerste kolom","headersNone":"Geen","headersRow":"Eerste ry","heightUnit":"height unit","invalidBorder":"Randbreedte moet 'n getal wees.","invalidCellPadding":"Sel-spasie moet 'n getal wees.","invalidCellSpacing":"Sel-afstand moet 'n getal wees.","invalidCols":"Aantal kolomme moet 'n getal groter as 0 wees.","invalidHeight":"Tabelhoogte moet 'n getal wees.","invalidRows":"Aantal rye moet 'n getal groter as 0 wees.","invalidWidth":"Tabelbreedte moet 'n getal wees.","menu":"Tabel eienskappe","row":{"menu":"Ry","insertBefore":"Voeg ry in voor","insertAfter":"Voeg ry in na","deleteRow":"Verwyder ry"},"rows":"Rye","summary":"Opsomming","title":"Tabel eienskappe","toolbar":"Tabel","widthPc":"persent","widthPx":"piksels","widthUnit":"breedte-eenheid"},"undo":{"redo":"Oordoen","undo":"Ontdoen"},"widget":{"move":"Klik en trek on te beweeg","label":"%1 widget"},"uploadwidget":{"abort":"Upload aborted by the user.","doneOne":"File successfully uploaded.","doneMany":"Successfully uploaded %1 files.","uploadOne":"Uploading file ({percentage}%)...","uploadMany":"Uploading files, {current} of {max} done ({percentage}%)..."}};
|
PypiClean
|
/ais_dom-2023.7.2-py3-none-any.whl/homeassistant/components/vallox/binary_sensor.py
|
from __future__ import annotations
from dataclasses import dataclass
from homeassistant.components.binary_sensor import (
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import ValloxDataUpdateCoordinator, ValloxEntity
from .const import DOMAIN
class ValloxBinarySensorEntity(ValloxEntity, BinarySensorEntity):
"""Representation of a Vallox binary sensor."""
entity_description: ValloxBinarySensorEntityDescription
_attr_entity_category = EntityCategory.DIAGNOSTIC
_attr_has_entity_name = True
def __init__(
self,
name: str,
coordinator: ValloxDataUpdateCoordinator,
description: ValloxBinarySensorEntityDescription,
) -> None:
"""Initialize the Vallox binary sensor."""
super().__init__(name, coordinator)
self.entity_description = description
self._attr_unique_id = f"{self._device_uuid}-{description.key}"
@property
def is_on(self) -> bool | None:
"""Return true if the binary sensor is on."""
return self.coordinator.data.get_metric(self.entity_description.metric_key) == 1
@dataclass
class ValloxMetricKeyMixin:
"""Dataclass to allow defining metric_key without a default value."""
metric_key: str
@dataclass
class ValloxBinarySensorEntityDescription(
BinarySensorEntityDescription, ValloxMetricKeyMixin
):
"""Describes Vallox binary sensor entity."""
BINARY_SENSOR_ENTITIES: tuple[ValloxBinarySensorEntityDescription, ...] = (
ValloxBinarySensorEntityDescription(
key="post_heater",
name="Post heater",
icon="mdi:radiator",
metric_key="A_CYC_IO_HEATER",
),
)
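# Note: further diagnostic sensors could be exposed by appending additional
# ValloxBinarySensorEntityDescription entries to the tuple above; any new
# metric_key would have to match a real metric name from the Vallox protocol.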
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the sensors."""
data = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
[
ValloxBinarySensorEntity(data["name"], data["coordinator"], description)
for description in BINARY_SENSOR_ENTITIES
]
)
|
PypiClean
|
/athena-mathlab-0.1.2.post2304.tar.gz/athena-mathlab-0.1.2.post2304/LICENSE.rst
|
The MIT License (MIT)
Copyright (c) 2019-2020 ATHENA contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
PypiClean
|
/kiwitcms-12.4.tar.gz/kiwitcms-12.4/tcms/node_modules/bootstrap-switch/README.md
|
# Bootstrap Switch
[](https://david-dm.org/Bttstrp/bootstrap-switch)
[](https://david-dm.org/Bttstrp/bootstrap-switch#info=devDependencies)
[](https://www.npmjs.org/)
Turn checkboxes and radio buttons into toggle switches. Created by [Mattia Larentis](http://github.com/nostalgiaz), maintained by [Emanuele Marchi](http://github.com/lostcrew) and [Peter Stein](http://www.bdmdesign.org) with the help of the community.
To get started, check out [https://bttstrp.github.io/bootstrap-switch](https://bttstrp.github.io/bootstrap-switch)!
## Quick start
Several quick start options are available:
- Download the [latest release](https://github.com/Bttstrp/bootstrap-switch/releases/latest)
- Clone the repo: `git clone https://github.com/Bttstrp/bootstrap-switch.git`
- Install with npm: `npm install bootstrap-switch`
- Install with yarn: `yarn add bootstrap-switch`
- Install with Composer: `composer require components/bootstrap-switch`
- Install with Bower: `bower install bootstrap-switch`
- Install with NuGet: `PM> Install-Package Bootstrap.Switch` ([NuGet package](https://github.com/blachniet/bootstrap-switch-nuget))
Include the dependencies: jQuery, Bootstrap and Bootstrap Switch CSS + JavaScript:
``` html
<link href="bootstrap.css" rel="stylesheet">
<link href="bootstrap-switch.css" rel="stylesheet">
<script src="jquery.js"></script>
<script src="bootstrap-switch.js"></script>
```
Add your checkbox:
```html
<input type="checkbox" name="my-checkbox" checked>
```
Initialize Bootstrap Switch on it:
```javascript
$("[name='my-checkbox']").bootstrapSwitch();
```
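You can also drive the switch programmatically. The snippet below is a minimal sketch based on the documented `state` method and the `switchChange.bootstrapSwitch` event; the handler body is illustrative only:
```javascript
var $checkbox = $("[name='my-checkbox']");

// Read the current state (true/false) and set it programmatically.
var isOn = $checkbox.bootstrapSwitch('state');
$checkbox.bootstrapSwitch('state', false);

// React when the user toggles the switch.
$checkbox.on('switchChange.bootstrapSwitch', function (event, state) {
  console.log('Switch is now ' + (state ? 'on' : 'off'));
});
```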
Enjoy.
## Supported browsers
IE9+ and all other modern browsers.
## LESS
- For Bootstrap 2 (no longer officially supported), import `src/less/bootstrap2/bootstrap-switch.less`
- For Bootstrap 3, import `src/less/bootstrap3/bootstrap-switch.less`
## Bugs and feature requests
Have a bug or a feature request? Please first search for existing and closed issues. If your problem or idea is not addressed yet, [please open a new issue](https://github.com/Bttstrp/bootstrap-switch/issues/new).
A new issue should include a summary of the problem, the browser/OS environment in which it occurs, and a link to a reduced test case on your preferred playground.
If suitable, include the steps required to reproduce the bug.
Please do not use the issue tracker for personal support requests: [Stack Overflow](https://stackoverflow.com/questions/tagged/bootstrap-switch) is a better place to get help.
### Known issues
- Make sure `.form-control` is not applied to the input. Bootstrap does not support that, refer to [Checkboxes and radios](https://getbootstrap.com/css/#checkboxes-and-radios)
## Integrations
- Angular: [angular-bootstrap-switch](https://github.com/frapontillo/angular-bootstrap-switch)
- Angular: [angular-toggle-switch](https://github.com/JumpLink/angular-toggle-switch)
- Knockout: [knockout-bootstrap-switch](https://github.com/pauloortins/knockout-bootstrap-switch)
## License
Licensed under the [MIT License](https://github.com/Bttstrp/bootstrap-switch/blob/master/LICENSE).
|
PypiClean
|
/columbia-discord-bot-0.2.1.tar.gz/columbia-discord-bot-0.2.1/docs/_build/html/_static/pip/_internal/resolution/resolvelib/requirements.py
|
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
from pip._internal.req.req_install import InstallRequirement
from .base import Candidate, CandidateLookup, Requirement, format_name
class ExplicitRequirement(Requirement):
def __init__(self, candidate: Candidate) -> None:
self.candidate = candidate
def __str__(self) -> str:
return str(self.candidate)
def __repr__(self) -> str:
return "{class_name}({candidate!r})".format(
class_name=self.__class__.__name__,
candidate=self.candidate,
)
@property
def project_name(self) -> NormalizedName:
# No need to canonicalize - the candidate did this
return self.candidate.project_name
@property
def name(self) -> str:
# No need to canonicalize - the candidate did this
return self.candidate.name
def format_for_error(self) -> str:
return self.candidate.format_for_error()
def get_candidate_lookup(self) -> CandidateLookup:
return self.candidate, None
def is_satisfied_by(self, candidate: Candidate) -> bool:
return candidate == self.candidate
class SpecifierRequirement(Requirement):
def __init__(self, ireq: InstallRequirement) -> None:
assert ireq.link is None, "This is a link, not a specifier"
self._ireq = ireq
self._extras = frozenset(ireq.extras)
def __str__(self) -> str:
return str(self._ireq.req)
def __repr__(self) -> str:
return "{class_name}({requirement!r})".format(
class_name=self.__class__.__name__,
requirement=str(self._ireq.req),
)
@property
def project_name(self) -> NormalizedName:
assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
return canonicalize_name(self._ireq.req.name)
@property
def name(self) -> str:
return format_name(self.project_name, self._extras)
def format_for_error(self) -> str:
# Convert comma-separated specifiers into "A, B, ..., F and G"
# This makes the specifier a bit more "human readable", without
# risking a change in meaning. (Hopefully! Not all edge cases have
# been checked)
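        # Illustrative example (not from the original source): if str(self) is
        # "foo!=1.5,<2.0,>=1.0", `parts` becomes ["foo!=1.5", "<2.0", ">=1.0"]
        # and the result is "foo!=1.5, <2.0 and >=1.0".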
parts = [s.strip() for s in str(self).split(",")]
if len(parts) == 0:
return ""
elif len(parts) == 1:
return parts[0]
return ", ".join(parts[:-1]) + " and " + parts[-1]
def get_candidate_lookup(self) -> CandidateLookup:
return None, self._ireq
def is_satisfied_by(self, candidate: Candidate) -> bool:
assert candidate.name == self.name, (
f"Internal issue: Candidate is not for this requirement "
f"{candidate.name} vs {self.name}"
)
# We can safely always allow prereleases here since PackageFinder
# already implements the prerelease logic, and would have filtered out
# prerelease candidates if the user does not expect them.
assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
spec = self._ireq.req.specifier
return spec.contains(candidate.version, prereleases=True)
class RequiresPythonRequirement(Requirement):
"""A requirement representing Requires-Python metadata."""
def __init__(self, specifier: SpecifierSet, match: Candidate) -> None:
self.specifier = specifier
self._candidate = match
def __str__(self) -> str:
return f"Python {self.specifier}"
def __repr__(self) -> str:
return "{class_name}({specifier!r})".format(
class_name=self.__class__.__name__,
specifier=str(self.specifier),
)
@property
def project_name(self) -> NormalizedName:
return self._candidate.project_name
@property
def name(self) -> str:
return self._candidate.name
def format_for_error(self) -> str:
return str(self)
def get_candidate_lookup(self) -> CandidateLookup:
if self.specifier.contains(self._candidate.version, prereleases=True):
return self._candidate, None
return None, None
def is_satisfied_by(self, candidate: Candidate) -> bool:
assert candidate.name == self._candidate.name, "Not Python candidate"
# We can safely always allow prereleases here since PackageFinder
# already implements the prerelease logic, and would have filtered out
# prerelease candidates if the user does not expect them.
return self.specifier.contains(candidate.version, prereleases=True)
class UnsatisfiableRequirement(Requirement):
"""A requirement that cannot be satisfied."""
def __init__(self, name: NormalizedName) -> None:
self._name = name
def __str__(self) -> str:
return f"{self._name} (unavailable)"
def __repr__(self) -> str:
return "{class_name}({name!r})".format(
class_name=self.__class__.__name__,
name=str(self._name),
)
@property
def project_name(self) -> NormalizedName:
return self._name
@property
def name(self) -> str:
return self._name
def format_for_error(self) -> str:
return str(self)
def get_candidate_lookup(self) -> CandidateLookup:
return None, None
def is_satisfied_by(self, candidate: Candidate) -> bool:
return False
|
PypiClean
|
/pyactr-0.3.1.tar.gz/pyactr-0.3.1/tutorials/u3_multiple_objects.py
|
import string
import random
import pyactr as actr
class Model(object):
"""
Model searching and attending to various stimuli.
"""
def __init__(self, env, **kwargs):
self.m = actr.ACTRModel(environment=env, **kwargs)
actr.chunktype("pair", "probe answer")
actr.chunktype("goal", "state")
self.dm = self.m.decmem
self.m.visualBuffer("visual", "visual_location", self.dm, finst=30)
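        # finst=30 lets the visual buffer keep up to 30 recently attended
        # objects marked as attended, so searches requesting `attended False`
        # skip stimuli the model has already read (ACT-R's finst mechanism).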
start = actr.makechunk(nameofchunk="start", typename="chunk", value="start")
actr.makechunk(nameofchunk="attending", typename="chunk", value="attending")
actr.makechunk(nameofchunk="done", typename="chunk", value="done")
self.m.goal.add(actr.makechunk(typename="read", state=start))
self.m.set_goal("g2")
self.m.goals["g2"].delay=0.2
self.m.productionstring(name="find_probe", string="""
=g>
isa goal
state start
?visual_location>
buffer empty
==>
=g>
isa goal
state attend
?visual_location>
attended False
+visual_location>
isa _visuallocation
screen_x closest""") #this rule is used if automatic visual search does not put anything in the buffer
self.m.productionstring(name="check_probe", string="""
=g>
isa goal
state start
?visual_location>
buffer full
==>
=g>
isa goal
state attend""") #this rule is used if automatic visual search is enabled and it puts something in the buffer
self.m.productionstring(name="attend_probe", string="""
=g>
isa goal
state attend
=visual_location>
isa _visuallocation
?visual>
state free
==>
=g>
isa goal
state reading
+visual>
isa _visual
cmd move_attention
screen_pos =visual_location
~visual_location>""")
self.m.productionstring(name="encode_probe_and_find_new_location", string="""
=g>
isa goal
state reading
=visual>
isa _visual
value =val
?visual_location>
buffer empty
==>
=g>
isa goal
state attend
~visual>
?visual_location>
attended False
+visual_location>
isa _visuallocation
screen_x closest""")
if __name__ == "__main__":
stim_d = {key: {'text': x, 'position': (random.randint(10,630), random.randint(10, 310)), 'vis_delay': 10} for key, x in enumerate(string.ascii_uppercase)}
#stim_d = {key: {'text': x, 'position': (random.randint(10,630), random.randint(10, 310))} for key, x in enumerate(string.ascii_uppercase)}
print(stim_d)
#text = [{1: {'text': 'X', 'position': (10, 10)}, 2: {'text': 'Y', 'position': (10, 20)}, 3:{'text': 'Z', 'position': (10, 30)}},{1: {'text': 'A', 'position': (50, 10)}, 2: {'text': 'B', 'position': (50, 180)}, 3:{'text': 'C', 'position': (400, 180)}}]
environ = actr.Environment(focus_position=(0,0))
m = Model(environ, subsymbolic=True, latency_factor=0.4, decay=0.5, retrieval_threshold=-2, instantaneous_noise=0, automatic_visual_search=True, eye_mvt_scaling_parameter=0.05, eye_mvt_angle_parameter=10) #If you don't want to use the EMMA model, specify emma=False in here
sim = m.m.simulation(realtime=True, trace=True, gui=True, environment_process=environ.environment_process, stimuli=stim_d, triggers='X', times=50)
sim.run(10)
check = 0
for key in m.dm:
if key.typename == '_visual':
print(key, m.dm[key])
check += 1
print(check)
print(len(stim_d))
|
PypiClean
|
/evaluation_framework-1.3.tar.gz/evaluation_framework-1.3/evaluation_framework/Regression/regression_taskManager.py
|
from .regression_model import RegressionModel as Model
import csv
from collections import defaultdict
import codecs
import os
import pandas as pd
from evaluation_framework.abstract_taskManager import AbstractTaskManager
from numpy import mean
task_name = 'Regression'
"""
Manager of the Regression task
"""
class RegressionManager (AbstractTaskManager):
"""
It initializes the manager of the regression task.
data_manager: the data manager to read the dataset(s) and the input file with the vectors to evaluate
debugging_mode: {TRUE, FALSE}, TRUE to run the model by reporting all the errors and information; FALSE otherwise
"""
def __init__(self, data_manager, debugging_mode):
self.debugging_mode = debugging_mode
self.data_manager = data_manager
if debugging_mode:
print("Regression task manager initialized")
"""
It returns the task name.
"""
@staticmethod
def get_task_name():
return task_name
"""
It evaluates the Regression task.
vectors: dataframe which contains the vectors data
vector_file: path of the vector file
vector_size: size of the vectors
    results_folder: directory where the results must be stored
log_dictionary: dictionary to store all the information to store in the log file
scores_dictionary: dictionary to store all the scores which will be used in the comparison phase
"""
def evaluate(self, vectors, vector_file, vector_size, results_folder, log_dictionary, scores_dictionary):
log_errors = ""
gold_standard_filenames = self.get_gold_standard_file()
totalscores = defaultdict(dict)
for gold_standard_filename in gold_standard_filenames:
script_dir = os.path.dirname(__file__)
rel_path = "data/"+gold_standard_filename+'.tsv'
gold_standard_file = os.path.join(script_dir, rel_path)
regression_model_names = ["LR", "KNN", "M5"]
scores = defaultdict(list)
totalscores_element = defaultdict(list)
data, ignored = self.data_manager.intersect_vectors_goldStandard(vectors, vector_file, vector_size, gold_standard_file)
self.storeIgnored(results_folder, gold_standard_filename, ignored)
if data.size == 0:
log_errors += 'Regression : Problems in merging vector with gold standard ' + gold_standard_file + '\n'
if self.debugging_mode:
print('Regression : Problems in merging vector with gold standard ' + gold_standard_file)
else:
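                # average scores over 10 random shuffles of the merged data so
                # the reported metric does not depend on a single ordering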
for i in range(10):
data = data.sample(frac=1, random_state=i).reset_index(drop=True)
for model_name in regression_model_names:
# initialize the model
model = Model(task_name, model_name, self.debugging_mode)
# train and print score
try:
result = model.train(data)
result['gold_standard_file'] = gold_standard_filename
scores[model_name].append(result)
totalscores_element[model_name].append(result)
except Exception as e:
log_errors += 'File used as gold standard: ' + gold_standard_filename + '\n'
log_errors += 'Regression method: ' + model_name + '\n'
log_errors += str(e) + '\n'
self.storeResults(results_folder, gold_standard_filename, scores)
totalscores[gold_standard_filename] = totalscores_element
results_df = self.resultsAsDataFrame(totalscores)
scores_dictionary[task_name] = results_df
log_dictionary[task_name] = log_errors
"""
It stores the entities which are in the dataset used as gold standard, but not in the input file.
results_folder: directory where the results must be stored
gold_standard_filename: the current dataset used as gold standard
ignored: dataframe containing the ignored entities in the column NAME
"""
def storeIgnored(self, results_folder, gold_standard_filename, ignored):
if self.debugging_mode:
print('Regression : Ignored data: ' + str(len(ignored)))
file_ignored = codecs.open(results_folder+'/regression_'+gold_standard_filename+'_ignoredData.txt',"w", 'utf-8')
for ignored_tuple in ignored.itertuples():
            value = getattr(ignored_tuple, 'name')
            if isinstance(value, bytes):
                value = value.decode('utf-8', errors='ignore')
            if self.debugging_mode:
                print('Regression : Ignored data: ' + value)
            file_ignored.write(value + '\n')
file_ignored.close()
"""
It stores the results of the Regression task.
results_folder: directory where the results must be stored
gold_standard_filename: the current dataset used as gold standard
scores: dictionary with the model_name as key and the list of all the results returned by the model for the same model_name
"""
def storeResults(self, results_folder, gold_standard_filename, scores):
        with open(results_folder+'/regression_'+gold_standard_filename+'_results.csv', "w", newline='') as csv_file:
fieldnames = ['task_name', 'gold_standard_file', 'model_name', 'model_configuration', 'root_mean_squared_error']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for (method, scoresForMethod) in scores.items():
for score in scoresForMethod:
writer.writerow(score)
if self.debugging_mode:
print("Regression " + method, score)
"""
It converts the scores dictionary into a dataframe
scores: dictionary containing the gold_standard_filename as key and
as value a dictionary containing the model_name as key and
as value the list of all the results returned by the model for the same model_name
"""
def resultsAsDataFrame(self, scores):
data_dict = dict()
data_dict['task_name'] = list()
data_dict['gold_standard_file'] = list()
data_dict['model'] = list()
data_dict['model_configuration'] = list()
data_dict['metric'] = list()
data_dict['score_value'] = list()
metrics = self.get_metric_list()
for (gold_standard_filename, gold_standard_scores) in scores.items():
for (method, scoresForMethod) in gold_standard_scores.items():
for metric in metrics:
metric_scores = list()
for score in scoresForMethod:
metric_scores.append(score[metric])
metric_score = mean(metric_scores)
score = scoresForMethod[0]
configuration = score['model_configuration']
if configuration is None:
configuration='-'
data_dict['task_name'].append(score['task_name'])
data_dict['gold_standard_file'].append(score['gold_standard_file'])
data_dict['model'].append(score['model_name'])
data_dict['model_configuration'].append(configuration)
data_dict['metric'].append(metric)
data_dict['score_value'].append(metric_score)
results_df = pd.DataFrame(data_dict, columns = ['task_name', 'gold_standard_file', 'model', 'model_configuration', 'metric', 'score_value'])
return results_df
"""
It returns the dataset used as gold standard.
"""
@staticmethod
def get_gold_standard_file():
return ['Cities', 'MetacriticMovies', 'MetacriticAlbums', 'AAUP', 'Forbes']
"""
It returns the metrics used in the evaluation of the Classification task.
"""
@staticmethod
def get_metric_list():
return ['root_mean_squared_error']
|
PypiClean
|
/APIFuzzer-0.9.13-py3-none-any.whl/apifuzzer/fuzzer.py
|
from kitty.interfaces import WebInterface
from apifuzzer.fuzz_model import APIFuzzerModel
from apifuzzer.fuzzer_target.fuzz_request_sender import FuzzerTarget
from apifuzzer.openapi_template_generator import OpenAPITemplateGenerator
from apifuzzer.server_fuzzer import OpenApiServerFuzzer
from apifuzzer.utils import set_logger
from apifuzzer.version import get_version
class Fuzzer(object):
def __init__(
self,
report_dir,
test_level,
log_level,
basic_output,
alternate_url=None,
test_result_dst=None,
auth_headers=None,
api_definition_url=None,
api_definition_file=None,
junit_report_path=None,
):
self.base_url = None
self.alternate_url = alternate_url
self.templates = None
self.test_level = test_level
self.report_dir = report_dir
self.test_result_dst = test_result_dst
self.auth_headers = auth_headers if auth_headers else {}
self.junit_report_path = junit_report_path
self.logger = set_logger(log_level, basic_output)
self.logger.info("%s initialized", get_version())
self.api_definition_url = api_definition_url
self.api_definition_file = api_definition_file
def prepare(self):
# here we will be able to branch the template generator if we will support other than Swagger / OpenAPI
template_generator = OpenAPITemplateGenerator(
api_definition_url=self.api_definition_url,
api_definition_file=self.api_definition_file,
)
try:
template_generator.process_api_resources()
except Exception as e:
self.logger.error(f"Exception: {e}", exc_info=True)
raise e
self.templates = template_generator.templates
self.base_url = template_generator.compile_base_url(self.alternate_url)
def run(self):
target = FuzzerTarget(
name="target",
base_url=self.base_url,
report_dir=self.report_dir,
auth_headers=self.auth_headers,
junit_report_path=self.junit_report_path,
)
interface = WebInterface()
model = APIFuzzerModel()
for template in self.templates:
model.connect(template.compile_template())
model.content_type = template.get_content_type()
fuzzer = OpenApiServerFuzzer()
fuzzer.set_model(model)
fuzzer.set_target(target)
fuzzer.set_interface(interface)
fuzzer.start()
fuzzer.stop()
|
PypiClean
|
/nodejs_bin-18.4.0a3-py3-none-manylinux_2_12_x86_64.manylinux2010_x86_64.whl/nodejs/lib/node_modules/npm/node_modules/socks/typings/common/constants.d.ts
|
import { Duplex } from 'stream';
import { Socket, SocketConnectOpts } from 'net';
import { RequireOnlyOne } from './util';
declare const DEFAULT_TIMEOUT = 30000;
declare type SocksProxyType = 4 | 5;
declare const ERRORS: {
InvalidSocksCommand: string;
InvalidSocksCommandForOperation: string;
InvalidSocksCommandChain: string;
InvalidSocksClientOptionsDestination: string;
InvalidSocksClientOptionsExistingSocket: string;
InvalidSocksClientOptionsProxy: string;
InvalidSocksClientOptionsTimeout: string;
InvalidSocksClientOptionsProxiesLength: string;
InvalidSocksClientOptionsCustomAuthRange: string;
InvalidSocksClientOptionsCustomAuthOptions: string;
NegotiationError: string;
SocketClosed: string;
ProxyConnectionTimedOut: string;
InternalError: string;
InvalidSocks4HandshakeResponse: string;
Socks4ProxyRejectedConnection: string;
InvalidSocks4IncomingConnectionResponse: string;
Socks4ProxyRejectedIncomingBoundConnection: string;
InvalidSocks5InitialHandshakeResponse: string;
InvalidSocks5IntiailHandshakeSocksVersion: string;
InvalidSocks5InitialHandshakeNoAcceptedAuthType: string;
InvalidSocks5InitialHandshakeUnknownAuthType: string;
Socks5AuthenticationFailed: string;
InvalidSocks5FinalHandshake: string;
InvalidSocks5FinalHandshakeRejected: string;
InvalidSocks5IncomingConnectionResponse: string;
Socks5ProxyRejectedIncomingBoundConnection: string;
};
declare const SOCKS_INCOMING_PACKET_SIZES: {
Socks5InitialHandshakeResponse: number;
Socks5UserPassAuthenticationResponse: number;
Socks5ResponseHeader: number;
Socks5ResponseIPv4: number;
Socks5ResponseIPv6: number;
Socks5ResponseHostname: (hostNameLength: number) => number;
Socks4Response: number;
};
declare type SocksCommandOption = 'connect' | 'bind' | 'associate';
declare enum SocksCommand {
connect = 1,
bind = 2,
associate = 3
}
declare enum Socks4Response {
Granted = 90,
Failed = 91,
Rejected = 92,
RejectedIdent = 93
}
declare enum Socks5Auth {
NoAuth = 0,
GSSApi = 1,
UserPass = 2
}
declare const SOCKS5_CUSTOM_AUTH_START = 128;
declare const SOCKS5_CUSTOM_AUTH_END = 254;
declare const SOCKS5_NO_ACCEPTABLE_AUTH = 255;
declare enum Socks5Response {
Granted = 0,
Failure = 1,
NotAllowed = 2,
NetworkUnreachable = 3,
HostUnreachable = 4,
ConnectionRefused = 5,
TTLExpired = 6,
CommandNotSupported = 7,
AddressNotSupported = 8
}
declare enum Socks5HostType {
IPv4 = 1,
Hostname = 3,
IPv6 = 4
}
declare enum SocksClientState {
Created = 0,
Connecting = 1,
Connected = 2,
SentInitialHandshake = 3,
ReceivedInitialHandshakeResponse = 4,
SentAuthentication = 5,
ReceivedAuthenticationResponse = 6,
SentFinalHandshake = 7,
ReceivedFinalResponse = 8,
BoundWaitingForConnection = 9,
Established = 10,
Disconnected = 11,
Error = 99
}
/**
* Represents a SocksProxy
*/
declare type SocksProxy = RequireOnlyOne<{
ipaddress?: string;
host?: string;
port: number;
type: SocksProxyType;
userId?: string;
password?: string;
custom_auth_method?: number;
custom_auth_request_handler?: () => Promise<Buffer>;
custom_auth_response_size?: number;
custom_auth_response_handler?: (data: Buffer) => Promise<boolean>;
}, 'host' | 'ipaddress'>;
/**
* Represents a remote host
*/
interface SocksRemoteHost {
host: string;
port: number;
}
/**
* SocksClient connection options.
*/
interface SocksClientOptions {
command: SocksCommandOption;
destination: SocksRemoteHost;
proxy: SocksProxy;
timeout?: number;
existing_socket?: Duplex;
set_tcp_nodelay?: boolean;
socket_options?: SocketConnectOpts;
}
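// Illustrative usage (not part of the original declarations): a minimal
// SocksClientOptions object for a CONNECT request through a SOCKS5 proxy.
//
//   const options: SocksClientOptions = {
//     command: 'connect',
//     destination: { host: 'example.com', port: 80 },
//     proxy: { host: '127.0.0.1', port: 1080, type: 5 },
//   };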
/**
* SocksClient chain connection options.
*/
interface SocksClientChainOptions {
command: 'connect';
destination: SocksRemoteHost;
proxies: SocksProxy[];
timeout?: number;
randomizeChain?: false;
}
interface SocksClientEstablishedEvent {
socket: Socket;
remoteHost?: SocksRemoteHost;
}
declare type SocksClientBoundEvent = SocksClientEstablishedEvent;
interface SocksUDPFrameDetails {
frameNumber?: number;
remoteHost: SocksRemoteHost;
data: Buffer;
}
export { DEFAULT_TIMEOUT, ERRORS, SocksProxyType, SocksCommand, Socks4Response, Socks5Auth, Socks5HostType, Socks5Response, SocksClientState, SocksProxy, SocksRemoteHost, SocksCommandOption, SocksClientOptions, SocksClientChainOptions, SocksClientEstablishedEvent, SocksClientBoundEvent, SocksUDPFrameDetails, SOCKS_INCOMING_PACKET_SIZES, SOCKS5_CUSTOM_AUTH_START, SOCKS5_CUSTOM_AUTH_END, SOCKS5_NO_ACCEPTABLE_AUTH, };
|
PypiClean
|
/vwoptimize-0.10.2.tar.gz/vwoptimize-0.10.2/vwoptimizelib/third_party/networkx/algorithms/centrality/katz.py
|
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import not_implemented_for
__author__ = "\n".join(['Aric Hagberg ([email protected])',
'Pieter Swart ([email protected])',
'Sasha Gutfraind ([email protected])',
'Vincent Gauthier ([email protected])'])
__all__ = ['katz_centrality',
'katz_centrality_numpy']
@not_implemented_for('multigraph')
def katz_centrality(G, alpha=0.1, beta=1.0,
max_iter=1000, tol=1.0e-6, nstart=None, normalized=True,
weight = 'weight'):
r"""Compute the Katz centrality for the nodes of the graph G.
Katz centrality computes the centrality for a node based on the centrality
of its neighbors. It is a generalization of the eigenvector centrality. The
Katz centrality for node `i` is
.. math::
x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
where `A` is the adjacency matrix of the graph G with eigenvalues `\lambda`.
The parameter `\beta` controls the initial centrality and
.. math::
\alpha < \frac{1}{\lambda_{max}}.
Katz centrality computes the relative influence of a node within a
network by measuring the number of the immediate neighbors (first
degree nodes) and also all other nodes in the network that connect
to the node under consideration through these immediate neighbors.
Extra weight can be provided to immediate neighbors through the
parameter :math:`\beta`. Connections made with distant neighbors
are, however, penalized by an attenuation factor `\alpha` which
should be strictly less than the inverse largest eigenvalue of the
adjacency matrix in order for the Katz centrality to be computed
correctly. More information is provided in [1]_ .
Parameters
----------
G : graph
A NetworkX graph
alpha : float
Attenuation factor
beta : scalar or dictionary, optional (default=1.0)
Weight attributed to the immediate neighborhood. If not a scalar, the
        dictionary must have a value for every node.
max_iter : integer, optional (default=1000)
Maximum number of iterations in power method.
tol : float, optional (default=1.0e-6)
Error tolerance used to check convergence in power method iteration.
nstart : dictionary, optional
Starting value of Katz iteration for each node.
normalized : bool, optional (default=True)
If True normalize the resulting values.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
Returns
-------
nodes : dictionary
Dictionary of nodes with Katz centrality as the value.
Raises
------
NetworkXError
If the parameter `beta` is not a scalar but lacks a value for at least
one node
Examples
--------
>>> import math
>>> G = nx.path_graph(4)
>>> phi = (1+math.sqrt(5))/2.0 # largest eigenvalue of adj matrix
>>> centrality = nx.katz_centrality(G,1/phi-0.01)
>>> for n,c in sorted(centrality.items()):
... print("%d %0.2f"%(n,c))
0 0.37
1 0.60
2 0.60
3 0.37
See Also
--------
katz_centrality_numpy
eigenvector_centrality
eigenvector_centrality_numpy
pagerank
hits
Notes
-----
Katz centrality was introduced by [2]_.
    This algorithm uses the power method to find the eigenvector
corresponding to the largest eigenvalue of the adjacency matrix of G.
The constant alpha should be strictly less than the inverse of largest
eigenvalue of the adjacency matrix for the algorithm to converge.
The iteration will stop after max_iter iterations or an error tolerance of
number_of_nodes(G)*tol has been reached.
When `\alpha = 1/\lambda_{max}` and `\beta=0`, Katz centrality is the same
as eigenvector centrality.
For directed graphs this finds "left" eigenvectors which corresponds
to the in-edges in the graph. For out-edges Katz centrality
first reverse the graph with G.reverse().
References
----------
.. [1] Mark E. J. Newman:
Networks: An Introduction.
Oxford University Press, USA, 2010, p. 720.
.. [2] Leo Katz:
A New Status Index Derived from Sociometric Index.
Psychometrika 18(1):39–43, 1953
http://phya.snu.ac.kr/~dkim/PRL87278701.pdf
"""
from math import sqrt
if len(G) == 0:
return {}
nnodes = G.number_of_nodes()
if nstart is None:
# choose starting vector with entries of 0
x = dict([(n,0) for n in G])
else:
x = nstart
try:
b = dict.fromkeys(G,float(beta))
except (TypeError,ValueError,AttributeError):
b = beta
if set(beta) != set(G):
raise nx.NetworkXError('beta dictionary '
'must have a value for every node')
# make up to max_iter iterations
for i in range(max_iter):
xlast = x
x = dict.fromkeys(xlast, 0)
        # do the multiplication y^T = Alpha * x^T A + Beta
for n in x:
for nbr in G[n]:
x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)
for n in x:
x[n] = alpha*x[n] + b[n]
# check convergence
err = sum([abs(x[n]-xlast[n]) for n in x])
if err < nnodes*tol:
if normalized:
# normalize vector
try:
s = 1.0/sqrt(sum(v**2 for v in x.values()))
# this should never be zero?
except ZeroDivisionError:
s = 1.0
else:
s = 1
for n in x:
x[n] *= s
return x
raise nx.NetworkXError('Power iteration failed to converge in '
'%d iterations.' % max_iter)
@not_implemented_for('multigraph')
def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True,
weight = 'weight'):
r"""Compute the Katz centrality for the graph G.
Katz centrality computes the centrality for a node based on the centrality
of its neighbors. It is a generalization of the eigenvector centrality. The
Katz centrality for node `i` is
.. math::
x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
where `A` is the adjacency matrix of the graph G with eigenvalues `\lambda`.
The parameter `\beta` controls the initial centrality and
.. math::
\alpha < \frac{1}{\lambda_{max}}.
Katz centrality computes the relative influence of a node within a
network by measuring the number of the immediate neighbors (first
degree nodes) and also all other nodes in the network that connect
to the node under consideration through these immediate neighbors.
Extra weight can be provided to immediate neighbors through the
parameter :math:`\beta`. Connections made with distant neighbors
are, however, penalized by an attenuation factor `\alpha` which
should be strictly less than the inverse largest eigenvalue of the
adjacency matrix in order for the Katz centrality to be computed
correctly. More information is provided in [1]_ .
Parameters
----------
G : graph
A NetworkX graph
alpha : float
Attenuation factor
beta : scalar or dictionary, optional (default=1.0)
        Weight attributed to the immediate neighborhood. If not a scalar, the
        dictionary must have a value for every node.
normalized : bool
If True normalize the resulting values.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
Returns
-------
nodes : dictionary
Dictionary of nodes with Katz centrality as the value.
Raises
------
NetworkXError
If the parameter `beta` is not a scalar but lacks a value for at least
one node
Examples
--------
>>> import math
>>> G = nx.path_graph(4)
>>> phi = (1+math.sqrt(5))/2.0 # largest eigenvalue of adj matrix
>>> centrality = nx.katz_centrality_numpy(G,1/phi)
>>> for n,c in sorted(centrality.items()):
... print("%d %0.2f"%(n,c))
0 0.37
1 0.60
2 0.60
3 0.37
See Also
--------
katz_centrality
eigenvector_centrality_numpy
eigenvector_centrality
pagerank
hits
Notes
-----
Katz centrality was introduced by [2]_.
This algorithm uses a direct linear solver to solve the above equation.
The constant alpha should be strictly less than the inverse of largest
eigenvalue of the adjacency matrix for there to be a solution. When
`\alpha = 1/\lambda_{max}` and `\beta=0`, Katz centrality is the same as
eigenvector centrality.
For directed graphs this finds "left" eigenvectors which corresponds
to the in-edges in the graph. For out-edges Katz centrality
first reverse the graph with G.reverse().
References
----------
.. [1] Mark E. J. Newman:
Networks: An Introduction.
Oxford University Press, USA, 2010, p. 720.
.. [2] Leo Katz:
A New Status Index Derived from Sociometric Index.
Psychometrika 18(1):39–43, 1953
http://phya.snu.ac.kr/~dkim/PRL87278701.pdf
"""
try:
import numpy as np
except ImportError:
raise ImportError('Requires NumPy: http://scipy.org/')
if len(G) == 0:
return {}
try:
nodelist = beta.keys()
if set(nodelist) != set(G):
raise nx.NetworkXError('beta dictionary '
'must have a value for every node')
b = np.array(list(beta.values()), dtype=float)
except AttributeError:
nodelist = G.nodes()
try:
b = np.ones((len(nodelist),1))*float(beta)
except (TypeError,ValueError,AttributeError):
raise nx.NetworkXError('beta must be a number')
A = nx.adj_matrix(G, nodelist=nodelist, weight=weight).todense().T
n = np.array(A).shape[0]
    centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b)
if normalized:
norm = np.sign(sum(centrality)) * np.linalg.norm(centrality)
else:
norm = 1.0
centrality = dict(zip(nodelist, map(float,centrality/norm)))
return centrality
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
import scipy
except:
raise SkipTest("SciPy not available")
|
PypiClean
|
/superdesk-planning-2.4.0.tar.gz/superdesk-planning-2.4.0/server/planning/commands/flag_expired_items.py
|
from flask import current_app as app
from superdesk import Command, command, get_resource_service
from superdesk.logging import logger
from superdesk.utc import utcnow
from superdesk.celery_task_utils import get_lock_id
from superdesk.lock import lock, unlock, remove_locks
from superdesk.notification import push_notification
from datetime import timedelta, datetime
from eve.utils import config
from bson.objectid import ObjectId
class FlagExpiredItems(Command):
"""
Flag expired `Events` and `Planning` items with `{'expired': True}`.
Example:
::
$ python manage.py planning:flag_expired
"""
log_msg = ""
def run(self):
now = utcnow()
self.log_msg = "Expiry Time: {}.".format(now)
logger.info("{} Starting to remove expired content at.".format(self.log_msg))
expire_interval = app.config.get("PLANNING_EXPIRY_MINUTES", 0)
if expire_interval == 0:
logger.info("{} PLANNING_EXPIRY_MINUTES=0, not flagging items as expired")
return
lock_name = get_lock_id("planning", "flag_expired")
if not lock(lock_name, expire=610):
logger.info("{} Flag expired items task is already running".format(self.log_msg))
return
expiry_datetime = now - timedelta(minutes=expire_interval)
try:
self._flag_expired_events(expiry_datetime)
except Exception as e:
logger.exception(e)
try:
self._flag_expired_planning(expiry_datetime)
except Exception as e:
logger.exception(e)
unlock(lock_name)
logger.info("{} Completed flagging expired items.".format(self.log_msg))
remove_locks()
logger.info("{} Starting to remove expired planning versions.".format(self.log_msg))
self._remove_expired_published_planning()
logger.info("{} Completed removing expired planning versions.".format(self.log_msg))
def _flag_expired_events(self, expiry_datetime):
logger.info("{} Starting to flag expired events".format(self.log_msg))
events_service = get_resource_service("events")
planning_service = get_resource_service("planning")
locked_events = set()
events_in_use = set()
events_expired = set()
plans_expired = set()
# Obtain the full list of Events that we're to process first
# As subsequent queries will change the list of returned items
events = dict()
for items in events_service.get_expired_items(expiry_datetime):
events.update({item[config.ID_FIELD]: item for item in items})
self._set_event_plans(events)
for event_id, event in events.items():
if event.get("lock_user"):
locked_events.add(event_id)
elif self._get_event_schedule(event) > expiry_datetime:
events_in_use.add(event_id)
else:
events_expired.add(event_id)
events_service.system_update(event_id, {"expired": True}, event)
for plan in event.get("_plans", []):
plan_id = plan[config.ID_FIELD]
planning_service.system_update(plan_id, {"expired": True}, plan)
plans_expired.add(plan_id)
if len(locked_events) > 0:
logger.info(
"{} Skipping {} locked Events: {}".format(self.log_msg, len(locked_events), list(locked_events))
)
if len(events_in_use) > 0:
logger.info(
"{} Skipping {} Events in use: {}".format(self.log_msg, len(events_in_use), list(events_in_use))
)
if len(events_expired) > 0:
push_notification("events:expired", items=list(events_expired))
if len(plans_expired) > 0:
push_notification("planning:expired", items=list(plans_expired))
logger.info("{} {} Events expired: {}".format(self.log_msg, len(events_expired), list(events_expired)))
def _flag_expired_planning(self, expiry_datetime):
logger.info("{} Starting to flag expired planning items".format(self.log_msg))
planning_service = get_resource_service("planning")
# Obtain the full list of Planning items that we're to process first
        # As subsequent queries will change the list of returned items
plans = dict()
for items in planning_service.get_expired_items(expiry_datetime):
plans.update({item[config.ID_FIELD]: item for item in items})
locked_plans = set()
plans_expired = set()
for plan_id, plan in plans.items():
if plan.get("lock_user"):
locked_plans.add(plan_id)
else:
planning_service.system_update(plan[config.ID_FIELD], {"expired": True}, plan)
plans_expired.add(plan_id)
if len(locked_plans) > 0:
logger.info(
"{} Skipping {} locked Planning items: {}".format(self.log_msg, len(locked_plans), list(locked_plans))
)
if len(plans_expired) > 0:
push_notification("planning:expired", items=list(plans_expired))
logger.info("{} {} Planning items expired: {}".format(self.log_msg, len(plans_expired), list(plans_expired)))
@staticmethod
def _set_event_plans(events):
planning_service = get_resource_service("planning")
for plan in planning_service.get_from_mongo(req=None, lookup={"event_item": {"$in": list(events.keys())}}):
event = events[plan["event_item"]]
if "_plans" not in event:
event["_plans"] = []
event["_plans"].append(plan)
@staticmethod
def _get_event_schedule(event):
latest_scheduled = datetime.strptime(event["dates"]["end"], "%Y-%m-%dT%H:%M:%S%z")
for plan in event.get("_plans", []):
# First check the Planning item's planning date
# and compare to the Event's end date
if latest_scheduled < plan.get("planning_date", latest_scheduled):
latest_scheduled = plan.get("planning_date")
# Next go through all the coverage's scheduled dates
# and compare to the latest scheduled date
for planning_schedule in plan.get("_planning_schedule", []):
scheduled = planning_schedule.get("scheduled")
if scheduled and isinstance(scheduled, str):
                scheduled = datetime.strptime(scheduled, "%Y-%m-%dT%H:%M:%S%z")
if scheduled and (latest_scheduled < scheduled):
latest_scheduled = scheduled
# Finally return the latest scheduled date among the Event, Planning and Coverages
return latest_scheduled
@staticmethod
def _remove_expired_published_planning():
"""Expire planning versions
Expiry of the planning versions mirrors the expiry of items within the publish queue in Superdesk so it uses the
same configuration value
:param self:
:return:
"""
expire_interval = app.config.get("PUBLISH_QUEUE_EXPIRY_MINUTES", 0)
if expire_interval:
expire_time = utcnow() - timedelta(minutes=expire_interval)
logger.info("Removing planning history items created before {}".format(str(expire_time)))
get_resource_service("published_planning").delete({"_id": {"$lte": ObjectId.from_datetime(expire_time)}})
command("planning:flag_expired", FlagExpiredItems())
|
PypiClean
|
/kashgari-tf-0.5.5.tar.gz/kashgari-tf-0.5.5/setup.tf.py
|
import codecs
import os
import pathlib
import re
from setuptools import find_packages, setup
HERE = pathlib.Path(__file__).parent
def read(*parts):
with codecs.open(os.path.join(HERE, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
NAME = 'kashgari-tf'
__author__ = "BrikerMan"
__copyright__ = "Copyright 2018, BrikerMan"
__credits__ = []
__license__ = "Apache License 2.0"
__maintainer__ = "BrikerMan"
__email__ = "[email protected]"
__url__ = 'https://github.com/BrikerMan/Kashgari'
__description__ = 'Simple, Keras-powered multilingual NLP framework,' \
' allows you to build your models in 5 minutes for named entity recognition (NER),' \
' part-of-speech tagging (PoS) and text classification tasks. ' \
'Includes BERT, GPT-2 and word2vec embedding.'
__version__ = '0.5.5'
README = (HERE / "README.md").read_text(encoding='utf-8')
with codecs.open('requirements.txt', 'r', 'utf8') as reader:
install_requires = list(map(lambda x: x.strip(), reader.readlines()))
setup(
    name=NAME,
version=__version__,
description=__description__,
python_requires='>3.6',
long_description=README,
long_description_content_type="text/markdown",
author=__author__,
author_email=__email__,
url=__url__,
packages=find_packages(exclude=('tests',)),
install_requires=install_requires,
include_package_data=True,
license=__license__,
classifiers=[
'License :: OSI Approved :: Apache Software License',
# 'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
)
if __name__ == "__main__":
print("Hello world")
|
PypiClean
|
/w3blog-0.5.2-py3-none-any.whl/weblog/templatetags/weblog_extras.py
|
from django import template
from django.utils import translation
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
from django.conf import settings
from weblog.apps import SETTINGS as blog_settings
from weblog.models import Category, CategoryTranslation, BlogPost
import datetime
IS_MULTILINGUAL = blog_settings['multilingual']
MONTHS = (
_('January'),
_('February'),
_('March'),
_('April'),
_('May'),
_('June'),
_('July'),
_('August'),
_('September'),
_('October'),
_('November'),
_('December'),
)
register = template.Library()
@register.inclusion_tag('weblog/sidebar_categories.html')
def get_sidebar_categories(selected_cat_slug=None):
now = datetime.datetime.now()
current_language = translation.get_language()
if current_language is None:
current_language = settings.LANGUAGE_CODE
context_dict = {'categories': [], 'selected_cat_slug': selected_cat_slug}
for raw_category in Category.objects.all():
next_category = {'name': raw_category.name, 'slug': raw_category.slug}
if CategoryTranslation.objects.filter(
category=raw_category).count() > 0 and IS_MULTILINGUAL:
for category_translation in CategoryTranslation.objects.filter(
category=raw_category):
if current_language[0:2] == category_translation.language[0:2]:
next_category['name'] = category_translation.name
context_dict['categories'].append(next_category)
if BlogPost.objects.filter(
published=True, publish_date__lte=now,
categories=None).count() > 0:
context_dict['categories'].append({'name': pgettext_lazy(
'Posts without category', 'Uncategorized'), 'slug': 'misc'})
return context_dict
@register.inclusion_tag('weblog/sidebar_archive.html')
def get_sidebar_archive():
if BlogPost.objects.filter(published=True).count() < 1:
return {}
now = datetime.datetime.now()
oldest_post = BlogPost.objects.filter(published=True).reverse()[0]
first_year = oldest_post.publish_date.year
first_month = oldest_post.publish_date.month
newest_post = BlogPost.objects.filter(
published=True, publish_date__lte=now)[0]
latest_year = newest_post.publish_date.year
latest_month = newest_post.publish_date.month
c_month = first_month
c_year = first_year
archive = []
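    # `archive` is built as a list of (year, [(month_number, month_name), ...])
    # tuples and reversed at the end so the newest year comes first, e.g.
    # [(2021, [(1, 'January'), (3, 'March')]), (2020, ...)] (values illustrative).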
while c_year <= latest_year:
year_posts = BlogPost.objects.filter(
publish_date__year=c_year, publish_date__lte=now, published=True)
if year_posts.count() > 0:
this_years_months = []
while (c_year < latest_year or c_month <= latest_month) \
and c_month <= 12:
if year_posts.filter(
publish_date__month=c_month, publish_date__lte=now,
published=True).count() > 0:
this_years_months.append((c_month, MONTHS[c_month - 1]))
c_month += 1
archive.append((c_year, this_years_months))
c_year += 1
c_month = 1
archive.reverse()
return {'archive': archive}
|
PypiClean
|
/rdkit_pypi-2023.3.1b1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/rdkit/ML/Data/DataUtils.py
|
import csv
from rdkit import RDRandom as random
import re
import numpy
from rdkit.DataStructs import BitUtils
from rdkit.ML.Data import MLData
import pickle
from rdkit.utils import fileutils
def permutation(nToDo):
res = list(range(nToDo))
random.shuffle(res, random=random.random)
return res
def WriteData(outFile, varNames, qBounds, examples):
""" writes out a .qdat file
**Arguments**
- outFile: a file object
- varNames: a list of variable names
- qBounds: the list of quantization bounds (should be the same length
as _varNames_)
- examples: the data to be written
"""
outFile.write('# Quantized data from DataUtils\n')
outFile.write('# ----------\n')
outFile.write('# Variable Table\n')
for i in range(len(varNames)):
outFile.write('# %s %s\n' % (varNames[i], str(qBounds[i])))
outFile.write('# ----------\n')
for example in examples:
outFile.write(' '.join([str(e) for e in example]) + '\n')
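# Illustrative .qdat output of WriteData (variable name, bounds and values
# are made up):
#
#   # Quantized data from DataUtils
#   # ----------
#   # Variable Table
#   # activity [0.5, 1.5]
#   # ----------
#   0 1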
def ReadVars(inFile):
""" reads the variables and quantization bounds from a .qdat or .dat file
**Arguments**
- inFile: a file object
**Returns**
a 2-tuple containing:
1) varNames: a list of the variable names
2) qbounds: the list of quantization bounds for each variable
"""
varNames = []
qBounds = []
fileutils.MoveToMatchingLine(inFile, 'Variable Table')
inLine = inFile.readline()
while inLine.find('# ----') == -1:
splitLine = inLine[2:].split('[')
varNames.append(splitLine[0].strip())
qBounds.append(splitLine[1][:-2])
inLine = inFile.readline()
for i in range(len(qBounds)):
if qBounds[i] != '':
l = qBounds[i].split(',')
qBounds[i] = []
for item in l:
qBounds[i].append(float(item))
else:
qBounds[i] = []
return varNames, qBounds
def ReadQuantExamples(inFile):
""" reads the examples from a .qdat file
**Arguments**
- inFile: a file object
**Returns**
a 2-tuple containing:
1) the names of the examples
2) a list of lists containing the examples themselves
**Note**
because this is reading a .qdat file, it assumed that all variable values
are integers
"""
expr1 = re.compile(r'^#')
expr2 = re.compile(r'[ ]+|[\t]+')
examples = []
names = []
inLine = inFile.readline()
while inLine:
if expr1.search(inLine) is None:
resArr = expr2.split(inLine)
if len(resArr) > 1:
examples.append([int(x) for x in resArr[1:]])
names.append(resArr[0])
inLine = inFile.readline()
return names, examples
def ReadGeneralExamples(inFile):
""" reads the examples from a .dat file
**Arguments**
- inFile: a file object
**Returns**
a 2-tuple containing:
1) the names of the examples
2) a list of lists containing the examples themselves
**Note**
- this attempts to convert variable values to ints, then floats.
if those both fail, they are left as strings
"""
expr1 = re.compile(r'^#')
expr2 = re.compile(r'[ ]+|[\t]+')
examples = []
names = []
inLine = inFile.readline()
while inLine:
if expr1.search(inLine) is None:
resArr = expr2.split(inLine)[:-1]
if len(resArr) > 1:
for i in range(1, len(resArr)):
d = resArr[i]
try:
resArr[i] = int(d)
except ValueError:
try:
resArr[i] = float(d)
except ValueError:
pass
examples.append(resArr[1:])
names.append(resArr[0])
inLine = inFile.readline()
return names, examples
def BuildQuantDataSet(fileName):
""" builds a data set from a .qdat file
**Arguments**
- fileName: the name of the .qdat file
**Returns**
an _MLData.MLQuantDataSet_
"""
with open(fileName, 'r') as inFile:
varNames, qBounds = ReadVars(inFile)
ptNames, examples = ReadQuantExamples(inFile)
data = MLData.MLQuantDataSet(examples, qBounds=qBounds, varNames=varNames, ptNames=ptNames)
return data
def BuildDataSet(fileName):
""" builds a data set from a .dat file
**Arguments**
- fileName: the name of the .dat file
**Returns**
an _MLData.MLDataSet_
"""
with open(fileName, 'r') as inFile:
varNames, qBounds = ReadVars(inFile)
ptNames, examples = ReadGeneralExamples(inFile)
data = MLData.MLDataSet(examples, qBounds=qBounds, varNames=varNames, ptNames=ptNames)
return data
def CalcNPossibleUsingMap(data, order, qBounds, nQBounds=None, silent=True):
""" calculates the number of possible values for each variable in a data set
**Arguments**
- data: a list of examples
- order: the ordering map between the variables in _data_ and _qBounds_
- qBounds: the quantization bounds for the variables
**Returns**
a list with the number of possible values each variable takes on in the data set
**Notes**
- variables present in _qBounds_ will have their _nPossible_ number read
      from _qBounds_
- _nPossible_ for other numeric variables will be calculated
"""
numericTypes = (int, float, numpy.int64, numpy.int32, numpy.int16)
if not silent:
print('order:', order, len(order))
print('qB:', qBounds)
# print('nQB:',nQBounds, len(nQBounds))
assert (qBounds and len(order) == len(qBounds)) or (nQBounds and len(order) == len(nQBounds)), \
'order/qBounds mismatch'
nVars = len(order)
nPossible = [-1] * nVars
cols = list(range(nVars))
for i in range(nVars):
if nQBounds and nQBounds[i] != 0:
nPossible[i] = -1
cols.remove(i)
elif len(qBounds[i]) > 0:
nPossible[i] = len(qBounds[i])
cols.remove(i)
nPts = len(data)
for i in range(nPts):
for col in cols[:]:
d = data[i][order[col]]
if type(d) in numericTypes:
if int(d) == d:
nPossible[col] = max(int(d), nPossible[col])
else:
nPossible[col] = -1
cols.remove(col)
else:
if not silent:
print('bye bye col %d: %s' % (col, repr(d)))
nPossible[col] = -1
cols.remove(col)
return [int(x) + 1 for x in nPossible]
def WritePickledData(outName, data):
""" writes either a .qdat.pkl or a .dat.pkl file
**Arguments**
- outName: the name of the file to be used
- data: either an _MLData.MLDataSet_ or an _MLData.MLQuantDataSet_
"""
varNames = data.GetVarNames()
qBounds = data.GetQuantBounds()
ptNames = data.GetPtNames()
examples = data.GetAllData()
with open(outName, 'wb+') as outFile:
pickle.dump(varNames, outFile)
pickle.dump(qBounds, outFile)
pickle.dump(ptNames, outFile)
pickle.dump(examples, outFile)
def TakeEnsemble(vect, ensembleIds, isDataVect=False):
"""
>>> v = [10,20,30,40,50]
>>> TakeEnsemble(v,(1,2,3))
[20, 30, 40]
>>> v = ['foo',10,20,30,40,50,1]
>>> TakeEnsemble(v,(1,2,3),isDataVect=True)
['foo', 20, 30, 40, 1]
"""
if isDataVect:
ensembleIds = [x + 1 for x in ensembleIds]
vect = [vect[0]] + [vect[x] for x in ensembleIds] + [vect[-1]]
else:
vect = [vect[x] for x in ensembleIds]
return vect
def DBToData(dbName, tableName, user='sysdba', password='masterkey', dupCol=-1, what='*', where='',
join='', pickleCol=-1, pickleClass=None, ensembleIds=None):
""" constructs an _MLData.MLDataSet_ from a database
**Arguments**
- dbName: the name of the database to be opened
- tableName: the table name containing the data in the database
- user: the user name to be used to connect to the database
- password: the password to be used to connect to the database
- dupCol: if nonzero specifies which column should be used to recognize
duplicates.
**Returns**
an _MLData.MLDataSet_
**Notes**
- this uses Dbase.DataUtils functionality
"""
from rdkit.Dbase.DbConnection import DbConnect
conn = DbConnect(dbName, tableName, user, password)
res = conn.GetData(fields=what, where=where, join=join, removeDups=dupCol, forceList=1)
nPts = len(res)
vals = [None] * nPts
ptNames = [None] * nPts
classWorks = True
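  # `classWorks` starts out optimistic: if constructing `pickleClass` from the
  # raw column value ever raises, the loop falls back to pickle.loads for all
  # remaining rows.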
for i in range(nPts):
tmp = list(res[i])
ptNames[i] = tmp.pop(0)
if pickleCol >= 0:
if not pickleClass or not classWorks:
tmp[pickleCol] = pickle.loads(str(tmp[pickleCol]))
else:
try:
tmp[pickleCol] = pickleClass(str(tmp[pickleCol]))
except Exception:
tmp[pickleCol] = pickle.loads(str(tmp[pickleCol]))
classWorks = False
if ensembleIds:
tmp[pickleCol] = BitUtils.ConstructEnsembleBV(tmp[pickleCol], ensembleIds)
else:
if ensembleIds:
tmp = TakeEnsemble(tmp, ensembleIds, isDataVect=True)
vals[i] = tmp
varNames = conn.GetColumnNames(join=join, what=what)
data = MLData.MLDataSet(vals, varNames=varNames, ptNames=ptNames)
return data
def TextToData(reader, ignoreCols=[], onlyCols=None):
""" constructs an _MLData.MLDataSet_ from a bunch of text
#DOC
**Arguments**
- reader needs to be iterable and return lists of elements
(like a csv.reader)
**Returns**
an _MLData.MLDataSet_
"""
varNames = next(reader)
if not onlyCols:
keepCols = []
for i, name in enumerate(varNames):
if name not in ignoreCols:
keepCols.append(i)
else:
keepCols = [-1] * len(onlyCols)
for i, name in enumerate(varNames):
if name in onlyCols:
keepCols[onlyCols.index(name)] = i
nCols = len(varNames)
varNames = tuple([varNames[x] for x in keepCols])
nVars = len(varNames)
vals = []
ptNames = []
for splitLine in reader:
if len(splitLine):
if len(splitLine) != nCols:
raise ValueError('unequal line lengths')
tmp = [splitLine[x] for x in keepCols]
ptNames.append(tmp[0])
pt = [None] * (nVars - 1)
for j in range(nVars - 1):
try:
val = int(tmp[j + 1])
except ValueError:
try:
val = float(tmp[j + 1])
except ValueError:
val = str(tmp[j + 1])
pt[j] = val
vals.append(pt)
data = MLData.MLDataSet(vals, varNames=varNames, ptNames=ptNames)
return data
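# Minimal usage sketch (hypothetical helper, not part of the original API):
# feed TextToData() a csv.reader over an in-memory file. The header row
# names the variables, the first column names the points, and fields are
# converted to int, then float, falling back to str.
def _TextToDataSketch():  # pragma: nocover
  import csv
  import io
  txt = io.StringIO('name,x,y\npt1,1,2.5\npt2,3,spam\n')
  data = TextToData(csv.reader(txt))
  return data.GetNPts()  # -> 2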
def TextFileToData(fName, onlyCols=None):
"""
#DOC
"""
ext = fName.split('.')[-1]
with open(fName, 'r') as inF:
if ext.upper() == 'CSV':
# CSV module distributed with python2.3 and later
splitter = csv.reader(inF)
else:
splitter = csv.reader(inF, delimiter='\t')
res = TextToData(splitter, onlyCols=onlyCols)
return res
def InitRandomNumbers(seed):
""" Seeds the random number generators
**Arguments**
- seed: a 2-tuple containing integers to be used as the random number seeds
**Notes**
this seeds both the RDRandom generator and the one in the standard
Python _random_ module
"""
  from rdkit import RDRandom
  RDRandom.seed(seed[0])
  # NOTE: only the first element of the seed tuple is actually used
  random.seed(seed[0])
def FilterData(inData, val, frac, col=-1, indicesToUse=None, indicesOnly=0):
"""
#DOC
"""
if frac < 0 or frac > 1:
raise ValueError('filter fraction out of bounds')
try:
inData[0][col]
except IndexError:
raise ValueError('target column index out of range')
# convert the input data to a list and sort them
if indicesToUse:
tmp = [inData[x] for x in indicesToUse]
else:
tmp = list(inData)
nOrig = len(tmp)
sortOrder = list(range(nOrig))
sortOrder.sort(key=lambda x: tmp[x][col])
tmp = [tmp[x] for x in sortOrder]
# find the start of the entries with value val
start = 0
while start < nOrig and tmp[start][col] != val:
start += 1
if start >= nOrig:
raise ValueError('target value (%d) not found in data' % (val))
# find the end of the entries with value val
finish = start + 1
while finish < nOrig and tmp[finish][col] == val:
finish += 1
# how many entries have the target value?
nWithVal = finish - start
# how many don't?
nOthers = len(tmp) - nWithVal
currFrac = float(nWithVal) / nOrig
if currFrac < frac:
#
# We're going to keep most of (all) the points with the target value,
# We need to figure out how many of the other points we'll
# toss out
#
nTgtFinal = nWithVal
nFinal = int(round(nWithVal / frac))
nOthersFinal = nFinal - nTgtFinal
#
# We may need to reduce the number of targets to keep
# because it may make it impossible to hit exactly the
# fraction we're trying for. Take care of that now
#
while float(nTgtFinal) / nFinal > frac:
nTgtFinal -= 1
nFinal -= 1
else:
#
# There are too many points with the target value,
# we'll keep most of (all) the other points and toss a random
# selection of the target value points
#
nOthersFinal = nOthers
nFinal = int(round(nOthers / (1 - frac)))
nTgtFinal = nFinal - nOthersFinal
#
# We may need to reduce the number of others to keep
# because it may make it impossible to hit exactly the
# fraction we're trying for. Take care of that now
#
while float(nTgtFinal) / nFinal < frac:
nOthersFinal -= 1
nFinal -= 1
others = list(range(start)) + list(range(finish, nOrig))
othersTake = permutation(nOthers)
others = [others[x] for x in othersTake[:nOthersFinal]]
targets = list(range(start, finish))
targetsTake = permutation(nWithVal)
targets = [targets[x] for x in targetsTake[:nTgtFinal]]
# these are all the indices we'll be keeping
indicesToKeep = targets + others
res = []
rej = []
# now pull the points, but in random order
if not indicesOnly:
for i in permutation(nOrig):
if i in indicesToKeep:
res.append(tmp[i])
else:
rej.append(tmp[i])
else:
# EFF: this is slower than it needs to be
for i in permutation(nOrig):
if not indicesToUse:
idx = sortOrder[i]
else:
idx = indicesToUse[sortOrder[i]]
if i in indicesToKeep:
res.append(idx)
else:
rej.append(idx)
return res, rej
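# Worked sketch (hypothetical helper, not part of the original API):
# rebalance a small data set so that half of the kept points have activity
# 1 in the last column; here all three 1s and three of the five 0s survive.
def _FilterDataSketch():  # pragma: nocover
  pts = [[0, 1], [1, 1], [2, 1], [3, 0], [4, 0], [5, 0], [6, 0], [7, 0]]
  kept, rejected = FilterData(pts, val=1, frac=0.5)
  return kept, rejected  # 6 points kept, 2 rejected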
def CountResults(inData, col=-1, bounds=None):
""" #DOC
"""
counts = {}
for p in inData:
if not bounds:
r = p[col]
else:
act = p[col]
bound = 0
placed = 0
while not placed and bound < len(bounds):
if act < bounds[bound]:
r = bound
placed = 1
else:
bound += 1
if not placed:
r = bound
counts[r] = counts.get(r, 0) + 1
return counts
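# Quick illustration, traced by hand (not part of the original API):
#   CountResults([[0], [1], [1]])                      -> {0: 1, 1: 2}
#   CountResults([[0.5], [1.5], [2.5]], bounds=[1, 2]) -> {0: 1, 1: 1, 2: 1}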
def RandomizeActivities(dataSet, shuffle=0, runDetails=None):
""" randomizes the activity values of a dataset
**Arguments**
- dataSet: a _ML.Data.MLQuantDataSet_, the activities here will be randomized
- shuffle: an optional toggle. If this is set, the activity values
will be shuffled (so the number in each class remains constant)
- runDetails: an optional CompositeRun object
**Note**
- _examples_ are randomized in place
"""
nPts = dataSet.GetNPts()
if shuffle:
if runDetails:
runDetails.shuffled = 1
acts = dataSet.GetResults()[:]
# While the random argument is the default, removing it will cause the shuffle
# tests in UnitTestScreenComposite to fail.
random.shuffle(acts, random=random.random)
  else:
    # assign each point a fresh random activity; activities are assumed to
    # run from 0 to nPossible - 1
    if runDetails:
      runDetails.randomized = 1
    nPossible = dataSet.GetNPossibleVals()[-1]
    acts = [random.randint(0, nPossible - 1) for _ in range(nPts)]
for i in range(nPts):
tmp = dataSet[i]
tmp[-1] = acts[i]
dataSet[i] = tmp
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
/ovs-helper-0.0.5.tar.gz/ovs-helper-0.0.5/ovs_helper/vsctl.py
from __future__ import print_function
import logging
import operator
import os
import sys
import weakref
import six
import ovs.db.data
import ovs.db.parser
import ovs.db.schema
import ovs.db.types
import ovs.poller
import ovs.json
from ovs import jsonrpc
from ovs import ovsuuid
from ovs import stream
from ovs.db import idl
from ryu.lib import hub
from ryu.lib.ovs import vswitch_idl
from ryu.lib.stringify import StringifyMixin
LOG = logging.getLogger(__name__) # use ovs.vlog?
# for debug
def ovsrec_row_changes_to_string(ovsrec_row):
if not ovsrec_row._changes:
return ovsrec_row._changes
return dict((key, value.to_string())
for key, value in ovsrec_row._changes.items())
# for debug
def ovsrec_row_to_string(ovsrec_row):
output = ''
output += 'uuid: %s ' % ovsrec_row.uuid
if ovsrec_row._data:
output += '_data: %s ' % dict((key, value.to_string()) for key, value
in ovsrec_row._data.items())
else:
output += '_data: %s ' % ovsrec_row._data
output += '_changes: %s' % ovsrec_row_changes_to_string(ovsrec_row)
return output
def atom_from_string(base, value_string, symtab=None):
type_ = base.type
atom = None
if type_ == ovs.db.types.IntegerType:
atom = ovs.db.data.Atom(type_, int(value_string))
elif type_ == ovs.db.types.RealType:
# TODO:XXX negation
atom = ovs.db.data.Atom(
type_, ovs.db.parser.float_to_int(float(value_string)))
elif type_ == ovs.db.types.BooleanType:
if value_string in ("true", "yes", "on", "1"):
atom = ovs.db.data.Atom(type_, True)
        elif value_string in ("false", "no", "off", "0"):
atom = ovs.db.data.Atom(type_, False)
elif type_ == ovs.db.types.StringType:
# TODO:XXXX escape: if value_string[0] == '"':
atom = ovs.db.data.Atom(type_, value_string)
elif type_ == ovs.db.types.UuidType:
if value_string[0] == "@":
assert symtab is not None
uuid_ = symtab[value_string]
atom = ovs.db.data.Atom(type_, uuid_)
else:
atom = ovs.db.data.Atom(type_,
ovs.ovsuuid.from_string(value_string))
if atom is None:
raise ValueError("expected %s" % type_.to_string(), value_string)
atom.check_constraints(base)
return atom
def datum_from_string(type_, value_string, symtab=None):
value_string = value_string.strip()
if type_.is_map():
if value_string.startswith('{'):
# TODO:dict case
LOG.debug('value_string %s', value_string)
raise NotImplementedError()
d = dict(v.split('=', 1) for v in value_string.split(','))
d = dict((atom_from_string(type_.key, key, symtab),
atom_from_string(type_.value, value, symtab))
for key, value in d.items())
elif type_.is_set():
if value_string.startswith('['):
# TODO:set case
LOG.debug('value_string %s', value_string)
raise NotImplementedError()
values = value_string.split(',')
d = dict((atom_from_string(type_.key, value, symtab), None)
for value in values)
else:
atom = atom_from_string(type_.key, value_string, symtab)
d = {atom: None}
datum = ovs.db.data.Datum(type_, d)
return datum.to_json()
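# Illustrative sketch (hypothetical helper, not part of the original
# module): build a string->string map type from its OVSDB schema JSON and
# convert a "key=value" setting into the JSON datum that set_column() and
# add_column() consume.
def _datum_from_string_sketch():
    map_type = ovs.db.types.Type.from_json(
        {'key': 'string', 'value': 'string', 'min': 0, 'max': 'unlimited'})
    return datum_from_string(map_type, 'stp-enable=true')
    # -> ['map', [['stp-enable', 'true']]]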
def ifind(pred, seq):
try:
return [i for i in seq if pred(i)][0]
except IndexError:
return None
def not_reached():
os.abort()
def vsctl_fatal(msg):
LOG.error(msg)
raise Exception(msg) # not call ovs.utils.ovs_fatal for reusability
class VSCtlBridge(object):
def __init__(self, ovsrec_bridge, name, parent, vlan):
super(VSCtlBridge, self).__init__()
self.br_cfg = ovsrec_bridge
self.name = name
self.ports = set()
self.parent = parent
self.vlan = vlan
self.children = set() # WeakSet is needed?
def find_vlan_bridge(self, vlan):
return ifind(lambda child: child.vlan == vlan, self.children)
class VSCtlPort(object):
def __init__(self, vsctl_bridge_parent, ovsrec_port):
super(VSCtlPort, self).__init__()
self.bridge = weakref.ref(vsctl_bridge_parent) # backpointer
self.port_cfg = ovsrec_port
self.ifaces = set()
self.qos = None
class VSCtlIface(object):
def __init__(self, vsctl_port_parent, ovsrec_iface):
super(VSCtlIface, self).__init__()
self.port = weakref.ref(vsctl_port_parent) # backpointer
self.iface_cfg = ovsrec_iface
class VSCtlQoS(object):
def __init__(self, vsctl_port_parent, ovsrec_qos):
super(VSCtlQoS, self).__init__()
self.port = weakref.ref(vsctl_port_parent)
self.qos_cfg = ovsrec_qos
self.queues = set()
class VSCtlQueue(object):
def __init__(self, vsctl_qos_parent, ovsrec_queue):
super(VSCtlQueue, self).__init__()
self.qos = weakref.ref(vsctl_qos_parent)
self.queue_cfg = ovsrec_queue
class VSCtlContext(object):
def _invalidate_cache(self):
self.cache_valid = False
self.bridges.clear()
self.ports.clear()
self.ifaces.clear()
def __init__(self, idl_, txn, ovsrec_open_vswitch):
super(VSCtlContext, self).__init__()
# Modifiable state
# self.table = None
self.idl = idl_
self.txn = txn
self.ovs = ovsrec_open_vswitch
self.symtab = None # TODO:XXX
self.verified_ports = False
# A cache of the contents of the database.
self.cache_valid = False
self.bridges = {} # bridge name -> VSCtlBridge
self.ports = {} # port name -> VSCtlPort
self.ifaces = {} # iface name -> VSCtlIface
self.try_again = False # used by wait-until command
def done(self):
self._invalidate_cache()
def verify_bridges(self):
self.ovs.verify(vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES)
def verify_ports(self):
if self.verified_ports:
return
self.verify_bridges()
for ovsrec_bridge in self.idl.tables[
vswitch_idl.OVSREC_TABLE_BRIDGE].rows.values():
ovsrec_bridge.verify(vswitch_idl.OVSREC_BRIDGE_COL_PORTS)
for ovsrec_port in self.idl.tables[
vswitch_idl.OVSREC_TABLE_PORT].rows.values():
ovsrec_port.verify(vswitch_idl.OVSREC_PORT_COL_INTERFACES)
self.verified_ports = True
def add_bridge_to_cache(self, ovsrec_bridge, name, parent, vlan):
vsctl_bridge = VSCtlBridge(ovsrec_bridge, name, parent, vlan)
if parent:
parent.children.add(vsctl_bridge)
self.bridges[name] = vsctl_bridge
return vsctl_bridge
def del_cached_bridge(self, vsctl_bridge):
assert not vsctl_bridge.ports
assert not vsctl_bridge.children
parent = vsctl_bridge.parent
if parent:
parent.children.remove(vsctl_bridge)
vsctl_bridge.parent = None # break circular reference
ovsrec_bridge = vsctl_bridge.br_cfg
if ovsrec_bridge:
ovsrec_bridge.delete()
self.ovs_delete_bridge(ovsrec_bridge)
del self.bridges[vsctl_bridge.name]
def del_cached_qos(self, vsctl_qos):
vsctl_qos.port().qos = None
vsctl_qos.port = None
vsctl_qos.queues = None
def add_port_to_cache(self, vsctl_bridge_parent, ovsrec_port):
tag = getattr(ovsrec_port, vswitch_idl.OVSREC_PORT_COL_TAG, None)
if isinstance(tag, list):
if len(tag) == 0:
tag = 0
else:
tag = tag[0]
if tag is not None and 0 <= tag < 4096:
vlan_bridge = vsctl_bridge_parent.find_vlan_bridge(tag)
if vlan_bridge:
vsctl_bridge_parent = vlan_bridge
vsctl_port = VSCtlPort(vsctl_bridge_parent, ovsrec_port)
vsctl_bridge_parent.ports.add(vsctl_port)
self.ports[ovsrec_port.name] = vsctl_port
return vsctl_port
def del_cached_port(self, vsctl_port):
assert not vsctl_port.ifaces
vsctl_port.bridge().ports.remove(vsctl_port)
vsctl_port.bridge = None
port = self.ports.pop(vsctl_port.port_cfg.name)
assert port == vsctl_port
vsctl_port.port_cfg.delete()
def add_iface_to_cache(self, vsctl_port_parent, ovsrec_iface):
vsctl_iface = VSCtlIface(vsctl_port_parent, ovsrec_iface)
vsctl_port_parent.ifaces.add(vsctl_iface)
self.ifaces[ovsrec_iface.name] = vsctl_iface
def add_qos_to_cache(self, vsctl_port_parent, ovsrec_qos):
vsctl_qos = VSCtlQoS(vsctl_port_parent, ovsrec_qos)
vsctl_port_parent.qos = vsctl_qos
return vsctl_qos
def add_queue_to_cache(self, vsctl_qos_parent, ovsrec_queue):
vsctl_queue = VSCtlQueue(vsctl_qos_parent, ovsrec_queue)
vsctl_qos_parent.queues.add(vsctl_queue)
def del_cached_iface(self, vsctl_iface):
vsctl_iface.port().ifaces.remove(vsctl_iface)
vsctl_iface.port = None
del self.ifaces[vsctl_iface.iface_cfg.name]
vsctl_iface.iface_cfg.delete()
def invalidate_cache(self):
if not self.cache_valid:
return
self._invalidate_cache()
def populate_cache(self):
self._populate_cache(self.idl.tables[vswitch_idl.OVSREC_TABLE_BRIDGE])
@staticmethod
def port_is_fake_bridge(ovsrec_port):
tag = ovsrec_port.tag
if isinstance(tag, list):
if len(tag) == 0:
tag = 0
else:
tag = tag[0]
return ovsrec_port.fake_bridge and 0 <= tag <= 4095
def _populate_cache(self, ovsrec_bridges):
if self.cache_valid:
return
self.cache_valid = True
bridges = set()
ports = set()
for ovsrec_bridge in ovsrec_bridges.rows.values():
name = ovsrec_bridge.name
if name in bridges:
LOG.warning('%s: database contains duplicate bridge name',
name)
bridges.add(name)
vsctl_bridge = self.add_bridge_to_cache(ovsrec_bridge, name,
None, 0)
if not vsctl_bridge:
continue
for ovsrec_port in ovsrec_bridge.ports:
port_name = ovsrec_port.name
if port_name in ports:
# Duplicate ovsrec_port name.
# (We will warn about that later.)
continue
ports.add(port_name)
if (self.port_is_fake_bridge(ovsrec_port) and
port_name not in bridges):
bridges.add(port_name)
self.add_bridge_to_cache(None, port_name, vsctl_bridge,
ovsrec_port.tag)
bridges = set()
for ovsrec_bridge in ovsrec_bridges.rows.values():
name = ovsrec_bridge.name
if name in bridges:
continue
bridges.add(name)
vsctl_bridge = self.bridges[name]
for ovsrec_port in ovsrec_bridge.ports:
port_name = ovsrec_port.name
vsctl_port = self.ports.get(port_name)
if vsctl_port:
if ovsrec_port == vsctl_port.port_cfg:
                        LOG.warning('%s: vsctl_port is in multiple bridges '
                                    '(%s and %s)',
                                    port_name, vsctl_bridge.name,
                                    vsctl_port.bridge().name)
else:
LOG.error('%s: database contains duplicate '
'vsctl_port name',
ovsrec_port.name)
continue
if (self.port_is_fake_bridge(ovsrec_port) and
port_name in bridges):
continue
# LOG.debug('ovsrec_port %s %s %s',
# ovsrec_port, ovsrec_port._data, ovsrec_port.tag)
vsctl_port = self.add_port_to_cache(vsctl_bridge, ovsrec_port)
# LOG.debug('vsctl_port %s', vsctl_port)
for ovsrec_iface in ovsrec_port.interfaces:
iface = self.ifaces.get(ovsrec_iface.name)
if iface:
if ovsrec_iface == iface.iface_cfg:
LOG.warning(
'%s: interface is in multiple ports '
'(%s and %s)',
ovsrec_iface.name,
iface.port().port_cfg.name,
vsctl_port.port_cfg.name)
else:
LOG.error(
'%s: database contains duplicate interface '
'name',
ovsrec_iface.name)
continue
self.add_iface_to_cache(vsctl_port, ovsrec_iface)
ovsrec_qos = ovsrec_port.qos
vsctl_qos = self.add_qos_to_cache(vsctl_port, ovsrec_qos)
if len(ovsrec_qos):
for ovsrec_queue in ovsrec_qos[0].queues:
self.add_queue_to_cache(vsctl_qos, ovsrec_queue)
def check_conflicts(self, name, msg):
self.verify_ports()
if name in self.bridges:
vsctl_fatal('%s because a bridge named %s already exists' %
(msg, name))
if name in self.ports:
vsctl_fatal('%s because a port named %s already exists on '
'bridge %s' %
(msg, name, self.ports[name].bridge().name))
if name in self.ifaces:
vsctl_fatal('%s because an interface named %s already '
'exists on bridge %s' %
(msg, name, self.ifaces[name].port().bridge().name))
def find_bridge(self, name, must_exist):
assert self.cache_valid
vsctl_bridge = self.bridges.get(name)
if must_exist and not vsctl_bridge:
vsctl_fatal('no bridge named %s' % name)
self.verify_bridges()
return vsctl_bridge
def find_real_bridge(self, name, must_exist):
vsctl_bridge = self.find_bridge(name, must_exist)
if vsctl_bridge and vsctl_bridge.parent:
vsctl_fatal('%s is a fake bridge' % name)
return vsctl_bridge
def find_bridge_by_id(self, datapath_id, must_exist):
assert self.cache_valid
for vsctl_bridge in self.bridges.values():
if vsctl_bridge.br_cfg.datapath_id[0].strip('"') == datapath_id:
self.verify_bridges()
return vsctl_bridge
if must_exist:
vsctl_fatal('no bridge id %s' % datapath_id)
return None
def find_port(self, name, must_exist):
assert self.cache_valid
vsctl_port = self.ports.get(name)
if vsctl_port and name == vsctl_port.bridge().name:
vsctl_port = None
if must_exist and not vsctl_port:
vsctl_fatal('no vsctl_port named %s' % name)
return vsctl_port
def find_iface(self, name, must_exist):
assert self.cache_valid
vsctl_iface = self.ifaces.get(name)
if vsctl_iface and name == vsctl_iface.port().bridge().name:
vsctl_iface = None
if must_exist and not vsctl_iface:
vsctl_fatal('no interface named %s' % name)
self.verify_ports()
return vsctl_iface
def set_qos(self, vsctl_port, type, max_rate):
qos = vsctl_port.qos.qos_cfg
if not len(qos):
ovsrec_qos = self.txn.insert(
self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QOS])
vsctl_port.port_cfg.qos = [ovsrec_qos]
else:
ovsrec_qos = qos[0]
ovsrec_qos.type = type
if max_rate is not None:
value_json = ['map', [['max-rate', max_rate]]]
self.set_column(ovsrec_qos, 'other_config', value_json)
self.add_qos_to_cache(vsctl_port, [ovsrec_qos])
return ovsrec_qos
def set_queue(self, vsctl_qos, max_rate, min_rate,
queue_id):
ovsrec_qos = vsctl_qos.qos_cfg[0]
try:
ovsrec_queue = ovsrec_qos.queues[queue_id]
except (AttributeError, KeyError):
ovsrec_queue = self.txn.insert(
self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QUEUE])
if max_rate is not None:
value_json = ['map', [['max-rate', max_rate]]]
self.add_column(ovsrec_queue, 'other_config', value_json)
if min_rate is not None:
value_json = ['map', [['min-rate', min_rate]]]
self.add_column(ovsrec_queue, 'other_config', value_json)
value_json = ['map', [[queue_id, ['uuid', str(ovsrec_queue.uuid)]]]]
self.add_column(ovsrec_qos, 'queues', value_json)
self.add_queue_to_cache(vsctl_qos, ovsrec_queue)
return ovsrec_queue
@staticmethod
def _column_set(ovsrec_row, column, ovsrec_value):
# need to trigger Row.__setattr__()
setattr(ovsrec_row, column, ovsrec_value)
@staticmethod
def _column_insert(ovsrec_row, column, ovsrec_add):
value = getattr(ovsrec_row, column)
value.append(ovsrec_add)
VSCtlContext._column_set(ovsrec_row, column, value)
@staticmethod
def _column_delete(ovsrec_row, column, ovsrec_del):
value = getattr(ovsrec_row, column)
try:
value.remove(ovsrec_del)
except ValueError:
# Datum.to_python() with _uuid_to_row trims down deleted
# references. If ovsrec_del.delete() is called before
# _column_delete(), value doesn't include ovsrec_del.
pass
VSCtlContext._column_set(ovsrec_row, column, value)
@staticmethod
def bridge_insert_port(ovsrec_bridge, ovsrec_port):
VSCtlContext._column_insert(ovsrec_bridge,
vswitch_idl.OVSREC_BRIDGE_COL_PORTS,
ovsrec_port)
@staticmethod
def bridge_delete_port(ovsrec_bridge, ovsrec_port):
VSCtlContext._column_delete(ovsrec_bridge,
vswitch_idl.OVSREC_BRIDGE_COL_PORTS,
ovsrec_port)
@staticmethod
def port_delete_qos(ovsrec_port, ovsrec_qos):
VSCtlContext._column_delete(ovsrec_port,
vswitch_idl.OVSREC_PORT_COL_QOS,
ovsrec_qos)
def ovs_insert_bridge(self, ovsrec_bridge):
self._column_insert(self.ovs,
vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES,
ovsrec_bridge)
def ovs_delete_bridge(self, ovsrec_bridge):
self._column_delete(self.ovs,
vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES,
ovsrec_bridge)
def del_port(self, vsctl_port):
if vsctl_port.bridge().parent:
ovsrec_bridge = vsctl_port.bridge().parent.br_cfg
else:
ovsrec_bridge = vsctl_port.bridge().br_cfg
self.bridge_delete_port(ovsrec_bridge, vsctl_port.port_cfg)
for vsctl_iface in vsctl_port.ifaces.copy():
self.del_cached_iface(vsctl_iface)
self.del_cached_port(vsctl_port)
def del_bridge(self, vsctl_bridge):
for child in vsctl_bridge.children.copy():
self.del_bridge(child)
for vsctl_port in vsctl_bridge.ports.copy():
self.del_port(vsctl_port)
self.del_cached_bridge(vsctl_bridge)
def del_qos(self, vsctl_qos):
ovsrec_port = vsctl_qos.port().port_cfg
ovsrec_qos = vsctl_qos.qos_cfg
if len(ovsrec_qos):
self.port_delete_qos(ovsrec_port, ovsrec_qos[0])
self.del_cached_qos(vsctl_qos)
def add_port(self, br_name, port_name, may_exist, fake_iface,
iface_names, settings=None):
"""
:type settings: list of (column, value_json)
where column is str,
value_json is json that is represented
by Datum.to_json()
"""
settings = settings or []
self.populate_cache()
if may_exist:
vsctl_port = self.find_port(port_name, False)
if vsctl_port:
want_names = set(iface_names)
have_names = set(ovsrec_iface.name for ovsrec_iface in
vsctl_port.port_cfg.interfaces)
                if vsctl_port.bridge().name != br_name:
                    vsctl_fatal('"--may_exist add-port %s %s" but %s is '
                                'actually attached to bridge %s' %
                                (br_name, port_name, port_name,
                                 vsctl_port.bridge().name))
                if want_names != have_names:
                    want_names_string = ','.join(want_names)
                    have_names_string = ','.join(have_names)
                    vsctl_fatal('"--may_exist add-port %s %s" but %s '
                                'actually has interface(s) %s, not %s' %
                                (br_name, port_name, port_name,
                                 have_names_string, want_names_string))
return
self.check_conflicts(port_name,
'cannot create a port named %s' % port_name)
for iface_name in iface_names:
self.check_conflicts(
iface_name, 'cannot create an interface named %s' % iface_name)
vsctl_bridge = self.find_bridge(br_name, True)
ifaces = []
for iface_name in iface_names:
ovsrec_iface = self.txn.insert(
self.idl.tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
ovsrec_iface.name = iface_name
ifaces.append(ovsrec_iface)
ovsrec_port = self.txn.insert(
self.idl.tables[vswitch_idl.OVSREC_TABLE_PORT])
ovsrec_port.name = port_name
ovsrec_port.interfaces = ifaces
ovsrec_port.bond_fake_iface = fake_iface
if vsctl_bridge.parent:
tag = vsctl_bridge.vlan
ovsrec_port.tag = tag
for column, value in settings:
# TODO:XXX self.symtab:
self.set_column(ovsrec_port, column, value)
if vsctl_bridge.parent:
ovsrec_bridge = vsctl_bridge.parent.br_cfg
else:
ovsrec_bridge = vsctl_bridge.br_cfg
self.bridge_insert_port(ovsrec_bridge, ovsrec_port)
vsctl_port = self.add_port_to_cache(vsctl_bridge, ovsrec_port)
for ovsrec_iface in ifaces:
self.add_iface_to_cache(vsctl_port, ovsrec_iface)
def add_bridge(self, br_name, parent_name=None, vlan=0, may_exist=False):
self.populate_cache()
if may_exist:
vsctl_bridge = self.find_bridge(br_name, False)
if vsctl_bridge:
if not parent_name:
                    if vsctl_bridge.parent:
                        vsctl_fatal('"--may_exist add-br %s" '
                                    'but %s is a VLAN bridge for VLAN %d' %
                                    (br_name, br_name, vsctl_bridge.vlan))
                else:
                    if not vsctl_bridge.parent:
                        vsctl_fatal('"--may_exist add-br %s %s %d" '
                                    'but %s is not a VLAN bridge' %
                                    (br_name, parent_name, vlan, br_name))
                    elif vsctl_bridge.parent.name != parent_name:
                        vsctl_fatal('"--may_exist add-br %s %s %d" '
                                    'but %s has the wrong parent %s' %
                                    (br_name, parent_name, vlan,
                                     br_name, vsctl_bridge.parent.name))
                    elif vsctl_bridge.vlan != vlan:
                        vsctl_fatal('"--may_exist add-br %s %s %d" '
                                    'but %s is a VLAN bridge for the wrong '
                                    'VLAN %d' %
                                    (br_name, parent_name, vlan, br_name,
                                     vsctl_bridge.vlan))
return
self.check_conflicts(br_name,
'cannot create a bridge named %s' % br_name)
txn = self.txn
tables = self.idl.tables
if not parent_name:
ovsrec_iface = txn.insert(
tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
ovsrec_iface.name = br_name
ovsrec_iface.type = 'internal'
ovsrec_port = txn.insert(tables[vswitch_idl.OVSREC_TABLE_PORT])
ovsrec_port.name = br_name
ovsrec_port.interfaces = [ovsrec_iface]
ovsrec_port.fake_bridge = False
ovsrec_bridge = txn.insert(tables[vswitch_idl.OVSREC_TABLE_BRIDGE])
ovsrec_bridge.name = br_name
ovsrec_bridge.ports = [ovsrec_port]
self.ovs_insert_bridge(ovsrec_bridge)
else:
parent = self.find_bridge(parent_name, False)
if parent and parent.parent:
vsctl_fatal('cannot create bridge with fake bridge as parent')
if not parent:
vsctl_fatal('parent bridge %s does not exist' % parent_name)
ovsrec_iface = txn.insert(
tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
ovsrec_iface.name = br_name
ovsrec_iface.type = 'internal'
ovsrec_port = txn.insert(tables[vswitch_idl.OVSREC_TABLE_PORT])
ovsrec_port.name = br_name
ovsrec_port.interfaces = [ovsrec_iface]
ovsrec_port.fake_bridge = True
ovsrec_port.tag = vlan
self.bridge_insert_port(parent.br_cfg, ovsrec_port)
self.invalidate_cache()
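    # Usage note (illustrative): with a transaction open,
    #   ctx.add_bridge('br0')                     # a real bridge
    #   ctx.add_bridge('vlan10', 'br0', vlan=10)  # a fake/VLAN bridge
    # The latter is realized as a tagged port named 'vlan10' on 'br0'
    # rather than as a row of its own in the Bridge table.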
@staticmethod
def parse_column_key(setting_string):
"""
Parses 'setting_string' as str formatted in <column>[:<key>]
and returns str type 'column' and 'key'
"""
if ':' in setting_string:
# splits <column>:<key> into <column> and <key>
column, key = setting_string.split(':', 1)
else:
# stores <column> and <value>=None
column = setting_string
key = None
return column, key
@staticmethod
def parse_column_key_value(table_schema, setting_string):
"""
Parses 'setting_string' as str formatted in <column>[:<key>]=<value>
and returns str type 'column' and json formatted 'value'
"""
if ':' in setting_string:
# splits <column>:<key>=<value> into <column> and <key>=<value>
column, value = setting_string.split(':', 1)
elif '=' in setting_string:
# splits <column>=<value> into <column> and <value>
column, value = setting_string.split('=', 1)
else:
# stores <column> and <value>=None
column = setting_string
value = None
if value is not None:
type_ = table_schema.columns[column].type
value = datum_from_string(type_, value)
return column, value
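    # Illustrative example: given the Interface table schema,
    #   parse_column_key_value(iface_schema, 'external_ids:iface-id=port-1')
    # returns ('external_ids', ['map', [['iface-id', 'port-1']]]), ready to
    # be handed to set_column()/add_column().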
def get_column(self, ovsrec_row, column, key=None, if_exists=False):
value = getattr(ovsrec_row, column, None)
if isinstance(value, dict) and key is not None:
value = value.get(key, None)
column = '%s:%s' % (column, key)
if value is None:
if if_exists:
return None
vsctl_fatal('%s does not contain a column whose name matches "%s"'
% (ovsrec_row._table.name, column))
return value
def _pre_mod_column(self, ovsrec_row, column, value_json):
if column not in ovsrec_row._table.columns:
vsctl_fatal('%s does not contain a column whose name matches "%s"'
% (ovsrec_row._table.name, column))
column_schema = ovsrec_row._table.columns[column]
datum = ovs.db.data.Datum.from_json(
column_schema.type, value_json, self.symtab)
return datum.to_python(ovs.db.idl._uuid_to_row)
def set_column(self, ovsrec_row, column, value_json):
column_schema = ovsrec_row._table.columns[column]
datum = self._pre_mod_column(ovsrec_row, column, value_json)
if column_schema.type.is_map():
values = getattr(ovsrec_row, column, {})
values.update(datum)
else:
values = datum
setattr(ovsrec_row, column, values)
def add_column(self, ovsrec_row, column, value_json):
column_schema = ovsrec_row._table.columns[column]
datum = self._pre_mod_column(ovsrec_row, column, value_json)
if column_schema.type.is_map():
values = getattr(ovsrec_row, column, {})
values.update(datum)
elif column_schema.type.is_set():
values = getattr(ovsrec_row, column, [])
values.extend(datum)
else:
values = datum
setattr(ovsrec_row, column, values)
def remove_column(self, ovsrec_row, column, value_json):
column_schema = ovsrec_row._table.columns[column]
datum = self._pre_mod_column(ovsrec_row, column, value_json)
if column_schema.type.is_map():
values = getattr(ovsrec_row, column, {})
for datum_key, datum_value in datum.items():
v = values.get(datum_key, None)
if v == datum_value:
values.pop(datum_key)
setattr(ovsrec_row, column, values)
elif column_schema.type.is_set():
values = getattr(ovsrec_row, column, [])
for d in datum:
if d in values:
values.remove(d)
setattr(ovsrec_row, column, values)
else:
values = getattr(ovsrec_row, column, None)
default = ovs.db.data.Datum.default(column_schema.type)
            default = default.to_python(ovs.db.idl._uuid_to_row)
if values == datum:
setattr(ovsrec_row, column, default)
def _get_row_by_id(self, table_name, vsctl_row_id, record_id):
if not vsctl_row_id.table:
return None
if not vsctl_row_id.name_column:
if record_id != '.':
return None
values = list(self.idl.tables[vsctl_row_id.table].rows.values())
            # the '.' record id resolves only when the table contains
            # exactly one row
            if not values or len(values) > 1:
return None
referrer = values[0]
else:
referrer = None
for ovsrec_row in self.idl.tables[
vsctl_row_id.table].rows.values():
name = getattr(ovsrec_row, vsctl_row_id.name_column)
assert isinstance(name, (list, str, six.text_type))
if not isinstance(name, list) and name == record_id:
if referrer:
vsctl_fatal('multiple rows in %s match "%s"' %
(table_name, record_id))
referrer = ovsrec_row
if not referrer:
return None
final = None
if vsctl_row_id.uuid_column:
referrer.verify(vsctl_row_id.uuid_column)
uuid = getattr(referrer, vsctl_row_id.uuid_column)
uuid_ = referrer._data[vsctl_row_id.uuid_column]
assert uuid_.type.key.type == ovs.db.types.UuidType
assert uuid_.type.value is None
assert isinstance(uuid, list)
if len(uuid) == 1:
final = uuid[0]
else:
final = referrer
return final
def get_row(self, vsctl_table, record_id):
table_name = vsctl_table.table_name
if ovsuuid.is_valid_string(record_id):
uuid = ovsuuid.from_string(record_id)
return self.idl.tables[table_name].rows.get(uuid)
else:
for vsctl_row_id in vsctl_table.row_ids:
ovsrec_row = self._get_row_by_id(table_name, vsctl_row_id,
record_id)
if ovsrec_row:
return ovsrec_row
return None
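    # Illustrative examples (hedged): a row can be addressed either by its
    # UUID string or by a record id resolved through the _VSCtlRowID
    # entries in VSCtl._TABLES, e.g.
    #   ctx.get_row(VSCtl._get_table('Bridge'), 'br0')      # Bridge 'br0'
    #   ctx.get_row(VSCtl._get_table('Open_vSwitch'), '.')  # singleton row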
def must_get_row(self, vsctl_table, record_id):
ovsrec_row = self.get_row(vsctl_table, record_id)
if not ovsrec_row:
vsctl_fatal('no row "%s" in table %s' % (record_id,
vsctl_table.table_name))
return ovsrec_row
class _CmdShowTable(object):
def __init__(self, table, name_column, columns, recurse):
super(_CmdShowTable, self).__init__()
self.table = table
self.name_column = name_column
self.columns = columns
self.recurse = recurse
class _VSCtlRowID(object):
def __init__(self, table, name_column, uuid_column):
super(_VSCtlRowID, self).__init__()
self.table = table
self.name_column = name_column
self.uuid_column = uuid_column
class _VSCtlTable(object):
def __init__(self, table_name, vsctl_row_id_list):
super(_VSCtlTable, self).__init__()
self.table_name = table_name
self.row_ids = vsctl_row_id_list
class VSCtlCommand(StringifyMixin):
def __init__(self, command, args=None, options=None):
super(VSCtlCommand, self).__init__()
self.command = command
self.args = args or []
self.options = options or []
# Data modified by commands
self.result = None
# internally used by VSCtl
self._prerequisite = None
self._run = None
def has_option(self, option):
return option in self.options
class VSCtl(object):
def _reset(self):
self.schema_helper = None
self.ovs = None
self.txn = None
self.wait_for_reload = True
self.dry_run = False
def __init__(self, remote):
super(VSCtl, self).__init__()
self.remote = remote
self.schema_json = None
self.schema = None
self.schema_helper = None
self.ovs = None
self.txn = None
self.wait_for_reload = True
self.dry_run = False
def _rpc_get_schema_json(self, database):
LOG.debug('remote %s', self.remote)
error, stream_ = stream.Stream.open_block(
stream.Stream.open(self.remote))
if error:
vsctl_fatal('error %s' % os.strerror(error))
rpc = jsonrpc.Connection(stream_)
request = jsonrpc.Message.create_request('get_schema', [database])
error, reply = rpc.transact_block(request)
rpc.close()
if error:
vsctl_fatal(os.strerror(error))
elif reply.error:
vsctl_fatal('error %s' % reply.error)
return reply.result
def _init_schema_helper(self):
if self.schema_json is None:
self.schema_json = self._rpc_get_schema_json(
vswitch_idl.OVSREC_DB_NAME)
schema_helper = idl.SchemaHelper(None, self.schema_json)
schema_helper.register_all()
self.schema = schema_helper.get_idl_schema()
# LOG.debug('schema_json %s', schema_json)
self.schema_helper = idl.SchemaHelper(None, self.schema_json)
@staticmethod
def _idl_block(idl_):
poller = ovs.poller.Poller()
idl_.wait(poller)
poller.block()
@staticmethod
def _idl_wait(idl_, seqno):
while idl_.change_seqno == seqno and not idl_.run():
VSCtl._idl_block(idl_)
def _run_prerequisites(self, commands):
schema_helper = self.schema_helper
schema_helper.register_table(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH)
if self.wait_for_reload:
# LOG.debug('schema_helper._tables %s', schema_helper._tables)
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
[vswitch_idl.OVSREC_OPEN_VSWITCH_COL_CUR_CFG])
for command in commands:
if not command._prerequisite:
continue
ctx = VSCtlContext(None, None, None)
command._prerequisite(ctx, command)
ctx.done()
def _do_vsctl(self, idl_, commands):
self.txn = idl.Transaction(idl_)
if self.dry_run:
self.txn.dry_run = True
self.txn.add_comment('ovs-vsctl') # TODO:XXX add operation name. args
ovs_rows = idl_.tables[vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH].rows
if ovs_rows:
ovs_ = list(ovs_rows.values())[0]
else:
# XXX add verification that table is empty
ovs_ = self.txn.insert(
idl_.tables[vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH])
if self.wait_for_reload:
ovs_.increment(vswitch_idl.OVSREC_OPEN_VSWITCH_COL_NEXT_CFG)
# TODO:XXX
# symtab = ovsdb_symbol_table_create()
ctx = VSCtlContext(idl_, self.txn, ovs_)
for command in commands:
if not command._run:
continue
command._run(ctx, command)
if ctx.try_again:
return False
LOG.debug('result:\n%s', [command.result for command in commands])
ctx.done()
# TODO:XXX check if created symbols are really created, referenced.
status = self.txn.commit_block()
next_cfg = 0
if self.wait_for_reload and status == idl.Transaction.SUCCESS:
next_cfg = self.txn.get_increment_new_value()
# TODO:XXX
# if status in (idl.Transaction.UNCHANGED, idl.Transaction.SUCCESS):
# for command in commands:
# if not command.post_func:
# continue
# ctx = VSCtlContext(idl_, txn, self.ovs)
# command.post_func(ctx)
# ctx.done()
txn_ = self.txn
self.txn = None
if status in (idl.Transaction.UNCOMMITTED, idl.Transaction.INCOMPLETE):
not_reached()
elif status == idl.Transaction.ABORTED:
vsctl_fatal('transaction aborted')
elif status == idl.Transaction.UNCHANGED:
LOG.debug('unchanged')
elif status == idl.Transaction.SUCCESS:
LOG.debug('success')
elif status == idl.Transaction.TRY_AGAIN:
return False
elif status == idl.Transaction.ERROR:
vsctl_fatal('transaction error: %s' % txn_.get_error())
elif status == idl.Transaction.NOT_LOCKED:
vsctl_fatal('database not locked')
else:
not_reached()
if self.wait_for_reload and status != idl.Transaction.UNCHANGED:
while True:
idl_.run()
if ovs_.cur_cfg >= next_cfg:
break
self._idl_block(idl_)
return True
def _do_main(self, commands):
"""
:type commands: list of VSCtlCommand
"""
self._reset()
self._init_schema_helper()
self._run_prerequisites(commands)
idl_ = idl.Idl(self.remote, self.schema_helper)
seqno = idl_.change_seqno
while True:
self._idl_wait(idl_, seqno)
seqno = idl_.change_seqno
if self._do_vsctl(idl_, commands):
break
if self.txn:
self.txn.abort()
self.txn = None
# TODO:XXX
# ovsdb_symbol_table_destroy(symtab)
idl_.close()
def _run_command(self, commands):
"""
:type commands: list of VSCtlCommand
"""
all_commands = {
# Open vSwitch commands.
'init': (None, self._cmd_init),
'show': (self._pre_cmd_show, self._cmd_show),
# 'emer-reset':
# Bridge commands.
'add-br': (self._pre_add_br, self._cmd_add_br),
'del-br': (self._pre_get_info, self._cmd_del_br),
'list-br': (self._pre_get_info, self._cmd_list_br),
'br-exists': (self._pre_get_info, self._cmd_br_exists),
'br-to-vlan': (self._pre_get_info, self._cmd_br_to_vlan),
'br-to-parent': (self._pre_get_info, self._cmd_br_to_parent),
'br-set-external-id': (self._pre_cmd_br_set_external_id,
self._cmd_br_set_external_id),
'br-get-external-id': (self._pre_cmd_br_get_external_id,
self._cmd_br_get_external_id),
# Port. commands
'list-ports': (self._pre_get_info, self._cmd_list_ports),
'add-port': (self._pre_cmd_add_port, self._cmd_add_port),
'add-bond': (self._pre_cmd_add_bond, self._cmd_add_bond),
'del-port': (self._pre_get_info, self._cmd_del_port),
'port-to-br': (self._pre_get_info, self._cmd_port_to_br),
# Interface commands.
'list-ifaces': (self._pre_get_info, self._cmd_list_ifaces),
'iface-to-br': (self._pre_get_info, self._cmd_iface_to_br),
# Controller commands.
'get-controller': (self._pre_controller, self._cmd_get_controller),
'del-controller': (self._pre_controller, self._cmd_del_controller),
'set-controller': (self._pre_controller, self._cmd_set_controller),
'get-fail-mode': (self._pre_fail_mode, self._cmd_get_fail_mode),
'del-fail-mode': (self._pre_fail_mode, self._cmd_del_fail_mode),
'set-fail-mode': (self._pre_fail_mode, self._cmd_set_fail_mode),
# Manager commands.
# 'get-manager':
# 'del-manager':
# 'set-manager':
# SSL commands.
# 'get-ssl':
# 'del-ssl':
# 'set-ssl':
# Auto Attach commands.
# 'add-aa-mapping':
# 'del-aa-mapping':
# 'get-aa-mapping':
# Switch commands.
# 'emer-reset':
# Database commands.
'list': (self._pre_cmd_list, self._cmd_list),
'find': (self._pre_cmd_find, self._cmd_find),
'get': (self._pre_cmd_get, self._cmd_get),
'set': (self._pre_cmd_set, self._cmd_set),
'add': (self._pre_cmd_add, self._cmd_add),
'remove': (self._pre_cmd_remove, self._cmd_remove),
'clear': (self._pre_cmd_clear, self._cmd_clear),
# 'create':
# 'destroy':
# 'wait-until':
# Utility commands. (No corresponding command in ovs-vsctl)
'set-qos': (self._pre_cmd_set_qos, self._cmd_set_qos),
'set-queue': (self._pre_cmd_set_queue, self._cmd_set_queue),
'del-qos': (self._pre_get_info, self._cmd_del_qos),
# for quantum_adapter
'list-ifaces-verbose': (self._pre_cmd_list_ifaces_verbose,
self._cmd_list_ifaces_verbose),
}
for command in commands:
funcs = all_commands[command.command]
command._prerequisite, command._run = funcs
self._do_main(commands)
def run_command(self, commands, timeout_sec=None, exception=None):
if timeout_sec is None:
self._run_command(commands)
else:
with hub.Timeout(timeout_sec, exception):
self._run_command(commands)
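    # Typical driver-side usage (illustrative, assuming an ovsdb-server
    # listening on tcp:127.0.0.1:6640):
    #   vsctl = VSCtl('tcp:127.0.0.1:6640')
    #   cmd = VSCtlCommand('list-br')
    #   vsctl.run_command([cmd], timeout_sec=5)
    #   print(cmd.result)   # sorted list of bridge names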
# Open vSwitch commands:
def _cmd_init(self, _ctx, _command):
# nothing. Just check connection to ovsdb
pass
_CMD_SHOW_TABLES = [
_CmdShowTable(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH, None,
[vswitch_idl.OVSREC_OPEN_VSWITCH_COL_MANAGER_OPTIONS,
vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES,
vswitch_idl.OVSREC_OPEN_VSWITCH_COL_OVS_VERSION],
False),
_CmdShowTable(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
[vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER,
vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE,
vswitch_idl.OVSREC_BRIDGE_COL_PORTS],
False),
_CmdShowTable(vswitch_idl.OVSREC_TABLE_PORT,
vswitch_idl.OVSREC_PORT_COL_NAME,
[vswitch_idl.OVSREC_PORT_COL_TAG,
vswitch_idl.OVSREC_PORT_COL_TRUNKS,
vswitch_idl.OVSREC_PORT_COL_INTERFACES],
False),
_CmdShowTable(vswitch_idl.OVSREC_TABLE_INTERFACE,
vswitch_idl.OVSREC_INTERFACE_COL_NAME,
[vswitch_idl.OVSREC_INTERFACE_COL_TYPE,
vswitch_idl.OVSREC_INTERFACE_COL_OPTIONS],
False),
_CmdShowTable(vswitch_idl.OVSREC_TABLE_CONTROLLER,
vswitch_idl.OVSREC_CONTROLLER_COL_TARGET,
[vswitch_idl.OVSREC_CONTROLLER_COL_IS_CONNECTED],
False),
_CmdShowTable(vswitch_idl.OVSREC_TABLE_MANAGER,
vswitch_idl.OVSREC_MANAGER_COL_TARGET,
[vswitch_idl.OVSREC_MANAGER_COL_IS_CONNECTED],
False),
]
def _pre_cmd_show(self, _ctx, _command):
schema_helper = self.schema_helper
for show in self._CMD_SHOW_TABLES:
schema_helper.register_table(show.table)
if show.name_column:
schema_helper.register_columns(show.table, [show.name_column])
schema_helper.register_columns(show.table, show.columns)
@staticmethod
def _cmd_show_find_table_by_row(row):
for show in VSCtl._CMD_SHOW_TABLES:
if show.table == row._table.name:
return show
return None
@staticmethod
def _cmd_show_find_table_by_name(name):
for show in VSCtl._CMD_SHOW_TABLES:
if show.table == name:
return show
return None
@staticmethod
def _cmd_show_row(ctx, row, level):
_INDENT_SIZE = 4 # # of spaces per indent
show = VSCtl._cmd_show_find_table_by_row(row)
output = ''
output += ' ' * level * _INDENT_SIZE
if show and show.name_column:
output += '%s ' % show.table
datum = getattr(row, show.name_column)
output += datum
else:
output += str(row.uuid)
output += '\n'
        if not show or show.recurse:
            return output
show.recurse = True
for column in show.columns:
datum = row._data[column]
key = datum.type.key
if key.type == ovs.db.types.UuidType and key.ref_table_name:
ref_show = VSCtl._cmd_show_find_table_by_name(
key.ref_table_name)
if ref_show:
for atom in datum.values:
ref_row = ctx.idl.tables[ref_show.table].rows.get(
atom.value)
                        if ref_row:
                            output += VSCtl._cmd_show_row(
                                ctx, ref_row, level + 1)
continue
if not datum.is_default():
output += ' ' * (level + 1) * _INDENT_SIZE
output += '%s: %s\n' % (column, datum)
show.recurse = False
return output
def _cmd_show(self, ctx, command):
for row in ctx.idl.tables[
self._CMD_SHOW_TABLES[0].table].rows.values():
output = self._cmd_show_row(ctx, row, 0)
command.result = output
# Bridge commands:
def _pre_get_info(self, _ctx, _command):
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
[vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_BRIDGE,
[vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER,
vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE,
vswitch_idl.OVSREC_BRIDGE_COL_PORTS])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_PORT,
[vswitch_idl.OVSREC_PORT_COL_NAME,
vswitch_idl.OVSREC_PORT_COL_FAKE_BRIDGE,
vswitch_idl.OVSREC_PORT_COL_TAG,
vswitch_idl.OVSREC_PORT_COL_INTERFACES,
vswitch_idl.OVSREC_PORT_COL_QOS])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_INTERFACE,
[vswitch_idl.OVSREC_INTERFACE_COL_NAME])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QOS,
[vswitch_idl.OVSREC_QOS_COL_QUEUES])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QUEUE,
[])
def _cmd_list_br(self, ctx, command):
ctx.populate_cache()
command.result = sorted(ctx.bridges.keys())
def _pre_add_br(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_INTERFACE,
[vswitch_idl.OVSREC_INTERFACE_COL_TYPE])
def _cmd_add_br(self, ctx, command):
br_name = command.args[0]
parent_name = None
vlan = 0
if len(command.args) == 1:
pass
elif len(command.args) == 3:
parent_name = command.args[1]
vlan = int(command.args[2])
            if vlan < 0 or vlan > 4095:
                vsctl_fatal('vlan must be between 0 and 4095, got %d' % vlan)
        else:
            vsctl_fatal('this command takes exactly 1 or 3 arguments')
ctx.add_bridge(br_name, parent_name, vlan)
def _del_br(self, ctx, br_name, must_exist=False):
ctx.populate_cache()
br = ctx.find_bridge(br_name, must_exist)
if br:
ctx.del_bridge(br)
def _cmd_del_br(self, ctx, command):
br_name = command.args[0]
self._del_br(ctx, br_name)
def _br_exists(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, must_exist=False)
return br is not None
def _cmd_br_exists(self, ctx, command):
br_name = command.args[0]
command.result = self._br_exists(ctx, br_name)
def _br_to_vlan(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, must_exist=True)
vlan = br.vlan
if isinstance(vlan, list):
if len(vlan) == 0:
vlan = 0
else:
vlan = vlan[0]
return vlan
def _cmd_br_to_vlan(self, ctx, command):
br_name = command.args[0]
command.result = self._br_to_vlan(ctx, br_name)
def _br_to_parent(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, must_exist=True)
return br if br.parent is None else br.parent
def _cmd_br_to_parent(self, ctx, command):
br_name = command.args[0]
command.result = self._br_to_parent(ctx, br_name)
def _pre_cmd_br_set_external_id(self, ctx, _command):
table_name = vswitch_idl.OVSREC_TABLE_BRIDGE
columns = [vswitch_idl.OVSREC_BRIDGE_COL_EXTERNAL_IDS]
self._pre_mod_columns(ctx, table_name, columns)
def _br_add_external_id(self, ctx, br_name, key, value):
table_name = vswitch_idl.OVSREC_TABLE_BRIDGE
column = vswitch_idl.OVSREC_BRIDGE_COL_EXTERNAL_IDS
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, br_name)
value_json = ['map', [[key, value]]]
ctx.add_column(ovsrec_row, column, value_json)
ctx.invalidate_cache()
def _br_clear_external_id(self, ctx, br_name, key):
table_name = vswitch_idl.OVSREC_TABLE_BRIDGE
column = vswitch_idl.OVSREC_BRIDGE_COL_EXTERNAL_IDS
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, br_name)
values = getattr(ovsrec_row, column, {})
values.pop(key, None)
setattr(ovsrec_row, column, values)
ctx.invalidate_cache()
def _cmd_br_set_external_id(self, ctx, command):
br_name = command.args[0]
key = command.args[1]
if len(command.args) > 2:
self._br_add_external_id(ctx, br_name, key, command.args[2])
else:
self._br_clear_external_id(ctx, br_name, key)
def _pre_cmd_br_get_external_id(self, ctx, _command):
table_name = vswitch_idl.OVSREC_TABLE_BRIDGE
columns = [vswitch_idl.OVSREC_BRIDGE_COL_EXTERNAL_IDS]
self._pre_get_columns(ctx, table_name, columns)
def _br_get_external_id_value(self, ctx, br_name, key):
external_id = self._br_get_external_id_list(ctx, br_name)
return external_id.get(key, None)
def _br_get_external_id_list(self, ctx, br_name):
table_name = vswitch_idl.OVSREC_TABLE_BRIDGE
column = vswitch_idl.OVSREC_BRIDGE_COL_EXTERNAL_IDS
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, br_name)
return ctx.get_column(ovsrec_row, column)
def _cmd_br_get_external_id(self, ctx, command):
br_name = command.args[0]
if len(command.args) > 1:
command.result = self._br_get_external_id_value(ctx, br_name,
command.args[1])
else:
command.result = self._br_get_external_id_list(ctx, br_name)
# Port commands:
def _list_ports(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
if br.br_cfg:
br.br_cfg.verify(vswitch_idl.OVSREC_BRIDGE_COL_PORTS)
else:
br.parent.br_cfg.verify(vswitch_idl.OVSREC_BRIDGE_COL_PORTS)
return [port.port_cfg.name for port in br.ports
if port.port_cfg.name != br.name]
def _cmd_list_ports(self, ctx, command):
br_name = command.args[0]
port_names = self._list_ports(ctx, br_name)
command.result = sorted(port_names)
def _pre_add_port(self, _ctx, columns):
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_PORT,
[vswitch_idl.OVSREC_PORT_COL_NAME,
vswitch_idl.OVSREC_PORT_COL_BOND_FAKE_IFACE])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_PORT, columns)
def _pre_cmd_add_port(self, ctx, command):
self._pre_get_info(ctx, command)
columns = [
ctx.parse_column_key_value(
self.schema.tables[vswitch_idl.OVSREC_TABLE_PORT], setting)[0]
for setting in command.args[2:]]
self._pre_add_port(ctx, columns)
def _pre_cmd_add_bond(self, ctx, command):
self._pre_get_info(ctx, command)
if len(command.args) < 3:
vsctl_fatal('this command requires at least 3 arguments')
columns = [
ctx.parse_column_key_value(
self.schema.tables[vswitch_idl.OVSREC_TABLE_PORT], setting)[0]
for setting in command.args[3:]]
self._pre_add_port(ctx, columns)
def _cmd_add_port(self, ctx, command):
may_exist = command.has_option('--may_exist')
br_name = command.args[0]
port_name = command.args[1]
iface_names = [command.args[1]]
settings = [
ctx.parse_column_key_value(
self.schema.tables[vswitch_idl.OVSREC_TABLE_PORT], setting)
for setting in command.args[2:]]
ctx.add_port(br_name, port_name, may_exist,
False, iface_names, settings)
def _cmd_add_bond(self, ctx, command):
may_exist = command.has_option('--may_exist')
fake_iface = command.has_option('--fake-iface')
br_name = command.args[0]
port_name = command.args[1]
iface_names = list(command.args[2])
settings = [
ctx.parse_column_key_value(
self.schema.tables[vswitch_idl.OVSREC_TABLE_PORT], setting)
for setting in command.args[3:]]
ctx.add_port(br_name, port_name, may_exist, fake_iface,
iface_names, settings)
def _del_port(self, ctx, br_name=None, target=None,
must_exist=False, with_iface=False):
assert target is not None
ctx.populate_cache()
if not with_iface:
vsctl_port = ctx.find_port(target, must_exist)
else:
vsctl_port = ctx.find_port(target, False)
if not vsctl_port:
vsctl_iface = ctx.find_iface(target, False)
if vsctl_iface:
vsctl_port = vsctl_iface.port()
if must_exist and not vsctl_port:
vsctl_fatal('no port or interface named %s' % target)
if not vsctl_port:
return
        if br_name:
vsctl_bridge = ctx.find_bridge(br_name, True)
if vsctl_port.bridge() != vsctl_bridge:
if vsctl_port.bridge().parent == vsctl_bridge:
vsctl_fatal('bridge %s does not have a port %s (although '
'its parent bridge %s does)' %
(br_name, target, vsctl_bridge.parent.name))
else:
vsctl_fatal('bridge %s does not have a port %s' %
(br_name, target))
ctx.del_port(vsctl_port)
def _cmd_del_port(self, ctx, command):
must_exist = command.has_option('--must-exist')
with_iface = command.has_option('--with-iface')
target = command.args[-1]
br_name = command.args[0] if len(command.args) == 2 else None
self._del_port(ctx, br_name, target, must_exist, with_iface)
def _port_to_br(self, ctx, port_name):
ctx.populate_cache()
port = ctx.find_port(port_name, True)
bridge = port.bridge()
if bridge is None:
vsctl_fatal('Bridge associated to port "%s" does not exist' %
port_name)
return bridge.name
    def _cmd_port_to_br(self, ctx, command):
        port_name = command.args[0]
        command.result = self._port_to_br(ctx, port_name)
# Interface commands:
def _list_ifaces(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
ctx.verify_ports()
iface_names = set()
for vsctl_port in br.ports:
for vsctl_iface in vsctl_port.ifaces:
iface_name = vsctl_iface.iface_cfg.name
if iface_name != br_name:
iface_names.add(iface_name)
return iface_names
def _cmd_list_ifaces(self, ctx, command):
br_name = command.args[0]
iface_names = self._list_ifaces(ctx, br_name)
command.result = sorted(iface_names)
def _iface_to_br(self, ctx, iface_name):
ctx.populate_cache()
iface = ctx.find_iface(iface_name, True)
port = iface.port()
if port is None:
vsctl_fatal('Port associated to iface "%s" does not exist' %
iface_name)
bridge = port.bridge()
if bridge is None:
vsctl_fatal('Bridge associated to iface "%s" does not exist' %
iface_name)
return bridge.name
def _cmd_iface_to_br(self, ctx, command):
iface_name = command.args[0]
command.result = self._iface_to_br(ctx, iface_name)
# Utility commands for quantum_adapter:
def _pre_cmd_list_ifaces_verbose(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_BRIDGE,
[vswitch_idl.OVSREC_BRIDGE_COL_DATAPATH_ID])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_INTERFACE,
[vswitch_idl.OVSREC_INTERFACE_COL_TYPE,
vswitch_idl.OVSREC_INTERFACE_COL_NAME,
vswitch_idl.OVSREC_INTERFACE_COL_EXTERNAL_IDS,
vswitch_idl.OVSREC_INTERFACE_COL_OPTIONS,
vswitch_idl.OVSREC_INTERFACE_COL_OFPORT])
@staticmethod
def _iface_to_dict(iface_cfg):
_ATTRIBUTE = ['name', 'ofport', 'type', 'external_ids', 'options']
attr = dict((key, getattr(iface_cfg, key)) for key in _ATTRIBUTE)
if attr['ofport']:
attr['ofport'] = attr['ofport'][0]
return attr
def _list_ifaces_verbose(self, ctx, datapath_id, port_name):
ctx.populate_cache()
br = ctx.find_bridge_by_id(datapath_id, True)
ctx.verify_ports()
iface_cfgs = []
if port_name is None:
for vsctl_port in br.ports:
iface_cfgs.extend(self._iface_to_dict(vsctl_iface.iface_cfg)
for vsctl_iface in vsctl_port.ifaces)
else:
            # When a port has just been created, its ofport column may still
            # be empty, so fall back to matching on the interface name.
for vsctl_port in br.ports:
iface_cfgs.extend(
self._iface_to_dict(vsctl_iface.iface_cfg)
for vsctl_iface in vsctl_port.ifaces
if vsctl_iface.iface_cfg.name == port_name)
return iface_cfgs
def _cmd_list_ifaces_verbose(self, ctx, command):
datapath_id = command.args[0]
port_name = None
if len(command.args) >= 2:
port_name = command.args[1]
LOG.debug('command.args %s', command.args)
iface_cfgs = self._list_ifaces_verbose(ctx, datapath_id, port_name)
        # sort by interface name (the dicts themselves aren't orderable)
        command.result = sorted(iface_cfgs, key=operator.itemgetter('name'))
# Controller commands:
def _verify_controllers(self, ovsrec_bridge):
ovsrec_bridge.verify(vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER)
for controller in ovsrec_bridge.controller:
controller.verify(vswitch_idl.OVSREC_CONTROLLER_COL_TARGET)
def _pre_controller(self, ctx, command):
self._pre_get_info(ctx, command)
self.schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_CONTROLLER,
[vswitch_idl.OVSREC_CONTROLLER_COL_TARGET])
def _get_controller(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
self._verify_controllers(br.br_cfg)
return set(controller.target for controller in br.br_cfg.controller)
def _cmd_get_controller(self, ctx, command):
br_name = command.args[0]
controller_names = self._get_controller(ctx, br_name)
command.result = sorted(controller_names)
def _delete_controllers(self, ovsrec_controllers):
for controller in ovsrec_controllers:
controller.delete()
def _del_controller(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_real_bridge(br_name, True)
ovsrec_bridge = br.br_cfg
self._verify_controllers(ovsrec_bridge)
if ovsrec_bridge.controller:
self._delete_controllers(ovsrec_bridge.controller)
ovsrec_bridge.controller = []
def _cmd_del_controller(self, ctx, command):
br_name = command.args[0]
self._del_controller(ctx, br_name)
def _insert_controllers(self, controller_names):
ovsrec_controllers = []
for name in controller_names:
# TODO: check if the name startswith() supported protocols
ovsrec_controller = self.txn.insert(
self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_CONTROLLER])
ovsrec_controller.target = name
ovsrec_controllers.append(ovsrec_controller)
return ovsrec_controllers
def _insert_qos(self):
ovsrec_qos = self.txn.insert(
self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QOS])
return ovsrec_qos
def _set_controller(self, ctx, br_name, controller_names):
ctx.populate_cache()
ovsrec_bridge = ctx.find_real_bridge(br_name, True).br_cfg
self._verify_controllers(ovsrec_bridge)
self._delete_controllers(ovsrec_bridge.controller)
controllers = self._insert_controllers(controller_names)
ovsrec_bridge.controller = controllers
def _cmd_set_controller(self, ctx, command):
br_name = command.args[0]
controller_names = command.args[1:]
self._set_controller(ctx, br_name, controller_names)
def _pre_fail_mode(self, ctx, command):
self._pre_get_info(ctx, command)
self.schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_BRIDGE,
[vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE])
def _get_fail_mode(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
# Note: Returns first element of fail_mode column
return getattr(br.br_cfg, vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE)[0]
def _cmd_get_fail_mode(self, ctx, command):
br_name = command.args[0]
command.result = self._get_fail_mode(ctx, br_name)
def _del_fail_mode(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
# Note: assuming that [] means empty
setattr(br.br_cfg, vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE, [])
ctx.invalidate_cache()
def _cmd_del_fail_mode(self, ctx, command):
br_name = command.args[0]
self._del_fail_mode(ctx, br_name)
def _set_fail_mode(self, ctx, br_name, mode):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
setattr(br.br_cfg, vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE, mode)
ctx.invalidate_cache()
def _cmd_set_fail_mode(self, ctx, command):
br_name = command.args[0]
mode = command.args[1]
if mode not in ('standalone', 'secure'):
vsctl_fatal('fail-mode must be "standalone" or "secure"')
self._set_fail_mode(ctx, br_name, mode)
# Utility commands:
def _del_qos(self, ctx, port_name):
assert port_name is not None
ctx.populate_cache()
vsctl_port = ctx.find_port(port_name, True)
vsctl_qos = vsctl_port.qos
ctx.del_qos(vsctl_qos)
def _cmd_del_qos(self, ctx, command):
port_name = command.args[0]
self._del_qos(ctx, port_name)
def _set_qos(self, ctx, port_name, type, max_rate):
ctx.populate_cache()
vsctl_port = ctx.find_port(port_name, True)
ovsrec_qos = ctx.set_qos(vsctl_port, type, max_rate)
return ovsrec_qos
def _cmd_set_qos(self, ctx, command):
port_name = command.args[0]
type = command.args[1]
max_rate = command.args[2]
result = self._set_qos(ctx, port_name, type, max_rate)
command.result = [result]
def _pre_cmd_set_qos(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QOS,
[vswitch_idl.OVSREC_QOS_COL_EXTERNAL_IDS,
vswitch_idl.OVSREC_QOS_COL_OTHER_CONFIG,
vswitch_idl.OVSREC_QOS_COL_QUEUES,
vswitch_idl.OVSREC_QOS_COL_TYPE])
def _cmd_set_queue(self, ctx, command):
ctx.populate_cache()
port_name = command.args[0]
queues = command.args[1]
vsctl_port = ctx.find_port(port_name, True)
vsctl_qos = vsctl_port.qos
queue_id = 0
results = []
for queue in queues:
max_rate = queue.get('max-rate', None)
min_rate = queue.get('min-rate', None)
ovsrec_queue = ctx.set_queue(
vsctl_qos, max_rate, min_rate, queue_id)
results.append(ovsrec_queue)
queue_id += 1
command.result = results
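    # Illustrative example: 'queues' is a list of dicts, one per queue id
    # counting up from 0, e.g.
    #   VSCtlCommand('set-queue',
    #                ('s1-eth1', [{'max-rate': '500000'},
    #                             {'min-rate': '100000'}]))
    # creates/updates Queue rows 0 and 1 on the QoS of port s1-eth1.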
def _pre_cmd_set_queue(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QUEUE,
[vswitch_idl.OVSREC_QUEUE_COL_DSCP,
vswitch_idl.OVSREC_QUEUE_COL_EXTERNAL_IDS,
vswitch_idl.OVSREC_QUEUE_COL_OTHER_CONFIG])
# Database commands:
_TABLES = [
_VSCtlTable(vswitch_idl.OVSREC_TABLE_BRIDGE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_CONTROLLER,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_INTERFACE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_INTERFACE,
vswitch_idl.OVSREC_INTERFACE_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_MIRROR,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_MIRROR,
vswitch_idl.OVSREC_MIRROR_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_MANAGER,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_MANAGER,
vswitch_idl.OVSREC_MANAGER_COL_TARGET,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_NETFLOW,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_NETFLOW)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
None,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_PORT,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_PORT,
vswitch_idl.OVSREC_PORT_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_QOS,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_PORT,
vswitch_idl.OVSREC_PORT_COL_NAME,
vswitch_idl.OVSREC_PORT_COL_QOS)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_QUEUE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_QOS,
None,
vswitch_idl.OVSREC_QOS_COL_QUEUES)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_SSL,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
None,
vswitch_idl.OVSREC_OPEN_VSWITCH_COL_SSL)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_SFLOW,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_SFLOW)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_FLOW_TABLE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_FLOW_TABLE,
vswitch_idl.OVSREC_FLOW_TABLE_COL_NAME,
None)]),
]
@staticmethod
def _score_partial_match(name, s):
_MAX_SCORE = 0xffffffff
assert len(name) < _MAX_SCORE
s = s[:_MAX_SCORE - 1] # in practice, this doesn't matter
if name == s:
return _MAX_SCORE
name = name.lower().replace('-', '_')
s = s.lower().replace('-', '_')
if s.startswith(name):
return _MAX_SCORE - 1
if name.startswith(s):
return len(s)
return 0
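    # Illustrative behaviour of the scorer above (not part of the original
    # module): an exact match wins outright, a query of which the name is a
    # prefix beats a query that is a prefix of the name, and case as well as
    # hyphen/underscore differences are ignored.
    #
    #   _score_partial_match('Bridge', 'Bridge')  -> _MAX_SCORE (exact)
    #   _score_partial_match('Bridge', 'bridges') -> _MAX_SCORE - 1
    #   _score_partial_match('Bridge', 'br')      -> 2 (length of the prefix)
    #   _score_partial_match('Bridge', 'port')    -> 0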
@staticmethod
def _get_table(table_name):
best_match = None
best_score = 0
for table in VSCtl._TABLES:
score = VSCtl._score_partial_match(table.table_name, table_name)
if score > best_score:
best_match = table
best_score = score
elif score == best_score:
best_match = None
if best_match:
return best_match
elif best_score:
vsctl_fatal('multiple table names match "%s"' % table_name)
else:
vsctl_fatal('unknown table "%s"' % table_name)
def _pre_get_table(self, _ctx, table_name):
vsctl_table = self._get_table(table_name)
schema_helper = self.schema_helper
schema_helper.register_table(vsctl_table.table_name)
for row_id in vsctl_table.row_ids:
if row_id.table:
schema_helper.register_table(row_id.table)
if row_id.name_column:
schema_helper.register_columns(row_id.table,
[row_id.name_column])
if row_id.uuid_column:
schema_helper.register_columns(row_id.table,
[row_id.uuid_column])
return vsctl_table
def _get_column(self, table_name, column_name):
best_match = None
best_score = 0
columns = self.schema.tables[table_name].columns.keys()
for column in columns:
score = VSCtl._score_partial_match(column, column_name)
if score > best_score:
best_match = column
best_score = score
elif score == best_score:
best_match = None
if best_match:
            # ovs.db.schema_helper._keep_table_columns() requires that
            # column_name be a str, not a unicode string.
return str(best_match)
elif best_score:
vsctl_fatal('%s contains more than one column whose name '
'matches "%s"' % (table_name, column_name))
else:
vsctl_fatal('%s does not contain a column whose name matches '
'"%s"' % (table_name, column_name))
def _pre_get_column(self, _ctx, table_name, column):
column_name = self._get_column(table_name, column)
self.schema_helper.register_columns(table_name, [column_name])
def _pre_get_columns(self, ctx, table_name, columns):
self._pre_get_table(ctx, table_name)
for column in columns:
self._pre_get_column(ctx, table_name, column)
def _pre_cmd_list(self, ctx, command):
table_name = command.args[0]
self._pre_get_table(ctx, table_name)
def _list(self, ctx, table_name, record_id=None):
result = []
for ovsrec_row in ctx.idl.tables[table_name].rows.values():
if record_id is not None and ovsrec_row.name != record_id:
continue
result.append(ovsrec_row)
return result
def _cmd_list(self, ctx, command):
table_name = command.args[0]
record_id = None
if len(command.args) > 1:
record_id = command.args[1]
command.result = self._list(ctx, table_name, record_id)
def _pre_cmd_find(self, ctx, command):
table_name = command.args[0]
table_schema = self.schema.tables[table_name]
columns = [
ctx.parse_column_key_value(table_schema, column_key_value)[0]
for column_key_value in command.args[1:]]
self._pre_get_columns(ctx, table_name, columns)
def _check_value(self, ovsrec_row, column_value):
"""
:type column_value: tuple of column and value_json
"""
column, value_json = column_value
column_schema = ovsrec_row._table.columns[column]
value = ovs.db.data.Datum.from_json(
column_schema.type, value_json).to_python(ovs.db.idl._uuid_to_row)
datum = getattr(ovsrec_row, column)
if column_schema.type.is_map():
for k, v in value.items():
if k in datum and datum[k] == v:
return True
elif datum == value:
return True
return False
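    # Example of the matching rule above (illustrative, using the OVSDB JSON
    # datum format): for a map column such as external_ids, the pair
    #   ('external_ids', ['map', [['foo', 'bar']]])
    # matches a row whose external_ids contains {'foo': 'bar'}; for scalar
    # and set columns the whole datum must compare equal.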
def _find(self, ctx, table_name, column_values):
"""
:type column_values: list of (column, value_json)
"""
result = []
for ovsrec_row in ctx.idl.tables[table_name].rows.values():
LOG.debug('ovsrec_row %s', ovsrec_row_to_string(ovsrec_row))
if all(self._check_value(ovsrec_row, column_value)
for column_value in column_values):
result.append(ovsrec_row)
return result
def _cmd_find(self, ctx, command):
table_name = command.args[0]
table_schema = self.schema.tables[table_name]
column_values = [
ctx.parse_column_key_value(table_schema, column_key_value)
for column_key_value in command.args[1:]]
command.result = self._find(ctx, table_name, column_values)
def _pre_cmd_get(self, ctx, command):
table_name = command.args[0]
columns = [
ctx.parse_column_key(column_key)[0]
for column_key in command.args[2:]]
self._pre_get_columns(ctx, table_name, columns)
def _get(self, ctx, table_name, record_id, column_keys,
id_=None, if_exists=False):
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
# TODO: Support symbol name
# if id_:
# symbol, new = ctx.create_symbol(id_)
# if not new:
# vsctl_fatal('row id "%s" specified on "get" command was '
# 'used before it was defined' % id_)
# symbol.uuid = row.uuid
# symbol.strong_ref = True
result = []
for column, key in column_keys:
result.append(ctx.get_column(ovsrec_row, column, key, if_exists))
return result
def _cmd_get(self, ctx, command):
id_ = None # TODO: Support --id option
if_exists = command.has_option('--if-exists')
table_name = command.args[0]
record_id = command.args[1]
column_keys = [
ctx.parse_column_key(column_key)
for column_key in command.args[2:]]
command.result = self._get(
ctx, table_name, record_id, column_keys, id_, if_exists)
def _check_mutable(self, table_name, column):
column_schema = self.schema.tables[table_name].columns[column]
if not column_schema.mutable:
vsctl_fatal('cannot modify read-only column %s in table %s' %
(column, table_name))
def _pre_mod_columns(self, ctx, table_name, columns):
self._pre_get_table(ctx, table_name)
for column in columns:
self._pre_get_column(ctx, table_name, column)
self._check_mutable(table_name, column)
def _pre_cmd_set(self, ctx, command):
table_name = command.args[0]
table_schema = self.schema.tables[table_name]
columns = [
ctx.parse_column_key_value(table_schema, column_key_value)[0]
for column_key_value in command.args[2:]]
self._pre_mod_columns(ctx, table_name, columns)
def _set(self, ctx, table_name, record_id, column_values):
"""
:type column_values: list of (column, value_json)
"""
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
for column, value in column_values:
ctx.set_column(ovsrec_row, column, value)
ctx.invalidate_cache()
def _cmd_set(self, ctx, command):
table_name = command.args[0]
record_id = command.args[1]
# column_key_value: <column>[:<key>]=<value>
table_schema = self.schema.tables[table_name]
column_values = [
ctx.parse_column_key_value(table_schema, column_key_value)
for column_key_value in command.args[2:]]
self._set(ctx, table_name, record_id, column_values)
def _pre_cmd_add(self, ctx, command):
table_name = command.args[0]
columns = [command.args[2]]
self._pre_mod_columns(ctx, table_name, columns)
def _add(self, ctx, table_name, record_id, column_values):
"""
:type column_values: list of (column, value_json)
"""
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
for column, value in column_values:
ctx.add_column(ovsrec_row, column, value)
ctx.invalidate_cache()
def _cmd_add(self, ctx, command):
table_name = command.args[0]
record_id = command.args[1]
column = command.args[2]
column_key_value_strings = []
for value in command.args[3:]:
if '=' in value:
# construct <column>:<key>=value
column_key_value_strings.append('%s:%s' % (column, value))
else:
# construct <column>=value
column_key_value_strings.append('%s=%s' % (column, value))
table_schema = self.schema.tables[table_name]
column_values = [
ctx.parse_column_key_value(table_schema, column_key_value_string)
for column_key_value_string in column_key_value_strings]
self._add(ctx, table_name, record_id, column_values)
def _pre_cmd_remove(self, ctx, command):
table_name = command.args[0]
columns = [command.args[2]]
self._pre_mod_columns(ctx, table_name, columns)
def _remove(self, ctx, table_name, record_id, column_values):
"""
:type column_values: list of (column, value_json)
"""
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
for column, value in column_values:
ctx.remove_column(ovsrec_row, column, value)
ctx.invalidate_cache()
def _cmd_remove(self, ctx, command):
table_name = command.args[0]
record_id = command.args[1]
column = command.args[2]
column_key_value_strings = []
for value in command.args[3:]:
if '=' in value:
# construct <column>:<key>=value
column_key_value_strings.append('%s:%s' % (column, value))
else:
# construct <column>=value
column_key_value_strings.append('%s=%s' % (column, value))
table_schema = self.schema.tables[table_name]
column_values = [
ctx.parse_column_key_value(table_schema, column_key_value_string)
for column_key_value_string in column_key_value_strings]
self._remove(ctx, table_name, record_id, column_values)
def _pre_cmd_clear(self, ctx, command):
table_name = command.args[0]
column = command.args[2]
self._pre_mod_columns(ctx, table_name, [column])
def _clear(self, ctx, table_name, record_id, column):
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
column_schema = ctx.idl.tables[table_name].columns[column]
if column_schema.type.n_min > 0:
vsctl_fatal('"clear" operation cannot be applied to column %s '
'of table %s, which is not allowed to be empty' %
(column, table_name))
# assuming that default datum is empty.
default_datum = ovs.db.data.Datum.default(column_schema.type)
setattr(ovsrec_row, column,
default_datum.to_python(ovs.db.idl._uuid_to_row))
ctx.invalidate_cache()
def _cmd_clear(self, ctx, command):
table_name = command.args[0]
record_id = command.args[1]
column = command.args[2]
self._clear(ctx, table_name, record_id, column)
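    # Taken together, the get/set/add/remove/clear handlers above mirror the
    # ovs-vsctl database commands.  A sketch of the argument shapes they
    # expect (illustrative, not part of the original module):
    #
    #   set    Bridge br0 fail_mode=secure      -> _cmd_set
    #   add    Bridge br0 protocols OpenFlow13  -> _cmd_add
    #   remove Bridge br0 protocols OpenFlow13  -> _cmd_remove
    #   clear  Bridge br0 protocols             -> _cmd_clear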
#
# Create constants from ovs db schema
#
def schema_print(schema_location, prefix):
prefix = prefix.upper()
json = ovs.json.from_file(schema_location)
schema = ovs.db.schema.DbSchema.from_json(json)
print('# Do NOT edit.')
print('# This is automatically generated by %s' % __file__)
print('# created based on version %s' % (schema.version or 'unknown'))
print('')
print('')
print('%s_DB_NAME = \'%s\'' % (prefix, schema.name))
for table in sorted(schema.tables.values(),
key=operator.attrgetter('name')):
print('')
print('%s_TABLE_%s = \'%s\'' % (prefix,
table.name.upper(), table.name))
for column in sorted(table.columns.values(),
key=operator.attrgetter('name')):
print('%s_%s_COL_%s = \'%s\'' % (prefix, table.name.upper(),
column.name.upper(),
column.name))
def main():
    if len(sys.argv) < 2:
        print('Usage: %s <schema file>' % sys.argv[0])
        print('e.g.: %s vswitchd/vswitch.ovsschema' % sys.argv[0])
        sys.exit(1)
    location = sys.argv[1]
    prefix = 'OVSREC'
    schema_print(location, prefix)
if __name__ == '__main__':
main()
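# Illustrative invocation (the script name is hypothetical): generate the
# OVSREC_* constants used above by pointing this module at an ovs-vswitchd
# schema file and redirecting stdout, e.g.
#
#   python vsctl_schema.py vswitchd/vswitch.ovsschema > vswitch_idl.py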
/miso2-3.0.6.tar.gz/miso2-3.0.6/miso/training/adaptive_learning_rate.py
import tensorflow as tf
from tensorflow.keras.callbacks import Callback
import tensorflow.keras.backend as K
from miso.utils.rolling_buffer import RollingBuffer
import math
import time
def graph_to_console(epoch, batch, acc, loss, val_acc, val_loss, lr_prob, lr_prob_active, time_difference):
acc_i = round(acc * 50)
val_acc_i = round(val_acc * 50)
lr_prob_i = round(lr_prob * 50)
for j in range(51):
if j == acc_i:
print('#', end="")
# elif j == trainsdi:
# print('@', end="")
elif j == val_acc_i:
print('*', end="")
elif j == lr_prob_i and j != 100 and lr_prob_active:
print('+', end="")
elif j % 10 == 0:
print('|', end="")
else:
print(' ', end="")
# msg = " {} #T {:.1f}%/{:.4f}, *V {:.1f}%/{:.4f} ({:.2f}s)"
# print(msg.format(epoch, acc * 100, loss, val_acc * 100, val_loss, time_difference))
msg = " {} #T{:.1f}%/*V{:.1f}% ({:.2f}s)"
print(msg.format(epoch, acc * 100, val_acc * 100, time_difference))
class AdaptiveLearningRateScheduler(Callback):
"""
Adaptive learning rate scheduler
Decreases learning rate by a certain factor each time it is no longer improving
"""
def __init__(self, drop_rate=0.5, nb_drops=4, nb_epochs=10, verbose=1, monitor='loss'):
super(AdaptiveLearningRateScheduler, self).__init__()
self.monitor = monitor
self.drop_rate = drop_rate
self.nb_drops = nb_drops
self.nb_epochs = nb_epochs
self.verbose = verbose
self.current_epoch = 0
self.current_batch = 0
self.drop_count = 0
self.buffer = None
self.previous_time = None
self.finished = False
def on_train_begin(self, logs=None):
# if 'batch_size' in self.params and self.params['batch_size'] is not None:
# batch_size = self.params['batch_size']
# samples = self.params['samples']
# self.buffer = RollingBuffer(math.ceil(samples * self.nb_epochs / batch_size))
# else:
self.buffer = RollingBuffer(self.nb_epochs)
self.previous_time = time.time()
def on_epoch_begin(self, epoch, logs=None):
self.current_epoch = epoch
def on_epoch_end(self, epoch, logs=None):
loss = logs.get("loss")
acc = logs.get("acc") or logs.get('accuracy') or logs.get('iou_score') or logs.get('cosine_proximity')
val_loss = logs.get("val_loss") or 0
val_acc = logs.get("val_acc") or logs.get('val_accuracy') or logs.get('val_iou_score') or logs.get(
'val_cosine_proximity') or 0
if 'cosine_proximity' in logs:
acc += 1
val_acc += 1
# Elapsed time
current_time = time.time()
if self.previous_time is None:
time_difference = 0
else:
time_difference = current_time - self.previous_time
self.previous_time = current_time
# Update learning rate
self.update_learning_rate(self.current_epoch, logs)
# Graph to console
if val_acc is not None:
print("\r", end="")
graph_to_console(self.current_epoch, self.current_batch,
acc, loss, val_acc, val_loss,
self.buffer.slope_probability_less_than(0), self.buffer.full(),
time_difference)
        if self.finished:
            self.model.stop_training = True
            print("Training finished")
def on_batch_end(self, batch, logs=None):
self.current_batch += 1
def update_learning_rate(self, count, logs):
monitor_value = logs.get(self.monitor)
self.buffer.append(monitor_value)
        if count >= self.buffer.length() * 3 and self.buffer.full() and not self.finished:
# if count % 20 == 19:
# lr = float(K.get_value(self.model.optimizer.lr))
# new_lr = lr * self.drop_rate
# K.set_value(self.model.optimizer.lr, new_lr)
# print("Learning rate dropped ({}/{}) to {}".format(self.drop_count, self.nb_drops, new_lr))
if self.buffer.slope_probability_less_than(0) < 0.50:
lr = float(K.get_value(self.model.optimizer.lr))
# lr = self.model.optimizer.lr.read_value()
new_lr = lr * self.drop_rate
K.set_value(self.model.optimizer.lr, new_lr)
# self.model.optimizer.lr.assign(new_lr)
self.buffer.clear()
self.drop_count += 1
if self.drop_count == self.nb_drops:
self.finished = True
return
if self.verbose == 1:
print("Learning rate dropped ({}/{}) to {}".format(self.drop_count, self.nb_drops, new_lr))
/pdfminer.six-20221105-py3-none-any.whl/pdfminer/glyphlist.py
def convert_glyphlist(path: str) -> None:
"""Convert a glyph list into a python representation.
See output below.
"""
state = 0
with open(path, "r") as fileinput:
for line in fileinput.readlines():
line = line.strip()
if not line or line.startswith("#"):
if state == 1:
state = 2
print("}\n")
print(line)
continue
if state == 0:
print("\nglyphname2unicode = {")
state = 1
(name, x) = line.split(";")
codes = x.split(" ")
print(
" {!r}: u'{}',".format(name, "".join("\\u%s" % code for code in codes))
)
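# The table below maps Adobe glyph names to their Unicode equivalents.
# A small illustrative lookup (not part of the original module):
#
#   glyphname2unicode['Aacute']            -> '\u00C1' ('Á')
#   glyphname2unicode.get('nosuchglyph')   -> None for unknown glyph names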
glyphname2unicode = {
"A": "\u0041",
"AE": "\u00C6",
"AEacute": "\u01FC",
"AEmacron": "\u01E2",
"AEsmall": "\uF7E6",
"Aacute": "\u00C1",
"Aacutesmall": "\uF7E1",
"Abreve": "\u0102",
"Abreveacute": "\u1EAE",
"Abrevecyrillic": "\u04D0",
"Abrevedotbelow": "\u1EB6",
"Abrevegrave": "\u1EB0",
"Abrevehookabove": "\u1EB2",
"Abrevetilde": "\u1EB4",
"Acaron": "\u01CD",
"Acircle": "\u24B6",
"Acircumflex": "\u00C2",
"Acircumflexacute": "\u1EA4",
"Acircumflexdotbelow": "\u1EAC",
"Acircumflexgrave": "\u1EA6",
"Acircumflexhookabove": "\u1EA8",
"Acircumflexsmall": "\uF7E2",
"Acircumflextilde": "\u1EAA",
"Acute": "\uF6C9",
"Acutesmall": "\uF7B4",
"Acyrillic": "\u0410",
"Adblgrave": "\u0200",
"Adieresis": "\u00C4",
"Adieresiscyrillic": "\u04D2",
"Adieresismacron": "\u01DE",
"Adieresissmall": "\uF7E4",
"Adotbelow": "\u1EA0",
"Adotmacron": "\u01E0",
"Agrave": "\u00C0",
"Agravesmall": "\uF7E0",
"Ahookabove": "\u1EA2",
"Aiecyrillic": "\u04D4",
"Ainvertedbreve": "\u0202",
"Alpha": "\u0391",
"Alphatonos": "\u0386",
"Amacron": "\u0100",
"Amonospace": "\uFF21",
"Aogonek": "\u0104",
"Aring": "\u00C5",
"Aringacute": "\u01FA",
"Aringbelow": "\u1E00",
"Aringsmall": "\uF7E5",
"Asmall": "\uF761",
"Atilde": "\u00C3",
"Atildesmall": "\uF7E3",
"Aybarmenian": "\u0531",
"B": "\u0042",
"Bcircle": "\u24B7",
"Bdotaccent": "\u1E02",
"Bdotbelow": "\u1E04",
"Becyrillic": "\u0411",
"Benarmenian": "\u0532",
"Beta": "\u0392",
"Bhook": "\u0181",
"Blinebelow": "\u1E06",
"Bmonospace": "\uFF22",
"Brevesmall": "\uF6F4",
"Bsmall": "\uF762",
"Btopbar": "\u0182",
"C": "\u0043",
"Caarmenian": "\u053E",
"Cacute": "\u0106",
"Caron": "\uF6CA",
"Caronsmall": "\uF6F5",
"Ccaron": "\u010C",
"Ccedilla": "\u00C7",
"Ccedillaacute": "\u1E08",
"Ccedillasmall": "\uF7E7",
"Ccircle": "\u24B8",
"Ccircumflex": "\u0108",
"Cdot": "\u010A",
"Cdotaccent": "\u010A",
"Cedillasmall": "\uF7B8",
"Chaarmenian": "\u0549",
"Cheabkhasiancyrillic": "\u04BC",
"Checyrillic": "\u0427",
"Chedescenderabkhasiancyrillic": "\u04BE",
"Chedescendercyrillic": "\u04B6",
"Chedieresiscyrillic": "\u04F4",
"Cheharmenian": "\u0543",
"Chekhakassiancyrillic": "\u04CB",
"Cheverticalstrokecyrillic": "\u04B8",
"Chi": "\u03A7",
"Chook": "\u0187",
"Circumflexsmall": "\uF6F6",
"Cmonospace": "\uFF23",
"Coarmenian": "\u0551",
"Csmall": "\uF763",
"D": "\u0044",
"DZ": "\u01F1",
"DZcaron": "\u01C4",
"Daarmenian": "\u0534",
"Dafrican": "\u0189",
"Dcaron": "\u010E",
"Dcedilla": "\u1E10",
"Dcircle": "\u24B9",
"Dcircumflexbelow": "\u1E12",
"Dcroat": "\u0110",
"Ddotaccent": "\u1E0A",
"Ddotbelow": "\u1E0C",
"Decyrillic": "\u0414",
"Deicoptic": "\u03EE",
"Delta": "\u2206",
"Deltagreek": "\u0394",
"Dhook": "\u018A",
"Dieresis": "\uF6CB",
"DieresisAcute": "\uF6CC",
"DieresisGrave": "\uF6CD",
"Dieresissmall": "\uF7A8",
"Digammagreek": "\u03DC",
"Djecyrillic": "\u0402",
"Dlinebelow": "\u1E0E",
"Dmonospace": "\uFF24",
"Dotaccentsmall": "\uF6F7",
"Dslash": "\u0110",
"Dsmall": "\uF764",
"Dtopbar": "\u018B",
"Dz": "\u01F2",
"Dzcaron": "\u01C5",
"Dzeabkhasiancyrillic": "\u04E0",
"Dzecyrillic": "\u0405",
"Dzhecyrillic": "\u040F",
"E": "\u0045",
"Eacute": "\u00C9",
"Eacutesmall": "\uF7E9",
"Ebreve": "\u0114",
"Ecaron": "\u011A",
"Ecedillabreve": "\u1E1C",
"Echarmenian": "\u0535",
"Ecircle": "\u24BA",
"Ecircumflex": "\u00CA",
"Ecircumflexacute": "\u1EBE",
"Ecircumflexbelow": "\u1E18",
"Ecircumflexdotbelow": "\u1EC6",
"Ecircumflexgrave": "\u1EC0",
"Ecircumflexhookabove": "\u1EC2",
"Ecircumflexsmall": "\uF7EA",
"Ecircumflextilde": "\u1EC4",
"Ecyrillic": "\u0404",
"Edblgrave": "\u0204",
"Edieresis": "\u00CB",
"Edieresissmall": "\uF7EB",
"Edot": "\u0116",
"Edotaccent": "\u0116",
"Edotbelow": "\u1EB8",
"Efcyrillic": "\u0424",
"Egrave": "\u00C8",
"Egravesmall": "\uF7E8",
"Eharmenian": "\u0537",
"Ehookabove": "\u1EBA",
"Eightroman": "\u2167",
"Einvertedbreve": "\u0206",
"Eiotifiedcyrillic": "\u0464",
"Elcyrillic": "\u041B",
"Elevenroman": "\u216A",
"Emacron": "\u0112",
"Emacronacute": "\u1E16",
"Emacrongrave": "\u1E14",
"Emcyrillic": "\u041C",
"Emonospace": "\uFF25",
"Encyrillic": "\u041D",
"Endescendercyrillic": "\u04A2",
"Eng": "\u014A",
"Enghecyrillic": "\u04A4",
"Enhookcyrillic": "\u04C7",
"Eogonek": "\u0118",
"Eopen": "\u0190",
"Epsilon": "\u0395",
"Epsilontonos": "\u0388",
"Ercyrillic": "\u0420",
"Ereversed": "\u018E",
"Ereversedcyrillic": "\u042D",
"Escyrillic": "\u0421",
"Esdescendercyrillic": "\u04AA",
"Esh": "\u01A9",
"Esmall": "\uF765",
"Eta": "\u0397",
"Etarmenian": "\u0538",
"Etatonos": "\u0389",
"Eth": "\u00D0",
"Ethsmall": "\uF7F0",
"Etilde": "\u1EBC",
"Etildebelow": "\u1E1A",
"Euro": "\u20AC",
"Ezh": "\u01B7",
"Ezhcaron": "\u01EE",
"Ezhreversed": "\u01B8",
"F": "\u0046",
"Fcircle": "\u24BB",
"Fdotaccent": "\u1E1E",
"Feharmenian": "\u0556",
"Feicoptic": "\u03E4",
"Fhook": "\u0191",
"Fitacyrillic": "\u0472",
"Fiveroman": "\u2164",
"Fmonospace": "\uFF26",
"Fourroman": "\u2163",
"Fsmall": "\uF766",
"G": "\u0047",
"GBsquare": "\u3387",
"Gacute": "\u01F4",
"Gamma": "\u0393",
"Gammaafrican": "\u0194",
"Gangiacoptic": "\u03EA",
"Gbreve": "\u011E",
"Gcaron": "\u01E6",
"Gcedilla": "\u0122",
"Gcircle": "\u24BC",
"Gcircumflex": "\u011C",
"Gcommaaccent": "\u0122",
"Gdot": "\u0120",
"Gdotaccent": "\u0120",
"Gecyrillic": "\u0413",
"Ghadarmenian": "\u0542",
"Ghemiddlehookcyrillic": "\u0494",
"Ghestrokecyrillic": "\u0492",
"Gheupturncyrillic": "\u0490",
"Ghook": "\u0193",
"Gimarmenian": "\u0533",
"Gjecyrillic": "\u0403",
"Gmacron": "\u1E20",
"Gmonospace": "\uFF27",
"Grave": "\uF6CE",
"Gravesmall": "\uF760",
"Gsmall": "\uF767",
"Gsmallhook": "\u029B",
"Gstroke": "\u01E4",
"H": "\u0048",
"H18533": "\u25CF",
"H18543": "\u25AA",
"H18551": "\u25AB",
"H22073": "\u25A1",
"HPsquare": "\u33CB",
"Haabkhasiancyrillic": "\u04A8",
"Hadescendercyrillic": "\u04B2",
"Hardsigncyrillic": "\u042A",
"Hbar": "\u0126",
"Hbrevebelow": "\u1E2A",
"Hcedilla": "\u1E28",
"Hcircle": "\u24BD",
"Hcircumflex": "\u0124",
"Hdieresis": "\u1E26",
"Hdotaccent": "\u1E22",
"Hdotbelow": "\u1E24",
"Hmonospace": "\uFF28",
"Hoarmenian": "\u0540",
"Horicoptic": "\u03E8",
"Hsmall": "\uF768",
"Hungarumlaut": "\uF6CF",
"Hungarumlautsmall": "\uF6F8",
"Hzsquare": "\u3390",
"I": "\u0049",
"IAcyrillic": "\u042F",
"IJ": "\u0132",
"IUcyrillic": "\u042E",
"Iacute": "\u00CD",
"Iacutesmall": "\uF7ED",
"Ibreve": "\u012C",
"Icaron": "\u01CF",
"Icircle": "\u24BE",
"Icircumflex": "\u00CE",
"Icircumflexsmall": "\uF7EE",
"Icyrillic": "\u0406",
"Idblgrave": "\u0208",
"Idieresis": "\u00CF",
"Idieresisacute": "\u1E2E",
"Idieresiscyrillic": "\u04E4",
"Idieresissmall": "\uF7EF",
"Idot": "\u0130",
"Idotaccent": "\u0130",
"Idotbelow": "\u1ECA",
"Iebrevecyrillic": "\u04D6",
"Iecyrillic": "\u0415",
"Ifraktur": "\u2111",
"Igrave": "\u00CC",
"Igravesmall": "\uF7EC",
"Ihookabove": "\u1EC8",
"Iicyrillic": "\u0418",
"Iinvertedbreve": "\u020A",
"Iishortcyrillic": "\u0419",
"Imacron": "\u012A",
"Imacroncyrillic": "\u04E2",
"Imonospace": "\uFF29",
"Iniarmenian": "\u053B",
"Iocyrillic": "\u0401",
"Iogonek": "\u012E",
"Iota": "\u0399",
"Iotaafrican": "\u0196",
"Iotadieresis": "\u03AA",
"Iotatonos": "\u038A",
"Ismall": "\uF769",
"Istroke": "\u0197",
"Itilde": "\u0128",
"Itildebelow": "\u1E2C",
"Izhitsacyrillic": "\u0474",
"Izhitsadblgravecyrillic": "\u0476",
"J": "\u004A",
"Jaarmenian": "\u0541",
"Jcircle": "\u24BF",
"Jcircumflex": "\u0134",
"Jecyrillic": "\u0408",
"Jheharmenian": "\u054B",
"Jmonospace": "\uFF2A",
"Jsmall": "\uF76A",
"K": "\u004B",
"KBsquare": "\u3385",
"KKsquare": "\u33CD",
"Kabashkircyrillic": "\u04A0",
"Kacute": "\u1E30",
"Kacyrillic": "\u041A",
"Kadescendercyrillic": "\u049A",
"Kahookcyrillic": "\u04C3",
"Kappa": "\u039A",
"Kastrokecyrillic": "\u049E",
"Kaverticalstrokecyrillic": "\u049C",
"Kcaron": "\u01E8",
"Kcedilla": "\u0136",
"Kcircle": "\u24C0",
"Kcommaaccent": "\u0136",
"Kdotbelow": "\u1E32",
"Keharmenian": "\u0554",
"Kenarmenian": "\u053F",
"Khacyrillic": "\u0425",
"Kheicoptic": "\u03E6",
"Khook": "\u0198",
"Kjecyrillic": "\u040C",
"Klinebelow": "\u1E34",
"Kmonospace": "\uFF2B",
"Koppacyrillic": "\u0480",
"Koppagreek": "\u03DE",
"Ksicyrillic": "\u046E",
"Ksmall": "\uF76B",
"L": "\u004C",
"LJ": "\u01C7",
"LL": "\uF6BF",
"Lacute": "\u0139",
"Lambda": "\u039B",
"Lcaron": "\u013D",
"Lcedilla": "\u013B",
"Lcircle": "\u24C1",
"Lcircumflexbelow": "\u1E3C",
"Lcommaaccent": "\u013B",
"Ldot": "\u013F",
"Ldotaccent": "\u013F",
"Ldotbelow": "\u1E36",
"Ldotbelowmacron": "\u1E38",
"Liwnarmenian": "\u053C",
"Lj": "\u01C8",
"Ljecyrillic": "\u0409",
"Llinebelow": "\u1E3A",
"Lmonospace": "\uFF2C",
"Lslash": "\u0141",
"Lslashsmall": "\uF6F9",
"Lsmall": "\uF76C",
"M": "\u004D",
"MBsquare": "\u3386",
"Macron": "\uF6D0",
"Macronsmall": "\uF7AF",
"Macute": "\u1E3E",
"Mcircle": "\u24C2",
"Mdotaccent": "\u1E40",
"Mdotbelow": "\u1E42",
"Menarmenian": "\u0544",
"Mmonospace": "\uFF2D",
"Msmall": "\uF76D",
"Mturned": "\u019C",
"Mu": "\u039C",
"N": "\u004E",
"NJ": "\u01CA",
"Nacute": "\u0143",
"Ncaron": "\u0147",
"Ncedilla": "\u0145",
"Ncircle": "\u24C3",
"Ncircumflexbelow": "\u1E4A",
"Ncommaaccent": "\u0145",
"Ndotaccent": "\u1E44",
"Ndotbelow": "\u1E46",
"Nhookleft": "\u019D",
"Nineroman": "\u2168",
"Nj": "\u01CB",
"Njecyrillic": "\u040A",
"Nlinebelow": "\u1E48",
"Nmonospace": "\uFF2E",
"Nowarmenian": "\u0546",
"Nsmall": "\uF76E",
"Ntilde": "\u00D1",
"Ntildesmall": "\uF7F1",
"Nu": "\u039D",
"O": "\u004F",
"OE": "\u0152",
"OEsmall": "\uF6FA",
"Oacute": "\u00D3",
"Oacutesmall": "\uF7F3",
"Obarredcyrillic": "\u04E8",
"Obarreddieresiscyrillic": "\u04EA",
"Obreve": "\u014E",
"Ocaron": "\u01D1",
"Ocenteredtilde": "\u019F",
"Ocircle": "\u24C4",
"Ocircumflex": "\u00D4",
"Ocircumflexacute": "\u1ED0",
"Ocircumflexdotbelow": "\u1ED8",
"Ocircumflexgrave": "\u1ED2",
"Ocircumflexhookabove": "\u1ED4",
"Ocircumflexsmall": "\uF7F4",
"Ocircumflextilde": "\u1ED6",
"Ocyrillic": "\u041E",
"Odblacute": "\u0150",
"Odblgrave": "\u020C",
"Odieresis": "\u00D6",
"Odieresiscyrillic": "\u04E6",
"Odieresissmall": "\uF7F6",
"Odotbelow": "\u1ECC",
"Ogoneksmall": "\uF6FB",
"Ograve": "\u00D2",
"Ogravesmall": "\uF7F2",
"Oharmenian": "\u0555",
"Ohm": "\u2126",
"Ohookabove": "\u1ECE",
"Ohorn": "\u01A0",
"Ohornacute": "\u1EDA",
"Ohorndotbelow": "\u1EE2",
"Ohorngrave": "\u1EDC",
"Ohornhookabove": "\u1EDE",
"Ohorntilde": "\u1EE0",
"Ohungarumlaut": "\u0150",
"Oi": "\u01A2",
"Oinvertedbreve": "\u020E",
"Omacron": "\u014C",
"Omacronacute": "\u1E52",
"Omacrongrave": "\u1E50",
"Omega": "\u2126",
"Omegacyrillic": "\u0460",
"Omegagreek": "\u03A9",
"Omegaroundcyrillic": "\u047A",
"Omegatitlocyrillic": "\u047C",
"Omegatonos": "\u038F",
"Omicron": "\u039F",
"Omicrontonos": "\u038C",
"Omonospace": "\uFF2F",
"Oneroman": "\u2160",
"Oogonek": "\u01EA",
"Oogonekmacron": "\u01EC",
"Oopen": "\u0186",
"Oslash": "\u00D8",
"Oslashacute": "\u01FE",
"Oslashsmall": "\uF7F8",
"Osmall": "\uF76F",
"Ostrokeacute": "\u01FE",
"Otcyrillic": "\u047E",
"Otilde": "\u00D5",
"Otildeacute": "\u1E4C",
"Otildedieresis": "\u1E4E",
"Otildesmall": "\uF7F5",
"P": "\u0050",
"Pacute": "\u1E54",
"Pcircle": "\u24C5",
"Pdotaccent": "\u1E56",
"Pecyrillic": "\u041F",
"Peharmenian": "\u054A",
"Pemiddlehookcyrillic": "\u04A6",
"Phi": "\u03A6",
"Phook": "\u01A4",
"Pi": "\u03A0",
"Piwrarmenian": "\u0553",
"Pmonospace": "\uFF30",
"Psi": "\u03A8",
"Psicyrillic": "\u0470",
"Psmall": "\uF770",
"Q": "\u0051",
"Qcircle": "\u24C6",
"Qmonospace": "\uFF31",
"Qsmall": "\uF771",
"R": "\u0052",
"Raarmenian": "\u054C",
"Racute": "\u0154",
"Rcaron": "\u0158",
"Rcedilla": "\u0156",
"Rcircle": "\u24C7",
"Rcommaaccent": "\u0156",
"Rdblgrave": "\u0210",
"Rdotaccent": "\u1E58",
"Rdotbelow": "\u1E5A",
"Rdotbelowmacron": "\u1E5C",
"Reharmenian": "\u0550",
"Rfraktur": "\u211C",
"Rho": "\u03A1",
"Ringsmall": "\uF6FC",
"Rinvertedbreve": "\u0212",
"Rlinebelow": "\u1E5E",
"Rmonospace": "\uFF32",
"Rsmall": "\uF772",
"Rsmallinverted": "\u0281",
"Rsmallinvertedsuperior": "\u02B6",
"S": "\u0053",
"SF010000": "\u250C",
"SF020000": "\u2514",
"SF030000": "\u2510",
"SF040000": "\u2518",
"SF050000": "\u253C",
"SF060000": "\u252C",
"SF070000": "\u2534",
"SF080000": "\u251C",
"SF090000": "\u2524",
"SF100000": "\u2500",
"SF110000": "\u2502",
"SF190000": "\u2561",
"SF200000": "\u2562",
"SF210000": "\u2556",
"SF220000": "\u2555",
"SF230000": "\u2563",
"SF240000": "\u2551",
"SF250000": "\u2557",
"SF260000": "\u255D",
"SF270000": "\u255C",
"SF280000": "\u255B",
"SF360000": "\u255E",
"SF370000": "\u255F",
"SF380000": "\u255A",
"SF390000": "\u2554",
"SF400000": "\u2569",
"SF410000": "\u2566",
"SF420000": "\u2560",
"SF430000": "\u2550",
"SF440000": "\u256C",
"SF450000": "\u2567",
"SF460000": "\u2568",
"SF470000": "\u2564",
"SF480000": "\u2565",
"SF490000": "\u2559",
"SF500000": "\u2558",
"SF510000": "\u2552",
"SF520000": "\u2553",
"SF530000": "\u256B",
"SF540000": "\u256A",
"Sacute": "\u015A",
"Sacutedotaccent": "\u1E64",
"Sampigreek": "\u03E0",
"Scaron": "\u0160",
"Scarondotaccent": "\u1E66",
"Scaronsmall": "\uF6FD",
"Scedilla": "\u015E",
"Schwa": "\u018F",
"Schwacyrillic": "\u04D8",
"Schwadieresiscyrillic": "\u04DA",
"Scircle": "\u24C8",
"Scircumflex": "\u015C",
"Scommaaccent": "\u0218",
"Sdotaccent": "\u1E60",
"Sdotbelow": "\u1E62",
"Sdotbelowdotaccent": "\u1E68",
"Seharmenian": "\u054D",
"Sevenroman": "\u2166",
"Shaarmenian": "\u0547",
"Shacyrillic": "\u0428",
"Shchacyrillic": "\u0429",
"Sheicoptic": "\u03E2",
"Shhacyrillic": "\u04BA",
"Shimacoptic": "\u03EC",
"Sigma": "\u03A3",
"Sixroman": "\u2165",
"Smonospace": "\uFF33",
"Softsigncyrillic": "\u042C",
"Ssmall": "\uF773",
"Stigmagreek": "\u03DA",
"T": "\u0054",
"Tau": "\u03A4",
"Tbar": "\u0166",
"Tcaron": "\u0164",
"Tcedilla": "\u0162",
"Tcircle": "\u24C9",
"Tcircumflexbelow": "\u1E70",
"Tcommaaccent": "\u0162",
"Tdotaccent": "\u1E6A",
"Tdotbelow": "\u1E6C",
"Tecyrillic": "\u0422",
"Tedescendercyrillic": "\u04AC",
"Tenroman": "\u2169",
"Tetsecyrillic": "\u04B4",
"Theta": "\u0398",
"Thook": "\u01AC",
"Thorn": "\u00DE",
"Thornsmall": "\uF7FE",
"Threeroman": "\u2162",
"Tildesmall": "\uF6FE",
"Tiwnarmenian": "\u054F",
"Tlinebelow": "\u1E6E",
"Tmonospace": "\uFF34",
"Toarmenian": "\u0539",
"Tonefive": "\u01BC",
"Tonesix": "\u0184",
"Tonetwo": "\u01A7",
"Tretroflexhook": "\u01AE",
"Tsecyrillic": "\u0426",
"Tshecyrillic": "\u040B",
"Tsmall": "\uF774",
"Twelveroman": "\u216B",
"Tworoman": "\u2161",
"U": "\u0055",
"Uacute": "\u00DA",
"Uacutesmall": "\uF7FA",
"Ubreve": "\u016C",
"Ucaron": "\u01D3",
"Ucircle": "\u24CA",
"Ucircumflex": "\u00DB",
"Ucircumflexbelow": "\u1E76",
"Ucircumflexsmall": "\uF7FB",
"Ucyrillic": "\u0423",
"Udblacute": "\u0170",
"Udblgrave": "\u0214",
"Udieresis": "\u00DC",
"Udieresisacute": "\u01D7",
"Udieresisbelow": "\u1E72",
"Udieresiscaron": "\u01D9",
"Udieresiscyrillic": "\u04F0",
"Udieresisgrave": "\u01DB",
"Udieresismacron": "\u01D5",
"Udieresissmall": "\uF7FC",
"Udotbelow": "\u1EE4",
"Ugrave": "\u00D9",
"Ugravesmall": "\uF7F9",
"Uhookabove": "\u1EE6",
"Uhorn": "\u01AF",
"Uhornacute": "\u1EE8",
"Uhorndotbelow": "\u1EF0",
"Uhorngrave": "\u1EEA",
"Uhornhookabove": "\u1EEC",
"Uhorntilde": "\u1EEE",
"Uhungarumlaut": "\u0170",
"Uhungarumlautcyrillic": "\u04F2",
"Uinvertedbreve": "\u0216",
"Ukcyrillic": "\u0478",
"Umacron": "\u016A",
"Umacroncyrillic": "\u04EE",
"Umacrondieresis": "\u1E7A",
"Umonospace": "\uFF35",
"Uogonek": "\u0172",
"Upsilon": "\u03A5",
"Upsilon1": "\u03D2",
"Upsilonacutehooksymbolgreek": "\u03D3",
"Upsilonafrican": "\u01B1",
"Upsilondieresis": "\u03AB",
"Upsilondieresishooksymbolgreek": "\u03D4",
"Upsilonhooksymbol": "\u03D2",
"Upsilontonos": "\u038E",
"Uring": "\u016E",
"Ushortcyrillic": "\u040E",
"Usmall": "\uF775",
"Ustraightcyrillic": "\u04AE",
"Ustraightstrokecyrillic": "\u04B0",
"Utilde": "\u0168",
"Utildeacute": "\u1E78",
"Utildebelow": "\u1E74",
"V": "\u0056",
"Vcircle": "\u24CB",
"Vdotbelow": "\u1E7E",
"Vecyrillic": "\u0412",
"Vewarmenian": "\u054E",
"Vhook": "\u01B2",
"Vmonospace": "\uFF36",
"Voarmenian": "\u0548",
"Vsmall": "\uF776",
"Vtilde": "\u1E7C",
"W": "\u0057",
"Wacute": "\u1E82",
"Wcircle": "\u24CC",
"Wcircumflex": "\u0174",
"Wdieresis": "\u1E84",
"Wdotaccent": "\u1E86",
"Wdotbelow": "\u1E88",
"Wgrave": "\u1E80",
"Wmonospace": "\uFF37",
"Wsmall": "\uF777",
"X": "\u0058",
"Xcircle": "\u24CD",
"Xdieresis": "\u1E8C",
"Xdotaccent": "\u1E8A",
"Xeharmenian": "\u053D",
"Xi": "\u039E",
"Xmonospace": "\uFF38",
"Xsmall": "\uF778",
"Y": "\u0059",
"Yacute": "\u00DD",
"Yacutesmall": "\uF7FD",
"Yatcyrillic": "\u0462",
"Ycircle": "\u24CE",
"Ycircumflex": "\u0176",
"Ydieresis": "\u0178",
"Ydieresissmall": "\uF7FF",
"Ydotaccent": "\u1E8E",
"Ydotbelow": "\u1EF4",
"Yericyrillic": "\u042B",
"Yerudieresiscyrillic": "\u04F8",
"Ygrave": "\u1EF2",
"Yhook": "\u01B3",
"Yhookabove": "\u1EF6",
"Yiarmenian": "\u0545",
"Yicyrillic": "\u0407",
"Yiwnarmenian": "\u0552",
"Ymonospace": "\uFF39",
"Ysmall": "\uF779",
"Ytilde": "\u1EF8",
"Yusbigcyrillic": "\u046A",
"Yusbigiotifiedcyrillic": "\u046C",
"Yuslittlecyrillic": "\u0466",
"Yuslittleiotifiedcyrillic": "\u0468",
"Z": "\u005A",
"Zaarmenian": "\u0536",
"Zacute": "\u0179",
"Zcaron": "\u017D",
"Zcaronsmall": "\uF6FF",
"Zcircle": "\u24CF",
"Zcircumflex": "\u1E90",
"Zdot": "\u017B",
"Zdotaccent": "\u017B",
"Zdotbelow": "\u1E92",
"Zecyrillic": "\u0417",
"Zedescendercyrillic": "\u0498",
"Zedieresiscyrillic": "\u04DE",
"Zeta": "\u0396",
"Zhearmenian": "\u053A",
"Zhebrevecyrillic": "\u04C1",
"Zhecyrillic": "\u0416",
"Zhedescendercyrillic": "\u0496",
"Zhedieresiscyrillic": "\u04DC",
"Zlinebelow": "\u1E94",
"Zmonospace": "\uFF3A",
"Zsmall": "\uF77A",
"Zstroke": "\u01B5",
"a": "\u0061",
"aabengali": "\u0986",
"aacute": "\u00E1",
"aadeva": "\u0906",
"aagujarati": "\u0A86",
"aagurmukhi": "\u0A06",
"aamatragurmukhi": "\u0A3E",
"aarusquare": "\u3303",
"aavowelsignbengali": "\u09BE",
"aavowelsigndeva": "\u093E",
"aavowelsigngujarati": "\u0ABE",
"abbreviationmarkarmenian": "\u055F",
"abbreviationsigndeva": "\u0970",
"abengali": "\u0985",
"abopomofo": "\u311A",
"abreve": "\u0103",
"abreveacute": "\u1EAF",
"abrevecyrillic": "\u04D1",
"abrevedotbelow": "\u1EB7",
"abrevegrave": "\u1EB1",
"abrevehookabove": "\u1EB3",
"abrevetilde": "\u1EB5",
"acaron": "\u01CE",
"acircle": "\u24D0",
"acircumflex": "\u00E2",
"acircumflexacute": "\u1EA5",
"acircumflexdotbelow": "\u1EAD",
"acircumflexgrave": "\u1EA7",
"acircumflexhookabove": "\u1EA9",
"acircumflextilde": "\u1EAB",
"acute": "\u00B4",
"acutebelowcmb": "\u0317",
"acutecmb": "\u0301",
"acutecomb": "\u0301",
"acutedeva": "\u0954",
"acutelowmod": "\u02CF",
"acutetonecmb": "\u0341",
"acyrillic": "\u0430",
"adblgrave": "\u0201",
"addakgurmukhi": "\u0A71",
"adeva": "\u0905",
"adieresis": "\u00E4",
"adieresiscyrillic": "\u04D3",
"adieresismacron": "\u01DF",
"adotbelow": "\u1EA1",
"adotmacron": "\u01E1",
"ae": "\u00E6",
"aeacute": "\u01FD",
"aekorean": "\u3150",
"aemacron": "\u01E3",
"afii00208": "\u2015",
"afii08941": "\u20A4",
"afii10017": "\u0410",
"afii10018": "\u0411",
"afii10019": "\u0412",
"afii10020": "\u0413",
"afii10021": "\u0414",
"afii10022": "\u0415",
"afii10023": "\u0401",
"afii10024": "\u0416",
"afii10025": "\u0417",
"afii10026": "\u0418",
"afii10027": "\u0419",
"afii10028": "\u041A",
"afii10029": "\u041B",
"afii10030": "\u041C",
"afii10031": "\u041D",
"afii10032": "\u041E",
"afii10033": "\u041F",
"afii10034": "\u0420",
"afii10035": "\u0421",
"afii10036": "\u0422",
"afii10037": "\u0423",
"afii10038": "\u0424",
"afii10039": "\u0425",
"afii10040": "\u0426",
"afii10041": "\u0427",
"afii10042": "\u0428",
"afii10043": "\u0429",
"afii10044": "\u042A",
"afii10045": "\u042B",
"afii10046": "\u042C",
"afii10047": "\u042D",
"afii10048": "\u042E",
"afii10049": "\u042F",
"afii10050": "\u0490",
"afii10051": "\u0402",
"afii10052": "\u0403",
"afii10053": "\u0404",
"afii10054": "\u0405",
"afii10055": "\u0406",
"afii10056": "\u0407",
"afii10057": "\u0408",
"afii10058": "\u0409",
"afii10059": "\u040A",
"afii10060": "\u040B",
"afii10061": "\u040C",
"afii10062": "\u040E",
"afii10063": "\uF6C4",
"afii10064": "\uF6C5",
"afii10065": "\u0430",
"afii10066": "\u0431",
"afii10067": "\u0432",
"afii10068": "\u0433",
"afii10069": "\u0434",
"afii10070": "\u0435",
"afii10071": "\u0451",
"afii10072": "\u0436",
"afii10073": "\u0437",
"afii10074": "\u0438",
"afii10075": "\u0439",
"afii10076": "\u043A",
"afii10077": "\u043B",
"afii10078": "\u043C",
"afii10079": "\u043D",
"afii10080": "\u043E",
"afii10081": "\u043F",
"afii10082": "\u0440",
"afii10083": "\u0441",
"afii10084": "\u0442",
"afii10085": "\u0443",
"afii10086": "\u0444",
"afii10087": "\u0445",
"afii10088": "\u0446",
"afii10089": "\u0447",
"afii10090": "\u0448",
"afii10091": "\u0449",
"afii10092": "\u044A",
"afii10093": "\u044B",
"afii10094": "\u044C",
"afii10095": "\u044D",
"afii10096": "\u044E",
"afii10097": "\u044F",
"afii10098": "\u0491",
"afii10099": "\u0452",
"afii10100": "\u0453",
"afii10101": "\u0454",
"afii10102": "\u0455",
"afii10103": "\u0456",
"afii10104": "\u0457",
"afii10105": "\u0458",
"afii10106": "\u0459",
"afii10107": "\u045A",
"afii10108": "\u045B",
"afii10109": "\u045C",
"afii10110": "\u045E",
"afii10145": "\u040F",
"afii10146": "\u0462",
"afii10147": "\u0472",
"afii10148": "\u0474",
"afii10192": "\uF6C6",
"afii10193": "\u045F",
"afii10194": "\u0463",
"afii10195": "\u0473",
"afii10196": "\u0475",
"afii10831": "\uF6C7",
"afii10832": "\uF6C8",
"afii10846": "\u04D9",
"afii299": "\u200E",
"afii300": "\u200F",
"afii301": "\u200D",
"afii57381": "\u066A",
"afii57388": "\u060C",
"afii57392": "\u0660",
"afii57393": "\u0661",
"afii57394": "\u0662",
"afii57395": "\u0663",
"afii57396": "\u0664",
"afii57397": "\u0665",
"afii57398": "\u0666",
"afii57399": "\u0667",
"afii57400": "\u0668",
"afii57401": "\u0669",
"afii57403": "\u061B",
"afii57407": "\u061F",
"afii57409": "\u0621",
"afii57410": "\u0622",
"afii57411": "\u0623",
"afii57412": "\u0624",
"afii57413": "\u0625",
"afii57414": "\u0626",
"afii57415": "\u0627",
"afii57416": "\u0628",
"afii57417": "\u0629",
"afii57418": "\u062A",
"afii57419": "\u062B",
"afii57420": "\u062C",
"afii57421": "\u062D",
"afii57422": "\u062E",
"afii57423": "\u062F",
"afii57424": "\u0630",
"afii57425": "\u0631",
"afii57426": "\u0632",
"afii57427": "\u0633",
"afii57428": "\u0634",
"afii57429": "\u0635",
"afii57430": "\u0636",
"afii57431": "\u0637",
"afii57432": "\u0638",
"afii57433": "\u0639",
"afii57434": "\u063A",
"afii57440": "\u0640",
"afii57441": "\u0641",
"afii57442": "\u0642",
"afii57443": "\u0643",
"afii57444": "\u0644",
"afii57445": "\u0645",
"afii57446": "\u0646",
"afii57448": "\u0648",
"afii57449": "\u0649",
"afii57450": "\u064A",
"afii57451": "\u064B",
"afii57452": "\u064C",
"afii57453": "\u064D",
"afii57454": "\u064E",
"afii57455": "\u064F",
"afii57456": "\u0650",
"afii57457": "\u0651",
"afii57458": "\u0652",
"afii57470": "\u0647",
"afii57505": "\u06A4",
"afii57506": "\u067E",
"afii57507": "\u0686",
"afii57508": "\u0698",
"afii57509": "\u06AF",
"afii57511": "\u0679",
"afii57512": "\u0688",
"afii57513": "\u0691",
"afii57514": "\u06BA",
"afii57519": "\u06D2",
"afii57534": "\u06D5",
"afii57636": "\u20AA",
"afii57645": "\u05BE",
"afii57658": "\u05C3",
"afii57664": "\u05D0",
"afii57665": "\u05D1",
"afii57666": "\u05D2",
"afii57667": "\u05D3",
"afii57668": "\u05D4",
"afii57669": "\u05D5",
"afii57670": "\u05D6",
"afii57671": "\u05D7",
"afii57672": "\u05D8",
"afii57673": "\u05D9",
"afii57674": "\u05DA",
"afii57675": "\u05DB",
"afii57676": "\u05DC",
"afii57677": "\u05DD",
"afii57678": "\u05DE",
"afii57679": "\u05DF",
"afii57680": "\u05E0",
"afii57681": "\u05E1",
"afii57682": "\u05E2",
"afii57683": "\u05E3",
"afii57684": "\u05E4",
"afii57685": "\u05E5",
"afii57686": "\u05E6",
"afii57687": "\u05E7",
"afii57688": "\u05E8",
"afii57689": "\u05E9",
"afii57690": "\u05EA",
"afii57694": "\uFB2A",
"afii57695": "\uFB2B",
"afii57700": "\uFB4B",
"afii57705": "\uFB1F",
"afii57716": "\u05F0",
"afii57717": "\u05F1",
"afii57718": "\u05F2",
"afii57723": "\uFB35",
"afii57793": "\u05B4",
"afii57794": "\u05B5",
"afii57795": "\u05B6",
"afii57796": "\u05BB",
"afii57797": "\u05B8",
"afii57798": "\u05B7",
"afii57799": "\u05B0",
"afii57800": "\u05B2",
"afii57801": "\u05B1",
"afii57802": "\u05B3",
"afii57803": "\u05C2",
"afii57804": "\u05C1",
"afii57806": "\u05B9",
"afii57807": "\u05BC",
"afii57839": "\u05BD",
"afii57841": "\u05BF",
"afii57842": "\u05C0",
"afii57929": "\u02BC",
"afii61248": "\u2105",
"afii61289": "\u2113",
"afii61352": "\u2116",
"afii61573": "\u202C",
"afii61574": "\u202D",
"afii61575": "\u202E",
"afii61664": "\u200C",
"afii63167": "\u066D",
"afii64937": "\u02BD",
"agrave": "\u00E0",
"agujarati": "\u0A85",
"agurmukhi": "\u0A05",
"ahiragana": "\u3042",
"ahookabove": "\u1EA3",
"aibengali": "\u0990",
"aibopomofo": "\u311E",
"aideva": "\u0910",
"aiecyrillic": "\u04D5",
"aigujarati": "\u0A90",
"aigurmukhi": "\u0A10",
"aimatragurmukhi": "\u0A48",
"ainarabic": "\u0639",
"ainfinalarabic": "\uFECA",
"aininitialarabic": "\uFECB",
"ainmedialarabic": "\uFECC",
"ainvertedbreve": "\u0203",
"aivowelsignbengali": "\u09C8",
"aivowelsigndeva": "\u0948",
"aivowelsigngujarati": "\u0AC8",
"akatakana": "\u30A2",
"akatakanahalfwidth": "\uFF71",
"akorean": "\u314F",
"alef": "\u05D0",
"alefarabic": "\u0627",
"alefdageshhebrew": "\uFB30",
"aleffinalarabic": "\uFE8E",
"alefhamzaabovearabic": "\u0623",
"alefhamzaabovefinalarabic": "\uFE84",
"alefhamzabelowarabic": "\u0625",
"alefhamzabelowfinalarabic": "\uFE88",
"alefhebrew": "\u05D0",
"aleflamedhebrew": "\uFB4F",
"alefmaddaabovearabic": "\u0622",
"alefmaddaabovefinalarabic": "\uFE82",
"alefmaksuraarabic": "\u0649",
"alefmaksurafinalarabic": "\uFEF0",
"alefmaksurainitialarabic": "\uFEF3",
"alefmaksuramedialarabic": "\uFEF4",
"alefpatahhebrew": "\uFB2E",
"alefqamatshebrew": "\uFB2F",
"aleph": "\u2135",
"allequal": "\u224C",
"alpha": "\u03B1",
"alphatonos": "\u03AC",
"amacron": "\u0101",
"amonospace": "\uFF41",
"ampersand": "\u0026",
"ampersandmonospace": "\uFF06",
"ampersandsmall": "\uF726",
"amsquare": "\u33C2",
"anbopomofo": "\u3122",
"angbopomofo": "\u3124",
"angkhankhuthai": "\u0E5A",
"angle": "\u2220",
"anglebracketleft": "\u3008",
"anglebracketleftvertical": "\uFE3F",
"anglebracketright": "\u3009",
"anglebracketrightvertical": "\uFE40",
"angleleft": "\u2329",
"angleright": "\u232A",
"angstrom": "\u212B",
"anoteleia": "\u0387",
"anudattadeva": "\u0952",
"anusvarabengali": "\u0982",
"anusvaradeva": "\u0902",
"anusvaragujarati": "\u0A82",
"aogonek": "\u0105",
"apaatosquare": "\u3300",
"aparen": "\u249C",
"apostrophearmenian": "\u055A",
"apostrophemod": "\u02BC",
"apple": "\uF8FF",
"approaches": "\u2250",
"approxequal": "\u2248",
"approxequalorimage": "\u2252",
"approximatelyequal": "\u2245",
"araeaekorean": "\u318E",
"araeakorean": "\u318D",
"arc": "\u2312",
"arighthalfring": "\u1E9A",
"aring": "\u00E5",
"aringacute": "\u01FB",
"aringbelow": "\u1E01",
"arrowboth": "\u2194",
"arrowdashdown": "\u21E3",
"arrowdashleft": "\u21E0",
"arrowdashright": "\u21E2",
"arrowdashup": "\u21E1",
"arrowdblboth": "\u21D4",
"arrowdbldown": "\u21D3",
"arrowdblleft": "\u21D0",
"arrowdblright": "\u21D2",
"arrowdblup": "\u21D1",
"arrowdown": "\u2193",
"arrowdownleft": "\u2199",
"arrowdownright": "\u2198",
"arrowdownwhite": "\u21E9",
"arrowheaddownmod": "\u02C5",
"arrowheadleftmod": "\u02C2",
"arrowheadrightmod": "\u02C3",
"arrowheadupmod": "\u02C4",
"arrowhorizex": "\uF8E7",
"arrowleft": "\u2190",
"arrowleftdbl": "\u21D0",
"arrowleftdblstroke": "\u21CD",
"arrowleftoverright": "\u21C6",
"arrowleftwhite": "\u21E6",
"arrowright": "\u2192",
"arrowrightdblstroke": "\u21CF",
"arrowrightheavy": "\u279E",
"arrowrightoverleft": "\u21C4",
"arrowrightwhite": "\u21E8",
"arrowtableft": "\u21E4",
"arrowtabright": "\u21E5",
"arrowup": "\u2191",
"arrowupdn": "\u2195",
"arrowupdnbse": "\u21A8",
"arrowupdownbase": "\u21A8",
"arrowupleft": "\u2196",
"arrowupleftofdown": "\u21C5",
"arrowupright": "\u2197",
"arrowupwhite": "\u21E7",
"arrowvertex": "\uF8E6",
"asciicircum": "\u005E",
"asciicircummonospace": "\uFF3E",
"asciitilde": "\u007E",
"asciitildemonospace": "\uFF5E",
"ascript": "\u0251",
"ascriptturned": "\u0252",
"asmallhiragana": "\u3041",
"asmallkatakana": "\u30A1",
"asmallkatakanahalfwidth": "\uFF67",
"asterisk": "\u002A",
"asteriskaltonearabic": "\u066D",
"asteriskarabic": "\u066D",
"asteriskmath": "\u2217",
"asteriskmonospace": "\uFF0A",
"asterisksmall": "\uFE61",
"asterism": "\u2042",
"asuperior": "\uF6E9",
"asymptoticallyequal": "\u2243",
"at": "\u0040",
"atilde": "\u00E3",
"atmonospace": "\uFF20",
"atsmall": "\uFE6B",
"aturned": "\u0250",
"aubengali": "\u0994",
"aubopomofo": "\u3120",
"audeva": "\u0914",
"augujarati": "\u0A94",
"augurmukhi": "\u0A14",
"aulengthmarkbengali": "\u09D7",
"aumatragurmukhi": "\u0A4C",
"auvowelsignbengali": "\u09CC",
"auvowelsigndeva": "\u094C",
"auvowelsigngujarati": "\u0ACC",
"avagrahadeva": "\u093D",
"aybarmenian": "\u0561",
"ayin": "\u05E2",
"ayinaltonehebrew": "\uFB20",
"ayinhebrew": "\u05E2",
"b": "\u0062",
"babengali": "\u09AC",
"backslash": "\u005C",
"backslashmonospace": "\uFF3C",
"badeva": "\u092C",
"bagujarati": "\u0AAC",
"bagurmukhi": "\u0A2C",
"bahiragana": "\u3070",
"bahtthai": "\u0E3F",
"bakatakana": "\u30D0",
"bar": "\u007C",
"barmonospace": "\uFF5C",
"bbopomofo": "\u3105",
"bcircle": "\u24D1",
"bdotaccent": "\u1E03",
"bdotbelow": "\u1E05",
"beamedsixteenthnotes": "\u266C",
"because": "\u2235",
"becyrillic": "\u0431",
"beharabic": "\u0628",
"behfinalarabic": "\uFE90",
"behinitialarabic": "\uFE91",
"behiragana": "\u3079",
"behmedialarabic": "\uFE92",
"behmeeminitialarabic": "\uFC9F",
"behmeemisolatedarabic": "\uFC08",
"behnoonfinalarabic": "\uFC6D",
"bekatakana": "\u30D9",
"benarmenian": "\u0562",
"bet": "\u05D1",
"beta": "\u03B2",
"betasymbolgreek": "\u03D0",
"betdagesh": "\uFB31",
"betdageshhebrew": "\uFB31",
"bethebrew": "\u05D1",
"betrafehebrew": "\uFB4C",
"bhabengali": "\u09AD",
"bhadeva": "\u092D",
"bhagujarati": "\u0AAD",
"bhagurmukhi": "\u0A2D",
"bhook": "\u0253",
"bihiragana": "\u3073",
"bikatakana": "\u30D3",
"bilabialclick": "\u0298",
"bindigurmukhi": "\u0A02",
"birusquare": "\u3331",
"blackcircle": "\u25CF",
"blackdiamond": "\u25C6",
"blackdownpointingtriangle": "\u25BC",
"blackleftpointingpointer": "\u25C4",
"blackleftpointingtriangle": "\u25C0",
"blacklenticularbracketleft": "\u3010",
"blacklenticularbracketleftvertical": "\uFE3B",
"blacklenticularbracketright": "\u3011",
"blacklenticularbracketrightvertical": "\uFE3C",
"blacklowerlefttriangle": "\u25E3",
"blacklowerrighttriangle": "\u25E2",
"blackrectangle": "\u25AC",
"blackrightpointingpointer": "\u25BA",
"blackrightpointingtriangle": "\u25B6",
"blacksmallsquare": "\u25AA",
"blacksmilingface": "\u263B",
"blacksquare": "\u25A0",
"blackstar": "\u2605",
"blackupperlefttriangle": "\u25E4",
"blackupperrighttriangle": "\u25E5",
"blackuppointingsmalltriangle": "\u25B4",
"blackuppointingtriangle": "\u25B2",
"blank": "\u2423",
"blinebelow": "\u1E07",
"block": "\u2588",
"bmonospace": "\uFF42",
"bobaimaithai": "\u0E1A",
"bohiragana": "\u307C",
"bokatakana": "\u30DC",
"bparen": "\u249D",
"bqsquare": "\u33C3",
"braceex": "\uF8F4",
"braceleft": "\u007B",
"braceleftbt": "\uF8F3",
"braceleftmid": "\uF8F2",
"braceleftmonospace": "\uFF5B",
"braceleftsmall": "\uFE5B",
"bracelefttp": "\uF8F1",
"braceleftvertical": "\uFE37",
"braceright": "\u007D",
"bracerightbt": "\uF8FE",
"bracerightmid": "\uF8FD",
"bracerightmonospace": "\uFF5D",
"bracerightsmall": "\uFE5C",
"bracerighttp": "\uF8FC",
"bracerightvertical": "\uFE38",
"bracketleft": "\u005B",
"bracketleftbt": "\uF8F0",
"bracketleftex": "\uF8EF",
"bracketleftmonospace": "\uFF3B",
"bracketlefttp": "\uF8EE",
"bracketright": "\u005D",
"bracketrightbt": "\uF8FB",
"bracketrightex": "\uF8FA",
"bracketrightmonospace": "\uFF3D",
"bracketrighttp": "\uF8F9",
"breve": "\u02D8",
"brevebelowcmb": "\u032E",
"brevecmb": "\u0306",
"breveinvertedbelowcmb": "\u032F",
"breveinvertedcmb": "\u0311",
"breveinverteddoublecmb": "\u0361",
"bridgebelowcmb": "\u032A",
"bridgeinvertedbelowcmb": "\u033A",
"brokenbar": "\u00A6",
"bstroke": "\u0180",
"bsuperior": "\uF6EA",
"btopbar": "\u0183",
"buhiragana": "\u3076",
"bukatakana": "\u30D6",
"bullet": "\u2022",
"bulletinverse": "\u25D8",
"bulletoperator": "\u2219",
"bullseye": "\u25CE",
"c": "\u0063",
"caarmenian": "\u056E",
"cabengali": "\u099A",
"cacute": "\u0107",
"cadeva": "\u091A",
"cagujarati": "\u0A9A",
"cagurmukhi": "\u0A1A",
"calsquare": "\u3388",
"candrabindubengali": "\u0981",
"candrabinducmb": "\u0310",
"candrabindudeva": "\u0901",
"candrabindugujarati": "\u0A81",
"capslock": "\u21EA",
"careof": "\u2105",
"caron": "\u02C7",
"caronbelowcmb": "\u032C",
"caroncmb": "\u030C",
"carriagereturn": "\u21B5",
"cbopomofo": "\u3118",
"ccaron": "\u010D",
"ccedilla": "\u00E7",
"ccedillaacute": "\u1E09",
"ccircle": "\u24D2",
"ccircumflex": "\u0109",
"ccurl": "\u0255",
"cdot": "\u010B",
"cdotaccent": "\u010B",
"cdsquare": "\u33C5",
"cedilla": "\u00B8",
"cedillacmb": "\u0327",
"cent": "\u00A2",
"centigrade": "\u2103",
"centinferior": "\uF6DF",
"centmonospace": "\uFFE0",
"centoldstyle": "\uF7A2",
"centsuperior": "\uF6E0",
"chaarmenian": "\u0579",
"chabengali": "\u099B",
"chadeva": "\u091B",
"chagujarati": "\u0A9B",
"chagurmukhi": "\u0A1B",
"chbopomofo": "\u3114",
"cheabkhasiancyrillic": "\u04BD",
"checkmark": "\u2713",
"checyrillic": "\u0447",
"chedescenderabkhasiancyrillic": "\u04BF",
"chedescendercyrillic": "\u04B7",
"chedieresiscyrillic": "\u04F5",
"cheharmenian": "\u0573",
"chekhakassiancyrillic": "\u04CC",
"cheverticalstrokecyrillic": "\u04B9",
"chi": "\u03C7",
"chieuchacirclekorean": "\u3277",
"chieuchaparenkorean": "\u3217",
"chieuchcirclekorean": "\u3269",
"chieuchkorean": "\u314A",
"chieuchparenkorean": "\u3209",
"chochangthai": "\u0E0A",
"chochanthai": "\u0E08",
"chochingthai": "\u0E09",
"chochoethai": "\u0E0C",
"chook": "\u0188",
"cieucacirclekorean": "\u3276",
"cieucaparenkorean": "\u3216",
"cieuccirclekorean": "\u3268",
"cieuckorean": "\u3148",
"cieucparenkorean": "\u3208",
"cieucuparenkorean": "\u321C",
"circle": "\u25CB",
"circlemultiply": "\u2297",
"circleot": "\u2299",
"circleplus": "\u2295",
"circlepostalmark": "\u3036",
"circlewithlefthalfblack": "\u25D0",
"circlewithrighthalfblack": "\u25D1",
"circumflex": "\u02C6",
"circumflexbelowcmb": "\u032D",
"circumflexcmb": "\u0302",
"clear": "\u2327",
"clickalveolar": "\u01C2",
"clickdental": "\u01C0",
"clicklateral": "\u01C1",
"clickretroflex": "\u01C3",
"club": "\u2663",
"clubsuitblack": "\u2663",
"clubsuitwhite": "\u2667",
"cmcubedsquare": "\u33A4",
"cmonospace": "\uFF43",
"cmsquaredsquare": "\u33A0",
"coarmenian": "\u0581",
"colon": "\u003A",
"colonmonetary": "\u20A1",
"colonmonospace": "\uFF1A",
"colonsign": "\u20A1",
"colonsmall": "\uFE55",
"colontriangularhalfmod": "\u02D1",
"colontriangularmod": "\u02D0",
"comma": "\u002C",
"commaabovecmb": "\u0313",
"commaaboverightcmb": "\u0315",
"commaaccent": "\uF6C3",
"commaarabic": "\u060C",
"commaarmenian": "\u055D",
"commainferior": "\uF6E1",
"commamonospace": "\uFF0C",
"commareversedabovecmb": "\u0314",
"commareversedmod": "\u02BD",
"commasmall": "\uFE50",
"commasuperior": "\uF6E2",
"commaturnedabovecmb": "\u0312",
"commaturnedmod": "\u02BB",
"compass": "\u263C",
"congruent": "\u2245",
"contourintegral": "\u222E",
"control": "\u2303",
"controlACK": "\u0006",
"controlBEL": "\u0007",
"controlBS": "\u0008",
"controlCAN": "\u0018",
"controlCR": "\u000D",
"controlDC1": "\u0011",
"controlDC2": "\u0012",
"controlDC3": "\u0013",
"controlDC4": "\u0014",
"controlDEL": "\u007F",
"controlDLE": "\u0010",
"controlEM": "\u0019",
"controlENQ": "\u0005",
"controlEOT": "\u0004",
"controlESC": "\u001B",
"controlETB": "\u0017",
"controlETX": "\u0003",
"controlFF": "\u000C",
"controlFS": "\u001C",
"controlGS": "\u001D",
"controlHT": "\u0009",
"controlLF": "\u000A",
"controlNAK": "\u0015",
"controlRS": "\u001E",
"controlSI": "\u000F",
"controlSO": "\u000E",
"controlSOT": "\u0002",
"controlSTX": "\u0001",
"controlSUB": "\u001A",
"controlSYN": "\u0016",
"controlUS": "\u001F",
"controlVT": "\u000B",
"copyright": "\u00A9",
"copyrightsans": "\uF8E9",
"copyrightserif": "\uF6D9",
"cornerbracketleft": "\u300C",
"cornerbracketlefthalfwidth": "\uFF62",
"cornerbracketleftvertical": "\uFE41",
"cornerbracketright": "\u300D",
"cornerbracketrighthalfwidth": "\uFF63",
"cornerbracketrightvertical": "\uFE42",
"corporationsquare": "\u337F",
"cosquare": "\u33C7",
"coverkgsquare": "\u33C6",
"cparen": "\u249E",
"cruzeiro": "\u20A2",
"cstretched": "\u0297",
"curlyand": "\u22CF",
"curlyor": "\u22CE",
"currency": "\u00A4",
"cyrBreve": "\uF6D1",
"cyrFlex": "\uF6D2",
"cyrbreve": "\uF6D4",
"cyrflex": "\uF6D5",
"d": "\u0064",
"daarmenian": "\u0564",
"dabengali": "\u09A6",
"dadarabic": "\u0636",
"dadeva": "\u0926",
"dadfinalarabic": "\uFEBE",
"dadinitialarabic": "\uFEBF",
"dadmedialarabic": "\uFEC0",
"dagesh": "\u05BC",
"dageshhebrew": "\u05BC",
"dagger": "\u2020",
"daggerdbl": "\u2021",
"dagujarati": "\u0AA6",
"dagurmukhi": "\u0A26",
"dahiragana": "\u3060",
"dakatakana": "\u30C0",
"dalarabic": "\u062F",
"dalet": "\u05D3",
"daletdagesh": "\uFB33",
"daletdageshhebrew": "\uFB33",
"dalethatafpatah": "\u05D3\u05B2",
"dalethatafpatahhebrew": "\u05D3\u05B2",
"dalethatafsegol": "\u05D3\u05B1",
"dalethatafsegolhebrew": "\u05D3\u05B1",
"dalethebrew": "\u05D3",
"dalethiriq": "\u05D3\u05B4",
"dalethiriqhebrew": "\u05D3\u05B4",
"daletholam": "\u05D3\u05B9",
"daletholamhebrew": "\u05D3\u05B9",
"daletpatah": "\u05D3\u05B7",
"daletpatahhebrew": "\u05D3\u05B7",
"daletqamats": "\u05D3\u05B8",
"daletqamatshebrew": "\u05D3\u05B8",
"daletqubuts": "\u05D3\u05BB",
"daletqubutshebrew": "\u05D3\u05BB",
"daletsegol": "\u05D3\u05B6",
"daletsegolhebrew": "\u05D3\u05B6",
"daletsheva": "\u05D3\u05B0",
"daletshevahebrew": "\u05D3\u05B0",
"dalettsere": "\u05D3\u05B5",
"dalettserehebrew": "\u05D3\u05B5",
"dalfinalarabic": "\uFEAA",
"dammaarabic": "\u064F",
"dammalowarabic": "\u064F",
"dammatanaltonearabic": "\u064C",
"dammatanarabic": "\u064C",
"danda": "\u0964",
"dargahebrew": "\u05A7",
"dargalefthebrew": "\u05A7",
"dasiapneumatacyrilliccmb": "\u0485",
"dblGrave": "\uF6D3",
"dblanglebracketleft": "\u300A",
"dblanglebracketleftvertical": "\uFE3D",
"dblanglebracketright": "\u300B",
"dblanglebracketrightvertical": "\uFE3E",
"dblarchinvertedbelowcmb": "\u032B",
"dblarrowleft": "\u21D4",
"dblarrowright": "\u21D2",
"dbldanda": "\u0965",
"dblgrave": "\uF6D6",
"dblgravecmb": "\u030F",
"dblintegral": "\u222C",
"dbllowline": "\u2017",
"dbllowlinecmb": "\u0333",
"dbloverlinecmb": "\u033F",
"dblprimemod": "\u02BA",
"dblverticalbar": "\u2016",
"dblverticallineabovecmb": "\u030E",
"dbopomofo": "\u3109",
"dbsquare": "\u33C8",
"dcaron": "\u010F",
"dcedilla": "\u1E11",
"dcircle": "\u24D3",
"dcircumflexbelow": "\u1E13",
"dcroat": "\u0111",
"ddabengali": "\u09A1",
"ddadeva": "\u0921",
"ddagujarati": "\u0AA1",
"ddagurmukhi": "\u0A21",
"ddalarabic": "\u0688",
"ddalfinalarabic": "\uFB89",
"dddhadeva": "\u095C",
"ddhabengali": "\u09A2",
"ddhadeva": "\u0922",
"ddhagujarati": "\u0AA2",
"ddhagurmukhi": "\u0A22",
"ddotaccent": "\u1E0B",
"ddotbelow": "\u1E0D",
"decimalseparatorarabic": "\u066B",
"decimalseparatorpersian": "\u066B",
"decyrillic": "\u0434",
"degree": "\u00B0",
"dehihebrew": "\u05AD",
"dehiragana": "\u3067",
"deicoptic": "\u03EF",
"dekatakana": "\u30C7",
"deleteleft": "\u232B",
"deleteright": "\u2326",
"delta": "\u03B4",
"deltaturned": "\u018D",
"denominatorminusonenumeratorbengali": "\u09F8",
"dezh": "\u02A4",
"dhabengali": "\u09A7",
"dhadeva": "\u0927",
"dhagujarati": "\u0AA7",
"dhagurmukhi": "\u0A27",
"dhook": "\u0257",
"dialytikatonos": "\u0385",
"dialytikatonoscmb": "\u0344",
"diamond": "\u2666",
"diamondsuitwhite": "\u2662",
"dieresis": "\u00A8",
"dieresisacute": "\uF6D7",
"dieresisbelowcmb": "\u0324",
"dieresiscmb": "\u0308",
"dieresisgrave": "\uF6D8",
"dieresistonos": "\u0385",
"dihiragana": "\u3062",
"dikatakana": "\u30C2",
"dittomark": "\u3003",
"divide": "\u00F7",
"divides": "\u2223",
"divisionslash": "\u2215",
"djecyrillic": "\u0452",
"dkshade": "\u2593",
"dlinebelow": "\u1E0F",
"dlsquare": "\u3397",
"dmacron": "\u0111",
"dmonospace": "\uFF44",
"dnblock": "\u2584",
"dochadathai": "\u0E0E",
"dodekthai": "\u0E14",
"dohiragana": "\u3069",
"dokatakana": "\u30C9",
"dollar": "\u0024",
"dollarinferior": "\uF6E3",
"dollarmonospace": "\uFF04",
"dollaroldstyle": "\uF724",
"dollarsmall": "\uFE69",
"dollarsuperior": "\uF6E4",
"dong": "\u20AB",
"dorusquare": "\u3326",
"dotaccent": "\u02D9",
"dotaccentcmb": "\u0307",
"dotbelowcmb": "\u0323",
"dotbelowcomb": "\u0323",
"dotkatakana": "\u30FB",
"dotlessi": "\u0131",
"dotlessj": "\uF6BE",
"dotlessjstrokehook": "\u0284",
"dotmath": "\u22C5",
"dottedcircle": "\u25CC",
"doubleyodpatah": "\uFB1F",
"doubleyodpatahhebrew": "\uFB1F",
"downtackbelowcmb": "\u031E",
"downtackmod": "\u02D5",
"dparen": "\u249F",
"dsuperior": "\uF6EB",
"dtail": "\u0256",
"dtopbar": "\u018C",
"duhiragana": "\u3065",
"dukatakana": "\u30C5",
"dz": "\u01F3",
"dzaltone": "\u02A3",
"dzcaron": "\u01C6",
"dzcurl": "\u02A5",
"dzeabkhasiancyrillic": "\u04E1",
"dzecyrillic": "\u0455",
"dzhecyrillic": "\u045F",
"e": "\u0065",
"eacute": "\u00E9",
"earth": "\u2641",
"ebengali": "\u098F",
"ebopomofo": "\u311C",
"ebreve": "\u0115",
"ecandradeva": "\u090D",
"ecandragujarati": "\u0A8D",
"ecandravowelsigndeva": "\u0945",
"ecandravowelsigngujarati": "\u0AC5",
"ecaron": "\u011B",
"ecedillabreve": "\u1E1D",
"echarmenian": "\u0565",
"echyiwnarmenian": "\u0587",
"ecircle": "\u24D4",
"ecircumflex": "\u00EA",
"ecircumflexacute": "\u1EBF",
"ecircumflexbelow": "\u1E19",
"ecircumflexdotbelow": "\u1EC7",
"ecircumflexgrave": "\u1EC1",
"ecircumflexhookabove": "\u1EC3",
"ecircumflextilde": "\u1EC5",
"ecyrillic": "\u0454",
"edblgrave": "\u0205",
"edeva": "\u090F",
"edieresis": "\u00EB",
"edot": "\u0117",
"edotaccent": "\u0117",
"edotbelow": "\u1EB9",
"eegurmukhi": "\u0A0F",
"eematragurmukhi": "\u0A47",
"efcyrillic": "\u0444",
"egrave": "\u00E8",
"egujarati": "\u0A8F",
"eharmenian": "\u0567",
"ehbopomofo": "\u311D",
"ehiragana": "\u3048",
"ehookabove": "\u1EBB",
"eibopomofo": "\u311F",
"eight": "\u0038",
"eightarabic": "\u0668",
"eightbengali": "\u09EE",
"eightcircle": "\u2467",
"eightcircleinversesansserif": "\u2791",
"eightdeva": "\u096E",
"eighteencircle": "\u2471",
"eighteenparen": "\u2485",
"eighteenperiod": "\u2499",
"eightgujarati": "\u0AEE",
"eightgurmukhi": "\u0A6E",
"eighthackarabic": "\u0668",
"eighthangzhou": "\u3028",
"eighthnotebeamed": "\u266B",
"eightideographicparen": "\u3227",
"eightinferior": "\u2088",
"eightmonospace": "\uFF18",
"eightoldstyle": "\uF738",
"eightparen": "\u247B",
"eightperiod": "\u248F",
"eightpersian": "\u06F8",
"eightroman": "\u2177",
"eightsuperior": "\u2078",
"eightthai": "\u0E58",
"einvertedbreve": "\u0207",
"eiotifiedcyrillic": "\u0465",
"ekatakana": "\u30A8",
"ekatakanahalfwidth": "\uFF74",
"ekonkargurmukhi": "\u0A74",
"ekorean": "\u3154",
"elcyrillic": "\u043B",
"element": "\u2208",
"elevencircle": "\u246A",
"elevenparen": "\u247E",
"elevenperiod": "\u2492",
"elevenroman": "\u217A",
"ellipsis": "\u2026",
"ellipsisvertical": "\u22EE",
"emacron": "\u0113",
"emacronacute": "\u1E17",
"emacrongrave": "\u1E15",
"emcyrillic": "\u043C",
"emdash": "\u2014",
"emdashvertical": "\uFE31",
"emonospace": "\uFF45",
"emphasismarkarmenian": "\u055B",
"emptyset": "\u2205",
"enbopomofo": "\u3123",
"encyrillic": "\u043D",
"endash": "\u2013",
"endashvertical": "\uFE32",
"endescendercyrillic": "\u04A3",
"eng": "\u014B",
"engbopomofo": "\u3125",
"enghecyrillic": "\u04A5",
"enhookcyrillic": "\u04C8",
"enspace": "\u2002",
"eogonek": "\u0119",
"eokorean": "\u3153",
"eopen": "\u025B",
"eopenclosed": "\u029A",
"eopenreversed": "\u025C",
"eopenreversedclosed": "\u025E",
"eopenreversedhook": "\u025D",
"eparen": "\u24A0",
"epsilon": "\u03B5",
"epsilontonos": "\u03AD",
"equal": "\u003D",
"equalmonospace": "\uFF1D",
"equalsmall": "\uFE66",
"equalsuperior": "\u207C",
"equivalence": "\u2261",
"erbopomofo": "\u3126",
"ercyrillic": "\u0440",
"ereversed": "\u0258",
"ereversedcyrillic": "\u044D",
"escyrillic": "\u0441",
"esdescendercyrillic": "\u04AB",
"esh": "\u0283",
"eshcurl": "\u0286",
"eshortdeva": "\u090E",
"eshortvowelsigndeva": "\u0946",
"eshreversedloop": "\u01AA",
"eshsquatreversed": "\u0285",
"esmallhiragana": "\u3047",
"esmallkatakana": "\u30A7",
"esmallkatakanahalfwidth": "\uFF6A",
"estimated": "\u212E",
"esuperior": "\uF6EC",
"eta": "\u03B7",
"etarmenian": "\u0568",
"etatonos": "\u03AE",
"eth": "\u00F0",
"etilde": "\u1EBD",
"etildebelow": "\u1E1B",
"etnahtafoukhhebrew": "\u0591",
"etnahtafoukhlefthebrew": "\u0591",
"etnahtahebrew": "\u0591",
"etnahtalefthebrew": "\u0591",
"eturned": "\u01DD",
"eukorean": "\u3161",
"euro": "\u20AC",
"evowelsignbengali": "\u09C7",
"evowelsigndeva": "\u0947",
"evowelsigngujarati": "\u0AC7",
"exclam": "\u0021",
"exclamarmenian": "\u055C",
"exclamdbl": "\u203C",
"exclamdown": "\u00A1",
"exclamdownsmall": "\uF7A1",
"exclammonospace": "\uFF01",
"exclamsmall": "\uF721",
"existential": "\u2203",
"ezh": "\u0292",
"ezhcaron": "\u01EF",
"ezhcurl": "\u0293",
"ezhreversed": "\u01B9",
"ezhtail": "\u01BA",
"f": "\u0066",
"fadeva": "\u095E",
"fagurmukhi": "\u0A5E",
"fahrenheit": "\u2109",
"fathaarabic": "\u064E",
"fathalowarabic": "\u064E",
"fathatanarabic": "\u064B",
"fbopomofo": "\u3108",
"fcircle": "\u24D5",
"fdotaccent": "\u1E1F",
"feharabic": "\u0641",
"feharmenian": "\u0586",
"fehfinalarabic": "\uFED2",
"fehinitialarabic": "\uFED3",
"fehmedialarabic": "\uFED4",
"feicoptic": "\u03E5",
"female": "\u2640",
"ff": "\uFB00",
"ffi": "\uFB03",
"ffl": "\uFB04",
"fi": "\uFB01",
"fifteencircle": "\u246E",
"fifteenparen": "\u2482",
"fifteenperiod": "\u2496",
"figuredash": "\u2012",
"filledbox": "\u25A0",
"filledrect": "\u25AC",
"finalkaf": "\u05DA",
"finalkafdagesh": "\uFB3A",
"finalkafdageshhebrew": "\uFB3A",
"finalkafhebrew": "\u05DA",
"finalkafqamats": "\u05DA\u05B8",
"finalkafqamatshebrew": "\u05DA\u05B8",
"finalkafsheva": "\u05DA\u05B0",
"finalkafshevahebrew": "\u05DA\u05B0",
"finalmem": "\u05DD",
"finalmemhebrew": "\u05DD",
"finalnun": "\u05DF",
"finalnunhebrew": "\u05DF",
"finalpe": "\u05E3",
"finalpehebrew": "\u05E3",
"finaltsadi": "\u05E5",
"finaltsadihebrew": "\u05E5",
"firsttonechinese": "\u02C9",
"fisheye": "\u25C9",
"fitacyrillic": "\u0473",
"five": "\u0035",
"fivearabic": "\u0665",
"fivebengali": "\u09EB",
"fivecircle": "\u2464",
"fivecircleinversesansserif": "\u278E",
"fivedeva": "\u096B",
"fiveeighths": "\u215D",
"fivegujarati": "\u0AEB",
"fivegurmukhi": "\u0A6B",
"fivehackarabic": "\u0665",
"fivehangzhou": "\u3025",
"fiveideographicparen": "\u3224",
"fiveinferior": "\u2085",
"fivemonospace": "\uFF15",
"fiveoldstyle": "\uF735",
"fiveparen": "\u2478",
"fiveperiod": "\u248C",
"fivepersian": "\u06F5",
"fiveroman": "\u2174",
"fivesuperior": "\u2075",
"fivethai": "\u0E55",
"fl": "\uFB02",
"florin": "\u0192",
"fmonospace": "\uFF46",
"fmsquare": "\u3399",
"fofanthai": "\u0E1F",
"fofathai": "\u0E1D",
"fongmanthai": "\u0E4F",
"forall": "\u2200",
"four": "\u0034",
"fourarabic": "\u0664",
"fourbengali": "\u09EA",
"fourcircle": "\u2463",
"fourcircleinversesansserif": "\u278D",
"fourdeva": "\u096A",
"fourgujarati": "\u0AEA",
"fourgurmukhi": "\u0A6A",
"fourhackarabic": "\u0664",
"fourhangzhou": "\u3024",
"fourideographicparen": "\u3223",
"fourinferior": "\u2084",
"fourmonospace": "\uFF14",
"fournumeratorbengali": "\u09F7",
"fouroldstyle": "\uF734",
"fourparen": "\u2477",
"fourperiod": "\u248B",
"fourpersian": "\u06F4",
"fourroman": "\u2173",
"foursuperior": "\u2074",
"fourteencircle": "\u246D",
"fourteenparen": "\u2481",
"fourteenperiod": "\u2495",
"fourthai": "\u0E54",
"fourthtonechinese": "\u02CB",
"fparen": "\u24A1",
"fraction": "\u2044",
"franc": "\u20A3",
"g": "\u0067",
"gabengali": "\u0997",
"gacute": "\u01F5",
"gadeva": "\u0917",
"gafarabic": "\u06AF",
"gaffinalarabic": "\uFB93",
"gafinitialarabic": "\uFB94",
"gafmedialarabic": "\uFB95",
"gagujarati": "\u0A97",
"gagurmukhi": "\u0A17",
"gahiragana": "\u304C",
"gakatakana": "\u30AC",
"gamma": "\u03B3",
"gammalatinsmall": "\u0263",
"gammasuperior": "\u02E0",
"gangiacoptic": "\u03EB",
"gbopomofo": "\u310D",
"gbreve": "\u011F",
"gcaron": "\u01E7",
"gcedilla": "\u0123",
"gcircle": "\u24D6",
"gcircumflex": "\u011D",
"gcommaaccent": "\u0123",
"gdot": "\u0121",
"gdotaccent": "\u0121",
"gecyrillic": "\u0433",
"gehiragana": "\u3052",
"gekatakana": "\u30B2",
"geometricallyequal": "\u2251",
"gereshaccenthebrew": "\u059C",
"gereshhebrew": "\u05F3",
"gereshmuqdamhebrew": "\u059D",
"germandbls": "\u00DF",
"gershayimaccenthebrew": "\u059E",
"gershayimhebrew": "\u05F4",
"getamark": "\u3013",
"ghabengali": "\u0998",
"ghadarmenian": "\u0572",
"ghadeva": "\u0918",
"ghagujarati": "\u0A98",
"ghagurmukhi": "\u0A18",
"ghainarabic": "\u063A",
"ghainfinalarabic": "\uFECE",
"ghaininitialarabic": "\uFECF",
"ghainmedialarabic": "\uFED0",
"ghemiddlehookcyrillic": "\u0495",
"ghestrokecyrillic": "\u0493",
"gheupturncyrillic": "\u0491",
"ghhadeva": "\u095A",
"ghhagurmukhi": "\u0A5A",
"ghook": "\u0260",
"ghzsquare": "\u3393",
"gihiragana": "\u304E",
"gikatakana": "\u30AE",
"gimarmenian": "\u0563",
"gimel": "\u05D2",
"gimeldagesh": "\uFB32",
"gimeldageshhebrew": "\uFB32",
"gimelhebrew": "\u05D2",
"gjecyrillic": "\u0453",
"glottalinvertedstroke": "\u01BE",
"glottalstop": "\u0294",
"glottalstopinverted": "\u0296",
"glottalstopmod": "\u02C0",
"glottalstopreversed": "\u0295",
"glottalstopreversedmod": "\u02C1",
"glottalstopreversedsuperior": "\u02E4",
"glottalstopstroke": "\u02A1",
"glottalstopstrokereversed": "\u02A2",
"gmacron": "\u1E21",
"gmonospace": "\uFF47",
"gohiragana": "\u3054",
"gokatakana": "\u30B4",
"gparen": "\u24A2",
"gpasquare": "\u33AC",
"gradient": "\u2207",
"grave": "\u0060",
"gravebelowcmb": "\u0316",
"gravecmb": "\u0300",
"gravecomb": "\u0300",
"gravedeva": "\u0953",
"gravelowmod": "\u02CE",
"gravemonospace": "\uFF40",
"gravetonecmb": "\u0340",
"greater": "\u003E",
"greaterequal": "\u2265",
"greaterequalorless": "\u22DB",
"greatermonospace": "\uFF1E",
"greaterorequivalent": "\u2273",
"greaterorless": "\u2277",
"greateroverequal": "\u2267",
"greatersmall": "\uFE65",
"gscript": "\u0261",
"gstroke": "\u01E5",
"guhiragana": "\u3050",
"guillemotleft": "\u00AB",
"guillemotright": "\u00BB",
"guilsinglleft": "\u2039",
"guilsinglright": "\u203A",
"gukatakana": "\u30B0",
"guramusquare": "\u3318",
"gysquare": "\u33C9",
"h": "\u0068",
"haabkhasiancyrillic": "\u04A9",
"haaltonearabic": "\u06C1",
"habengali": "\u09B9",
"hadescendercyrillic": "\u04B3",
"hadeva": "\u0939",
"hagujarati": "\u0AB9",
"hagurmukhi": "\u0A39",
"haharabic": "\u062D",
"hahfinalarabic": "\uFEA2",
"hahinitialarabic": "\uFEA3",
"hahiragana": "\u306F",
"hahmedialarabic": "\uFEA4",
"haitusquare": "\u332A",
"hakatakana": "\u30CF",
"hakatakanahalfwidth": "\uFF8A",
"halantgurmukhi": "\u0A4D",
"hamzaarabic": "\u0621",
"hamzadammaarabic": "\u0621\u064F",
"hamzadammatanarabic": "\u0621\u064C",
"hamzafathaarabic": "\u0621\u064E",
"hamzafathatanarabic": "\u0621\u064B",
"hamzalowarabic": "\u0621",
"hamzalowkasraarabic": "\u0621\u0650",
"hamzalowkasratanarabic": "\u0621\u064D",
"hamzasukunarabic": "\u0621\u0652",
"hangulfiller": "\u3164",
"hardsigncyrillic": "\u044A",
"harpoonleftbarbup": "\u21BC",
"harpoonrightbarbup": "\u21C0",
"hasquare": "\u33CA",
"hatafpatah": "\u05B2",
"hatafpatah16": "\u05B2",
"hatafpatah23": "\u05B2",
"hatafpatah2f": "\u05B2",
"hatafpatahhebrew": "\u05B2",
"hatafpatahnarrowhebrew": "\u05B2",
"hatafpatahquarterhebrew": "\u05B2",
"hatafpatahwidehebrew": "\u05B2",
"hatafqamats": "\u05B3",
"hatafqamats1b": "\u05B3",
"hatafqamats28": "\u05B3",
"hatafqamats34": "\u05B3",
"hatafqamatshebrew": "\u05B3",
"hatafqamatsnarrowhebrew": "\u05B3",
"hatafqamatsquarterhebrew": "\u05B3",
"hatafqamatswidehebrew": "\u05B3",
"hatafsegol": "\u05B1",
"hatafsegol17": "\u05B1",
"hatafsegol24": "\u05B1",
"hatafsegol30": "\u05B1",
"hatafsegolhebrew": "\u05B1",
"hatafsegolnarrowhebrew": "\u05B1",
"hatafsegolquarterhebrew": "\u05B1",
"hatafsegolwidehebrew": "\u05B1",
"hbar": "\u0127",
"hbopomofo": "\u310F",
"hbrevebelow": "\u1E2B",
"hcedilla": "\u1E29",
"hcircle": "\u24D7",
"hcircumflex": "\u0125",
"hdieresis": "\u1E27",
"hdotaccent": "\u1E23",
"hdotbelow": "\u1E25",
"he": "\u05D4",
"heart": "\u2665",
"heartsuitblack": "\u2665",
"heartsuitwhite": "\u2661",
"hedagesh": "\uFB34",
"hedageshhebrew": "\uFB34",
"hehaltonearabic": "\u06C1",
"heharabic": "\u0647",
"hehebrew": "\u05D4",
"hehfinalaltonearabic": "\uFBA7",
"hehfinalalttwoarabic": "\uFEEA",
"hehfinalarabic": "\uFEEA",
"hehhamzaabovefinalarabic": "\uFBA5",
"hehhamzaaboveisolatedarabic": "\uFBA4",
"hehinitialaltonearabic": "\uFBA8",
"hehinitialarabic": "\uFEEB",
"hehiragana": "\u3078",
"hehmedialaltonearabic": "\uFBA9",
"hehmedialarabic": "\uFEEC",
"heiseierasquare": "\u337B",
"hekatakana": "\u30D8",
"hekatakanahalfwidth": "\uFF8D",
"hekutaarusquare": "\u3336",
"henghook": "\u0267",
"herutusquare": "\u3339",
"het": "\u05D7",
"hethebrew": "\u05D7",
"hhook": "\u0266",
"hhooksuperior": "\u02B1",
"hieuhacirclekorean": "\u327B",
"hieuhaparenkorean": "\u321B",
"hieuhcirclekorean": "\u326D",
"hieuhkorean": "\u314E",
"hieuhparenkorean": "\u320D",
"hihiragana": "\u3072",
"hikatakana": "\u30D2",
"hikatakanahalfwidth": "\uFF8B",
"hiriq": "\u05B4",
"hiriq14": "\u05B4",
"hiriq21": "\u05B4",
"hiriq2d": "\u05B4",
"hiriqhebrew": "\u05B4",
"hiriqnarrowhebrew": "\u05B4",
"hiriqquarterhebrew": "\u05B4",
"hiriqwidehebrew": "\u05B4",
"hlinebelow": "\u1E96",
"hmonospace": "\uFF48",
"hoarmenian": "\u0570",
"hohipthai": "\u0E2B",
"hohiragana": "\u307B",
"hokatakana": "\u30DB",
"hokatakanahalfwidth": "\uFF8E",
"holam": "\u05B9",
"holam19": "\u05B9",
"holam26": "\u05B9",
"holam32": "\u05B9",
"holamhebrew": "\u05B9",
"holamnarrowhebrew": "\u05B9",
"holamquarterhebrew": "\u05B9",
"holamwidehebrew": "\u05B9",
"honokhukthai": "\u0E2E",
"hookabovecomb": "\u0309",
"hookcmb": "\u0309",
"hookpalatalizedbelowcmb": "\u0321",
"hookretroflexbelowcmb": "\u0322",
"hoonsquare": "\u3342",
"horicoptic": "\u03E9",
"horizontalbar": "\u2015",
"horncmb": "\u031B",
"hotsprings": "\u2668",
"house": "\u2302",
"hparen": "\u24A3",
"hsuperior": "\u02B0",
"hturned": "\u0265",
"huhiragana": "\u3075",
"huiitosquare": "\u3333",
"hukatakana": "\u30D5",
"hukatakanahalfwidth": "\uFF8C",
"hungarumlaut": "\u02DD",
"hungarumlautcmb": "\u030B",
"hv": "\u0195",
"hyphen": "\u002D",
"hypheninferior": "\uF6E5",
"hyphenmonospace": "\uFF0D",
"hyphensmall": "\uFE63",
"hyphensuperior": "\uF6E6",
"hyphentwo": "\u2010",
"i": "\u0069",
"iacute": "\u00ED",
"iacyrillic": "\u044F",
"ibengali": "\u0987",
"ibopomofo": "\u3127",
"ibreve": "\u012D",
"icaron": "\u01D0",
"icircle": "\u24D8",
"icircumflex": "\u00EE",
"icyrillic": "\u0456",
"idblgrave": "\u0209",
"ideographearthcircle": "\u328F",
"ideographfirecircle": "\u328B",
"ideographicallianceparen": "\u323F",
"ideographiccallparen": "\u323A",
"ideographiccentrecircle": "\u32A5",
"ideographicclose": "\u3006",
"ideographiccomma": "\u3001",
"ideographiccommaleft": "\uFF64",
"ideographiccongratulationparen": "\u3237",
"ideographiccorrectcircle": "\u32A3",
"ideographicearthparen": "\u322F",
"ideographicenterpriseparen": "\u323D",
"ideographicexcellentcircle": "\u329D",
"ideographicfestivalparen": "\u3240",
"ideographicfinancialcircle": "\u3296",
"ideographicfinancialparen": "\u3236",
"ideographicfireparen": "\u322B",
"ideographichaveparen": "\u3232",
"ideographichighcircle": "\u32A4",
"ideographiciterationmark": "\u3005",
"ideographiclaborcircle": "\u3298",
"ideographiclaborparen": "\u3238",
"ideographicleftcircle": "\u32A7",
"ideographiclowcircle": "\u32A6",
"ideographicmedicinecircle": "\u32A9",
"ideographicmetalparen": "\u322E",
"ideographicmoonparen": "\u322A",
"ideographicnameparen": "\u3234",
"ideographicperiod": "\u3002",
"ideographicprintcircle": "\u329E",
"ideographicreachparen": "\u3243",
"ideographicrepresentparen": "\u3239",
"ideographicresourceparen": "\u323E",
"ideographicrightcircle": "\u32A8",
"ideographicsecretcircle": "\u3299",
"ideographicselfparen": "\u3242",
"ideographicsocietyparen": "\u3233",
"ideographicspace": "\u3000",
"ideographicspecialparen": "\u3235",
"ideographicstockparen": "\u3231",
"ideographicstudyparen": "\u323B",
"ideographicsunparen": "\u3230",
"ideographicsuperviseparen": "\u323C",
"ideographicwaterparen": "\u322C",
"ideographicwoodparen": "\u322D",
"ideographiczero": "\u3007",
"ideographmetalcircle": "\u328E",
"ideographmooncircle": "\u328A",
"ideographnamecircle": "\u3294",
"ideographsuncircle": "\u3290",
"ideographwatercircle": "\u328C",
"ideographwoodcircle": "\u328D",
"ideva": "\u0907",
"idieresis": "\u00EF",
"idieresisacute": "\u1E2F",
"idieresiscyrillic": "\u04E5",
"idotbelow": "\u1ECB",
"iebrevecyrillic": "\u04D7",
"iecyrillic": "\u0435",
"ieungacirclekorean": "\u3275",
"ieungaparenkorean": "\u3215",
"ieungcirclekorean": "\u3267",
"ieungkorean": "\u3147",
"ieungparenkorean": "\u3207",
"igrave": "\u00EC",
"igujarati": "\u0A87",
"igurmukhi": "\u0A07",
"ihiragana": "\u3044",
"ihookabove": "\u1EC9",
"iibengali": "\u0988",
"iicyrillic": "\u0438",
"iideva": "\u0908",
"iigujarati": "\u0A88",
"iigurmukhi": "\u0A08",
"iimatragurmukhi": "\u0A40",
"iinvertedbreve": "\u020B",
"iishortcyrillic": "\u0439",
"iivowelsignbengali": "\u09C0",
"iivowelsigndeva": "\u0940",
"iivowelsigngujarati": "\u0AC0",
"ij": "\u0133",
"ikatakana": "\u30A4",
"ikatakanahalfwidth": "\uFF72",
"ikorean": "\u3163",
"ilde": "\u02DC",
"iluyhebrew": "\u05AC",
"imacron": "\u012B",
"imacroncyrillic": "\u04E3",
"imageorapproximatelyequal": "\u2253",
"imatragurmukhi": "\u0A3F",
"imonospace": "\uFF49",
"increment": "\u2206",
"infinity": "\u221E",
"iniarmenian": "\u056B",
"integral": "\u222B",
"integralbottom": "\u2321",
"integralbt": "\u2321",
"integralex": "\uF8F5",
"integraltop": "\u2320",
"integraltp": "\u2320",
"intersection": "\u2229",
"intisquare": "\u3305",
"invbullet": "\u25D8",
"invcircle": "\u25D9",
"invsmileface": "\u263B",
"iocyrillic": "\u0451",
"iogonek": "\u012F",
"iota": "\u03B9",
"iotadieresis": "\u03CA",
"iotadieresistonos": "\u0390",
"iotalatin": "\u0269",
"iotatonos": "\u03AF",
"iparen": "\u24A4",
"irigurmukhi": "\u0A72",
"ismallhiragana": "\u3043",
"ismallkatakana": "\u30A3",
"ismallkatakanahalfwidth": "\uFF68",
"issharbengali": "\u09FA",
"istroke": "\u0268",
"isuperior": "\uF6ED",
"iterationhiragana": "\u309D",
"iterationkatakana": "\u30FD",
"itilde": "\u0129",
"itildebelow": "\u1E2D",
"iubopomofo": "\u3129",
"iucyrillic": "\u044E",
"ivowelsignbengali": "\u09BF",
"ivowelsigndeva": "\u093F",
"ivowelsigngujarati": "\u0ABF",
"izhitsacyrillic": "\u0475",
"izhitsadblgravecyrillic": "\u0477",
"j": "\u006A",
"jaarmenian": "\u0571",
"jabengali": "\u099C",
"jadeva": "\u091C",
"jagujarati": "\u0A9C",
"jagurmukhi": "\u0A1C",
"jbopomofo": "\u3110",
"jcaron": "\u01F0",
"jcircle": "\u24D9",
"jcircumflex": "\u0135",
"jcrossedtail": "\u029D",
"jdotlessstroke": "\u025F",
"jecyrillic": "\u0458",
"jeemarabic": "\u062C",
"jeemfinalarabic": "\uFE9E",
"jeeminitialarabic": "\uFE9F",
"jeemmedialarabic": "\uFEA0",
"jeharabic": "\u0698",
"jehfinalarabic": "\uFB8B",
"jhabengali": "\u099D",
"jhadeva": "\u091D",
"jhagujarati": "\u0A9D",
"jhagurmukhi": "\u0A1D",
"jheharmenian": "\u057B",
"jis": "\u3004",
"jmonospace": "\uFF4A",
"jparen": "\u24A5",
"jsuperior": "\u02B2",
"k": "\u006B",
"kabashkircyrillic": "\u04A1",
"kabengali": "\u0995",
"kacute": "\u1E31",
"kacyrillic": "\u043A",
"kadescendercyrillic": "\u049B",
"kadeva": "\u0915",
"kaf": "\u05DB",
"kafarabic": "\u0643",
"kafdagesh": "\uFB3B",
"kafdageshhebrew": "\uFB3B",
"kaffinalarabic": "\uFEDA",
"kafhebrew": "\u05DB",
"kafinitialarabic": "\uFEDB",
"kafmedialarabic": "\uFEDC",
"kafrafehebrew": "\uFB4D",
"kagujarati": "\u0A95",
"kagurmukhi": "\u0A15",
"kahiragana": "\u304B",
"kahookcyrillic": "\u04C4",
"kakatakana": "\u30AB",
"kakatakanahalfwidth": "\uFF76",
"kappa": "\u03BA",
"kappasymbolgreek": "\u03F0",
"kapyeounmieumkorean": "\u3171",
"kapyeounphieuphkorean": "\u3184",
"kapyeounpieupkorean": "\u3178",
"kapyeounssangpieupkorean": "\u3179",
"karoriisquare": "\u330D",
"kashidaautoarabic": "\u0640",
"kashidaautonosidebearingarabic": "\u0640",
"kasmallkatakana": "\u30F5",
"kasquare": "\u3384",
"kasraarabic": "\u0650",
"kasratanarabic": "\u064D",
"kastrokecyrillic": "\u049F",
"katahiraprolongmarkhalfwidth": "\uFF70",
"kaverticalstrokecyrillic": "\u049D",
"kbopomofo": "\u310E",
"kcalsquare": "\u3389",
"kcaron": "\u01E9",
"kcedilla": "\u0137",
"kcircle": "\u24DA",
"kcommaaccent": "\u0137",
"kdotbelow": "\u1E33",
"keharmenian": "\u0584",
"kehiragana": "\u3051",
"kekatakana": "\u30B1",
"kekatakanahalfwidth": "\uFF79",
"kenarmenian": "\u056F",
"kesmallkatakana": "\u30F6",
"kgreenlandic": "\u0138",
"khabengali": "\u0996",
"khacyrillic": "\u0445",
"khadeva": "\u0916",
"khagujarati": "\u0A96",
"khagurmukhi": "\u0A16",
"khaharabic": "\u062E",
"khahfinalarabic": "\uFEA6",
"khahinitialarabic": "\uFEA7",
"khahmedialarabic": "\uFEA8",
"kheicoptic": "\u03E7",
"khhadeva": "\u0959",
"khhagurmukhi": "\u0A59",
"khieukhacirclekorean": "\u3278",
"khieukhaparenkorean": "\u3218",
"khieukhcirclekorean": "\u326A",
"khieukhkorean": "\u314B",
"khieukhparenkorean": "\u320A",
"khokhaithai": "\u0E02",
"khokhonthai": "\u0E05",
"khokhuatthai": "\u0E03",
"khokhwaithai": "\u0E04",
"khomutthai": "\u0E5B",
"khook": "\u0199",
"khorakhangthai": "\u0E06",
"khzsquare": "\u3391",
"kihiragana": "\u304D",
"kikatakana": "\u30AD",
"kikatakanahalfwidth": "\uFF77",
"kiroguramusquare": "\u3315",
"kiromeetorusquare": "\u3316",
"kirosquare": "\u3314",
"kiyeokacirclekorean": "\u326E",
"kiyeokaparenkorean": "\u320E",
"kiyeokcirclekorean": "\u3260",
"kiyeokkorean": "\u3131",
"kiyeokparenkorean": "\u3200",
"kiyeoksioskorean": "\u3133",
"kjecyrillic": "\u045C",
"klinebelow": "\u1E35",
"klsquare": "\u3398",
"kmcubedsquare": "\u33A6",
"kmonospace": "\uFF4B",
"kmsquaredsquare": "\u33A2",
"kohiragana": "\u3053",
"kohmsquare": "\u33C0",
"kokaithai": "\u0E01",
"kokatakana": "\u30B3",
"kokatakanahalfwidth": "\uFF7A",
"kooposquare": "\u331E",
"koppacyrillic": "\u0481",
"koreanstandardsymbol": "\u327F",
"koroniscmb": "\u0343",
"kparen": "\u24A6",
"kpasquare": "\u33AA",
"ksicyrillic": "\u046F",
"ktsquare": "\u33CF",
"kturned": "\u029E",
"kuhiragana": "\u304F",
"kukatakana": "\u30AF",
"kukatakanahalfwidth": "\uFF78",
"kvsquare": "\u33B8",
"kwsquare": "\u33BE",
"l": "\u006C",
"labengali": "\u09B2",
"lacute": "\u013A",
"ladeva": "\u0932",
"lagujarati": "\u0AB2",
"lagurmukhi": "\u0A32",
"lakkhangyaothai": "\u0E45",
"lamaleffinalarabic": "\uFEFC",
"lamalefhamzaabovefinalarabic": "\uFEF8",
"lamalefhamzaaboveisolatedarabic": "\uFEF7",
"lamalefhamzabelowfinalarabic": "\uFEFA",
"lamalefhamzabelowisolatedarabic": "\uFEF9",
"lamalefisolatedarabic": "\uFEFB",
"lamalefmaddaabovefinalarabic": "\uFEF6",
"lamalefmaddaaboveisolatedarabic": "\uFEF5",
"lamarabic": "\u0644",
"lambda": "\u03BB",
"lambdastroke": "\u019B",
"lamed": "\u05DC",
"lameddagesh": "\uFB3C",
"lameddageshhebrew": "\uFB3C",
"lamedhebrew": "\u05DC",
"lamedholam": "\u05DC\u05B9",
"lamedholamdagesh": "\u05DC\u05B9\u05BC",
"lamedholamdageshhebrew": "\u05DC\u05B9\u05BC",
"lamedholamhebrew": "\u05DC\u05B9",
"lamfinalarabic": "\uFEDE",
"lamhahinitialarabic": "\uFCCA",
"laminitialarabic": "\uFEDF",
"lamjeeminitialarabic": "\uFCC9",
"lamkhahinitialarabic": "\uFCCB",
"lamlamhehisolatedarabic": "\uFDF2",
"lammedialarabic": "\uFEE0",
"lammeemhahinitialarabic": "\uFD88",
"lammeeminitialarabic": "\uFCCC",
"lammeemjeeminitialarabic": "\uFEDF\uFEE4\uFEA0",
"lammeemkhahinitialarabic": "\uFEDF\uFEE4\uFEA8",
"largecircle": "\u25EF",
"lbar": "\u019A",
"lbelt": "\u026C",
"lbopomofo": "\u310C",
"lcaron": "\u013E",
"lcedilla": "\u013C",
"lcircle": "\u24DB",
"lcircumflexbelow": "\u1E3D",
"lcommaaccent": "\u013C",
"ldot": "\u0140",
"ldotaccent": "\u0140",
"ldotbelow": "\u1E37",
"ldotbelowmacron": "\u1E39",
"leftangleabovecmb": "\u031A",
"lefttackbelowcmb": "\u0318",
"less": "\u003C",
"lessequal": "\u2264",
"lessequalorgreater": "\u22DA",
"lessmonospace": "\uFF1C",
"lessorequivalent": "\u2272",
"lessorgreater": "\u2276",
"lessoverequal": "\u2266",
"lesssmall": "\uFE64",
"lezh": "\u026E",
"lfblock": "\u258C",
"lhookretroflex": "\u026D",
"lira": "\u20A4",
"liwnarmenian": "\u056C",
"lj": "\u01C9",
"ljecyrillic": "\u0459",
"ll": "\uF6C0",
"lladeva": "\u0933",
"llagujarati": "\u0AB3",
"llinebelow": "\u1E3B",
"llladeva": "\u0934",
"llvocalicbengali": "\u09E1",
"llvocalicdeva": "\u0961",
"llvocalicvowelsignbengali": "\u09E3",
"llvocalicvowelsigndeva": "\u0963",
"lmiddletilde": "\u026B",
"lmonospace": "\uFF4C",
"lmsquare": "\u33D0",
"lochulathai": "\u0E2C",
"logicaland": "\u2227",
"logicalnot": "\u00AC",
"logicalnotreversed": "\u2310",
"logicalor": "\u2228",
"lolingthai": "\u0E25",
"longs": "\u017F",
"lowlinecenterline": "\uFE4E",
"lowlinecmb": "\u0332",
"lowlinedashed": "\uFE4D",
"lozenge": "\u25CA",
"lparen": "\u24A7",
"lslash": "\u0142",
"lsquare": "\u2113",
"lsuperior": "\uF6EE",
"ltshade": "\u2591",
"luthai": "\u0E26",
"lvocalicbengali": "\u098C",
"lvocalicdeva": "\u090C",
"lvocalicvowelsignbengali": "\u09E2",
"lvocalicvowelsigndeva": "\u0962",
"lxsquare": "\u33D3",
"m": "\u006D",
"mabengali": "\u09AE",
"macron": "\u00AF",
"macronbelowcmb": "\u0331",
"macroncmb": "\u0304",
"macronlowmod": "\u02CD",
"macronmonospace": "\uFFE3",
"macute": "\u1E3F",
"madeva": "\u092E",
"magujarati": "\u0AAE",
"magurmukhi": "\u0A2E",
"mahapakhhebrew": "\u05A4",
"mahapakhlefthebrew": "\u05A4",
"mahiragana": "\u307E",
"maichattawalowleftthai": "\uF895",
"maichattawalowrightthai": "\uF894",
"maichattawathai": "\u0E4B",
"maichattawaupperleftthai": "\uF893",
"maieklowleftthai": "\uF88C",
"maieklowrightthai": "\uF88B",
"maiekthai": "\u0E48",
"maiekupperleftthai": "\uF88A",
"maihanakatleftthai": "\uF884",
"maihanakatthai": "\u0E31",
"maitaikhuleftthai": "\uF889",
"maitaikhuthai": "\u0E47",
"maitholowleftthai": "\uF88F",
"maitholowrightthai": "\uF88E",
"maithothai": "\u0E49",
"maithoupperleftthai": "\uF88D",
"maitrilowleftthai": "\uF892",
"maitrilowrightthai": "\uF891",
"maitrithai": "\u0E4A",
"maitriupperleftthai": "\uF890",
"maiyamokthai": "\u0E46",
"makatakana": "\u30DE",
"makatakanahalfwidth": "\uFF8F",
"male": "\u2642",
"mansyonsquare": "\u3347",
"maqafhebrew": "\u05BE",
"mars": "\u2642",
"masoracirclehebrew": "\u05AF",
"masquare": "\u3383",
"mbopomofo": "\u3107",
"mbsquare": "\u33D4",
"mcircle": "\u24DC",
"mcubedsquare": "\u33A5",
"mdotaccent": "\u1E41",
"mdotbelow": "\u1E43",
"meemarabic": "\u0645",
"meemfinalarabic": "\uFEE2",
"meeminitialarabic": "\uFEE3",
"meemmedialarabic": "\uFEE4",
"meemmeeminitialarabic": "\uFCD1",
"meemmeemisolatedarabic": "\uFC48",
"meetorusquare": "\u334D",
"mehiragana": "\u3081",
"meizierasquare": "\u337E",
"mekatakana": "\u30E1",
"mekatakanahalfwidth": "\uFF92",
"mem": "\u05DE",
"memdagesh": "\uFB3E",
"memdageshhebrew": "\uFB3E",
"memhebrew": "\u05DE",
"menarmenian": "\u0574",
"merkhahebrew": "\u05A5",
"merkhakefulahebrew": "\u05A6",
"merkhakefulalefthebrew": "\u05A6",
"merkhalefthebrew": "\u05A5",
"mhook": "\u0271",
"mhzsquare": "\u3392",
"middledotkatakanahalfwidth": "\uFF65",
"middot": "\u00B7",
"mieumacirclekorean": "\u3272",
"mieumaparenkorean": "\u3212",
"mieumcirclekorean": "\u3264",
"mieumkorean": "\u3141",
"mieumpansioskorean": "\u3170",
"mieumparenkorean": "\u3204",
"mieumpieupkorean": "\u316E",
"mieumsioskorean": "\u316F",
"mihiragana": "\u307F",
"mikatakana": "\u30DF",
"mikatakanahalfwidth": "\uFF90",
"minus": "\u2212",
"minusbelowcmb": "\u0320",
"minuscircle": "\u2296",
"minusmod": "\u02D7",
"minusplus": "\u2213",
"minute": "\u2032",
"miribaarusquare": "\u334A",
"mirisquare": "\u3349",
"mlonglegturned": "\u0270",
"mlsquare": "\u3396",
"mmcubedsquare": "\u33A3",
"mmonospace": "\uFF4D",
"mmsquaredsquare": "\u339F",
"mohiragana": "\u3082",
"mohmsquare": "\u33C1",
"mokatakana": "\u30E2",
"mokatakanahalfwidth": "\uFF93",
"molsquare": "\u33D6",
"momathai": "\u0E21",
"moverssquare": "\u33A7",
"moverssquaredsquare": "\u33A8",
"mparen": "\u24A8",
"mpasquare": "\u33AB",
"mssquare": "\u33B3",
"msuperior": "\uF6EF",
"mturned": "\u026F",
"mu": "\u00B5",
"mu1": "\u00B5",
"muasquare": "\u3382",
"muchgreater": "\u226B",
"muchless": "\u226A",
"mufsquare": "\u338C",
"mugreek": "\u03BC",
"mugsquare": "\u338D",
"muhiragana": "\u3080",
"mukatakana": "\u30E0",
"mukatakanahalfwidth": "\uFF91",
"mulsquare": "\u3395",
"multiply": "\u00D7",
"mumsquare": "\u339B",
"munahhebrew": "\u05A3",
"munahlefthebrew": "\u05A3",
"musicalnote": "\u266A",
"musicalnotedbl": "\u266B",
"musicflatsign": "\u266D",
"musicsharpsign": "\u266F",
"mussquare": "\u33B2",
"muvsquare": "\u33B6",
"muwsquare": "\u33BC",
"mvmegasquare": "\u33B9",
"mvsquare": "\u33B7",
"mwmegasquare": "\u33BF",
"mwsquare": "\u33BD",
"n": "\u006E",
"nabengali": "\u09A8",
"nabla": "\u2207",
"nacute": "\u0144",
"nadeva": "\u0928",
"nagujarati": "\u0AA8",
"nagurmukhi": "\u0A28",
"nahiragana": "\u306A",
"nakatakana": "\u30CA",
"nakatakanahalfwidth": "\uFF85",
"napostrophe": "\u0149",
"nasquare": "\u3381",
"nbopomofo": "\u310B",
"nbspace": "\u00A0",
"ncaron": "\u0148",
"ncedilla": "\u0146",
"ncircle": "\u24DD",
"ncircumflexbelow": "\u1E4B",
"ncommaaccent": "\u0146",
"ndotaccent": "\u1E45",
"ndotbelow": "\u1E47",
"nehiragana": "\u306D",
"nekatakana": "\u30CD",
"nekatakanahalfwidth": "\uFF88",
"newsheqelsign": "\u20AA",
"nfsquare": "\u338B",
"ngabengali": "\u0999",
"ngadeva": "\u0919",
"ngagujarati": "\u0A99",
"ngagurmukhi": "\u0A19",
"ngonguthai": "\u0E07",
"nhiragana": "\u3093",
"nhookleft": "\u0272",
"nhookretroflex": "\u0273",
"nieunacirclekorean": "\u326F",
"nieunaparenkorean": "\u320F",
"nieuncieuckorean": "\u3135",
"nieuncirclekorean": "\u3261",
"nieunhieuhkorean": "\u3136",
"nieunkorean": "\u3134",
"nieunpansioskorean": "\u3168",
"nieunparenkorean": "\u3201",
"nieunsioskorean": "\u3167",
"nieuntikeutkorean": "\u3166",
"nihiragana": "\u306B",
"nikatakana": "\u30CB",
"nikatakanahalfwidth": "\uFF86",
"nikhahitleftthai": "\uF899",
"nikhahitthai": "\u0E4D",
"nine": "\u0039",
"ninearabic": "\u0669",
"ninebengali": "\u09EF",
"ninecircle": "\u2468",
"ninecircleinversesansserif": "\u2792",
"ninedeva": "\u096F",
"ninegujarati": "\u0AEF",
"ninegurmukhi": "\u0A6F",
"ninehackarabic": "\u0669",
"ninehangzhou": "\u3029",
"nineideographicparen": "\u3228",
"nineinferior": "\u2089",
"ninemonospace": "\uFF19",
"nineoldstyle": "\uF739",
"nineparen": "\u247C",
"nineperiod": "\u2490",
"ninepersian": "\u06F9",
"nineroman": "\u2178",
"ninesuperior": "\u2079",
"nineteencircle": "\u2472",
"nineteenparen": "\u2486",
"nineteenperiod": "\u249A",
"ninethai": "\u0E59",
"nj": "\u01CC",
"njecyrillic": "\u045A",
"nkatakana": "\u30F3",
"nkatakanahalfwidth": "\uFF9D",
"nlegrightlong": "\u019E",
"nlinebelow": "\u1E49",
"nmonospace": "\uFF4E",
"nmsquare": "\u339A",
"nnabengali": "\u09A3",
"nnadeva": "\u0923",
"nnagujarati": "\u0AA3",
"nnagurmukhi": "\u0A23",
"nnnadeva": "\u0929",
"nohiragana": "\u306E",
"nokatakana": "\u30CE",
"nokatakanahalfwidth": "\uFF89",
"nonbreakingspace": "\u00A0",
"nonenthai": "\u0E13",
"nonuthai": "\u0E19",
"noonarabic": "\u0646",
"noonfinalarabic": "\uFEE6",
"noonghunnaarabic": "\u06BA",
"noonghunnafinalarabic": "\uFB9F",
"noonhehinitialarabic": "\uFEE7\uFEEC",
"nooninitialarabic": "\uFEE7",
"noonjeeminitialarabic": "\uFCD2",
"noonjeemisolatedarabic": "\uFC4B",
"noonmedialarabic": "\uFEE8",
"noonmeeminitialarabic": "\uFCD5",
"noonmeemisolatedarabic": "\uFC4E",
"noonnoonfinalarabic": "\uFC8D",
"notcontains": "\u220C",
"notelement": "\u2209",
"notelementof": "\u2209",
"notequal": "\u2260",
"notgreater": "\u226F",
"notgreaternorequal": "\u2271",
"notgreaternorless": "\u2279",
"notidentical": "\u2262",
"notless": "\u226E",
"notlessnorequal": "\u2270",
"notparallel": "\u2226",
"notprecedes": "\u2280",
"notsubset": "\u2284",
"notsucceeds": "\u2281",
"notsuperset": "\u2285",
"nowarmenian": "\u0576",
"nparen": "\u24A9",
"nssquare": "\u33B1",
"nsuperior": "\u207F",
"ntilde": "\u00F1",
"nu": "\u03BD",
"nuhiragana": "\u306C",
"nukatakana": "\u30CC",
"nukatakanahalfwidth": "\uFF87",
"nuktabengali": "\u09BC",
"nuktadeva": "\u093C",
"nuktagujarati": "\u0ABC",
"nuktagurmukhi": "\u0A3C",
"numbersign": "\u0023",
"numbersignmonospace": "\uFF03",
"numbersignsmall": "\uFE5F",
"numeralsigngreek": "\u0374",
"numeralsignlowergreek": "\u0375",
"numero": "\u2116",
"nun": "\u05E0",
"nundagesh": "\uFB40",
"nundageshhebrew": "\uFB40",
"nunhebrew": "\u05E0",
"nvsquare": "\u33B5",
"nwsquare": "\u33BB",
"nyabengali": "\u099E",
"nyadeva": "\u091E",
"nyagujarati": "\u0A9E",
"nyagurmukhi": "\u0A1E",
"o": "\u006F",
"oacute": "\u00F3",
"oangthai": "\u0E2D",
"obarred": "\u0275",
"obarredcyrillic": "\u04E9",
"obarreddieresiscyrillic": "\u04EB",
"obengali": "\u0993",
"obopomofo": "\u311B",
"obreve": "\u014F",
"ocandradeva": "\u0911",
"ocandragujarati": "\u0A91",
"ocandravowelsigndeva": "\u0949",
"ocandravowelsigngujarati": "\u0AC9",
"ocaron": "\u01D2",
"ocircle": "\u24DE",
"ocircumflex": "\u00F4",
"ocircumflexacute": "\u1ED1",
"ocircumflexdotbelow": "\u1ED9",
"ocircumflexgrave": "\u1ED3",
"ocircumflexhookabove": "\u1ED5",
"ocircumflextilde": "\u1ED7",
"ocyrillic": "\u043E",
"odblacute": "\u0151",
"odblgrave": "\u020D",
"odeva": "\u0913",
"odieresis": "\u00F6",
"odieresiscyrillic": "\u04E7",
"odotbelow": "\u1ECD",
"oe": "\u0153",
"oekorean": "\u315A",
"ogonek": "\u02DB",
"ogonekcmb": "\u0328",
"ograve": "\u00F2",
"ogujarati": "\u0A93",
"oharmenian": "\u0585",
"ohiragana": "\u304A",
"ohookabove": "\u1ECF",
"ohorn": "\u01A1",
"ohornacute": "\u1EDB",
"ohorndotbelow": "\u1EE3",
"ohorngrave": "\u1EDD",
"ohornhookabove": "\u1EDF",
"ohorntilde": "\u1EE1",
"ohungarumlaut": "\u0151",
"oi": "\u01A3",
"oinvertedbreve": "\u020F",
"okatakana": "\u30AA",
"okatakanahalfwidth": "\uFF75",
"okorean": "\u3157",
"olehebrew": "\u05AB",
"omacron": "\u014D",
"omacronacute": "\u1E53",
"omacrongrave": "\u1E51",
"omdeva": "\u0950",
"omega": "\u03C9",
"omega1": "\u03D6",
"omegacyrillic": "\u0461",
"omegalatinclosed": "\u0277",
"omegaroundcyrillic": "\u047B",
"omegatitlocyrillic": "\u047D",
"omegatonos": "\u03CE",
"omgujarati": "\u0AD0",
"omicron": "\u03BF",
"omicrontonos": "\u03CC",
"omonospace": "\uFF4F",
"one": "\u0031",
"onearabic": "\u0661",
"onebengali": "\u09E7",
"onecircle": "\u2460",
"onecircleinversesansserif": "\u278A",
"onedeva": "\u0967",
"onedotenleader": "\u2024",
"oneeighth": "\u215B",
"onefitted": "\uF6DC",
"onegujarati": "\u0AE7",
"onegurmukhi": "\u0A67",
"onehackarabic": "\u0661",
"onehalf": "\u00BD",
"onehangzhou": "\u3021",
"oneideographicparen": "\u3220",
"oneinferior": "\u2081",
"onemonospace": "\uFF11",
"onenumeratorbengali": "\u09F4",
"oneoldstyle": "\uF731",
"oneparen": "\u2474",
"oneperiod": "\u2488",
"onepersian": "\u06F1",
"onequarter": "\u00BC",
"oneroman": "\u2170",
"onesuperior": "\u00B9",
"onethai": "\u0E51",
"onethird": "\u2153",
"oogonek": "\u01EB",
"oogonekmacron": "\u01ED",
"oogurmukhi": "\u0A13",
"oomatragurmukhi": "\u0A4B",
"oopen": "\u0254",
"oparen": "\u24AA",
"openbullet": "\u25E6",
"option": "\u2325",
"ordfeminine": "\u00AA",
"ordmasculine": "\u00BA",
"orthogonal": "\u221F",
"oshortdeva": "\u0912",
"oshortvowelsigndeva": "\u094A",
"oslash": "\u00F8",
"oslashacute": "\u01FF",
"osmallhiragana": "\u3049",
"osmallkatakana": "\u30A9",
"osmallkatakanahalfwidth": "\uFF6B",
"ostrokeacute": "\u01FF",
"osuperior": "\uF6F0",
"otcyrillic": "\u047F",
"otilde": "\u00F5",
"otildeacute": "\u1E4D",
"otildedieresis": "\u1E4F",
"oubopomofo": "\u3121",
"overline": "\u203E",
"overlinecenterline": "\uFE4A",
"overlinecmb": "\u0305",
"overlinedashed": "\uFE49",
"overlinedblwavy": "\uFE4C",
"overlinewavy": "\uFE4B",
"overscore": "\u00AF",
"ovowelsignbengali": "\u09CB",
"ovowelsigndeva": "\u094B",
"ovowelsigngujarati": "\u0ACB",
"p": "\u0070",
"paampssquare": "\u3380",
"paasentosquare": "\u332B",
"pabengali": "\u09AA",
"pacute": "\u1E55",
"padeva": "\u092A",
"pagedown": "\u21DF",
"pageup": "\u21DE",
"pagujarati": "\u0AAA",
"pagurmukhi": "\u0A2A",
"pahiragana": "\u3071",
"paiyannoithai": "\u0E2F",
"pakatakana": "\u30D1",
"palatalizationcyrilliccmb": "\u0484",
"palochkacyrillic": "\u04C0",
"pansioskorean": "\u317F",
"paragraph": "\u00B6",
"parallel": "\u2225",
"parenleft": "\u0028",
"parenleftaltonearabic": "\uFD3E",
"parenleftbt": "\uF8ED",
"parenleftex": "\uF8EC",
"parenleftinferior": "\u208D",
"parenleftmonospace": "\uFF08",
"parenleftsmall": "\uFE59",
"parenleftsuperior": "\u207D",
"parenlefttp": "\uF8EB",
"parenleftvertical": "\uFE35",
"parenright": "\u0029",
"parenrightaltonearabic": "\uFD3F",
"parenrightbt": "\uF8F8",
"parenrightex": "\uF8F7",
"parenrightinferior": "\u208E",
"parenrightmonospace": "\uFF09",
"parenrightsmall": "\uFE5A",
"parenrightsuperior": "\u207E",
"parenrighttp": "\uF8F6",
"parenrightvertical": "\uFE36",
"partialdiff": "\u2202",
"paseqhebrew": "\u05C0",
"pashtahebrew": "\u0599",
"pasquare": "\u33A9",
"patah": "\u05B7",
"patah11": "\u05B7",
"patah1d": "\u05B7",
"patah2a": "\u05B7",
"patahhebrew": "\u05B7",
"patahnarrowhebrew": "\u05B7",
"patahquarterhebrew": "\u05B7",
"patahwidehebrew": "\u05B7",
"pazerhebrew": "\u05A1",
"pbopomofo": "\u3106",
"pcircle": "\u24DF",
"pdotaccent": "\u1E57",
"pe": "\u05E4",
"pecyrillic": "\u043F",
"pedagesh": "\uFB44",
"pedageshhebrew": "\uFB44",
"peezisquare": "\u333B",
"pefinaldageshhebrew": "\uFB43",
"peharabic": "\u067E",
"peharmenian": "\u057A",
"pehebrew": "\u05E4",
"pehfinalarabic": "\uFB57",
"pehinitialarabic": "\uFB58",
"pehiragana": "\u307A",
"pehmedialarabic": "\uFB59",
"pekatakana": "\u30DA",
"pemiddlehookcyrillic": "\u04A7",
"perafehebrew": "\uFB4E",
"percent": "\u0025",
"percentarabic": "\u066A",
"percentmonospace": "\uFF05",
"percentsmall": "\uFE6A",
"period": "\u002E",
"periodarmenian": "\u0589",
"periodcentered": "\u00B7",
"periodhalfwidth": "\uFF61",
"periodinferior": "\uF6E7",
"periodmonospace": "\uFF0E",
"periodsmall": "\uFE52",
"periodsuperior": "\uF6E8",
"perispomenigreekcmb": "\u0342",
"perpendicular": "\u22A5",
"perthousand": "\u2030",
"peseta": "\u20A7",
"pfsquare": "\u338A",
"phabengali": "\u09AB",
"phadeva": "\u092B",
"phagujarati": "\u0AAB",
"phagurmukhi": "\u0A2B",
"phi": "\u03C6",
"phi1": "\u03D5",
"phieuphacirclekorean": "\u327A",
"phieuphaparenkorean": "\u321A",
"phieuphcirclekorean": "\u326C",
"phieuphkorean": "\u314D",
"phieuphparenkorean": "\u320C",
"philatin": "\u0278",
"phinthuthai": "\u0E3A",
"phisymbolgreek": "\u03D5",
"phook": "\u01A5",
"phophanthai": "\u0E1E",
"phophungthai": "\u0E1C",
"phosamphaothai": "\u0E20",
"pi": "\u03C0",
"pieupacirclekorean": "\u3273",
"pieupaparenkorean": "\u3213",
"pieupcieuckorean": "\u3176",
"pieupcirclekorean": "\u3265",
"pieupkiyeokkorean": "\u3172",
"pieupkorean": "\u3142",
"pieupparenkorean": "\u3205",
"pieupsioskiyeokkorean": "\u3174",
"pieupsioskorean": "\u3144",
"pieupsiostikeutkorean": "\u3175",
"pieupthieuthkorean": "\u3177",
"pieuptikeutkorean": "\u3173",
"pihiragana": "\u3074",
"pikatakana": "\u30D4",
"pisymbolgreek": "\u03D6",
"piwrarmenian": "\u0583",
"plus": "\u002B",
"plusbelowcmb": "\u031F",
"pluscircle": "\u2295",
"plusminus": "\u00B1",
"plusmod": "\u02D6",
"plusmonospace": "\uFF0B",
"plussmall": "\uFE62",
"plussuperior": "\u207A",
"pmonospace": "\uFF50",
"pmsquare": "\u33D8",
"pohiragana": "\u307D",
"pointingindexdownwhite": "\u261F",
"pointingindexleftwhite": "\u261C",
"pointingindexrightwhite": "\u261E",
"pointingindexupwhite": "\u261D",
"pokatakana": "\u30DD",
"poplathai": "\u0E1B",
"postalmark": "\u3012",
"postalmarkface": "\u3020",
"pparen": "\u24AB",
"precedes": "\u227A",
"prescription": "\u211E",
"primemod": "\u02B9",
"primereversed": "\u2035",
"product": "\u220F",
"projective": "\u2305",
"prolongedkana": "\u30FC",
"propellor": "\u2318",
"propersubset": "\u2282",
"propersuperset": "\u2283",
"proportion": "\u2237",
"proportional": "\u221D",
"psi": "\u03C8",
"psicyrillic": "\u0471",
"psilipneumatacyrilliccmb": "\u0486",
"pssquare": "\u33B0",
"puhiragana": "\u3077",
"pukatakana": "\u30D7",
"pvsquare": "\u33B4",
"pwsquare": "\u33BA",
"q": "\u0071",
"qadeva": "\u0958",
"qadmahebrew": "\u05A8",
"qafarabic": "\u0642",
"qaffinalarabic": "\uFED6",
"qafinitialarabic": "\uFED7",
"qafmedialarabic": "\uFED8",
"qamats": "\u05B8",
"qamats10": "\u05B8",
"qamats1a": "\u05B8",
"qamats1c": "\u05B8",
"qamats27": "\u05B8",
"qamats29": "\u05B8",
"qamats33": "\u05B8",
"qamatsde": "\u05B8",
"qamatshebrew": "\u05B8",
"qamatsnarrowhebrew": "\u05B8",
"qamatsqatanhebrew": "\u05B8",
"qamatsqatannarrowhebrew": "\u05B8",
"qamatsqatanquarterhebrew": "\u05B8",
"qamatsqatanwidehebrew": "\u05B8",
"qamatsquarterhebrew": "\u05B8",
"qamatswidehebrew": "\u05B8",
"qarneyparahebrew": "\u059F",
"qbopomofo": "\u3111",
"qcircle": "\u24E0",
"qhook": "\u02A0",
"qmonospace": "\uFF51",
"qof": "\u05E7",
"qofdagesh": "\uFB47",
"qofdageshhebrew": "\uFB47",
"qofhatafpatah": "\u05E7\u05B2",
"qofhatafpatahhebrew": "\u05E7\u05B2",
"qofhatafsegol": "\u05E7\u05B1",
"qofhatafsegolhebrew": "\u05E7\u05B1",
"qofhebrew": "\u05E7",
"qofhiriq": "\u05E7\u05B4",
"qofhiriqhebrew": "\u05E7\u05B4",
"qofholam": "\u05E7\u05B9",
"qofholamhebrew": "\u05E7\u05B9",
"qofpatah": "\u05E7\u05B7",
"qofpatahhebrew": "\u05E7\u05B7",
"qofqamats": "\u05E7\u05B8",
"qofqamatshebrew": "\u05E7\u05B8",
"qofqubuts": "\u05E7\u05BB",
"qofqubutshebrew": "\u05E7\u05BB",
"qofsegol": "\u05E7\u05B6",
"qofsegolhebrew": "\u05E7\u05B6",
"qofsheva": "\u05E7\u05B0",
"qofshevahebrew": "\u05E7\u05B0",
"qoftsere": "\u05E7\u05B5",
"qoftserehebrew": "\u05E7\u05B5",
"qparen": "\u24AC",
"quarternote": "\u2669",
"qubuts": "\u05BB",
"qubuts18": "\u05BB",
"qubuts25": "\u05BB",
"qubuts31": "\u05BB",
"qubutshebrew": "\u05BB",
"qubutsnarrowhebrew": "\u05BB",
"qubutsquarterhebrew": "\u05BB",
"qubutswidehebrew": "\u05BB",
"question": "\u003F",
"questionarabic": "\u061F",
"questionarmenian": "\u055E",
"questiondown": "\u00BF",
"questiondownsmall": "\uF7BF",
"questiongreek": "\u037E",
"questionmonospace": "\uFF1F",
"questionsmall": "\uF73F",
"quotedbl": "\u0022",
"quotedblbase": "\u201E",
"quotedblleft": "\u201C",
"quotedblmonospace": "\uFF02",
"quotedblprime": "\u301E",
"quotedblprimereversed": "\u301D",
"quotedblright": "\u201D",
"quoteleft": "\u2018",
"quoteleftreversed": "\u201B",
"quotereversed": "\u201B",
"quoteright": "\u2019",
"quoterightn": "\u0149",
"quotesinglbase": "\u201A",
"quotesingle": "\u0027",
"quotesinglemonospace": "\uFF07",
"r": "\u0072",
"raarmenian": "\u057C",
"rabengali": "\u09B0",
"racute": "\u0155",
"radeva": "\u0930",
"radical": "\u221A",
"radicalex": "\uF8E5",
"radoverssquare": "\u33AE",
"radoverssquaredsquare": "\u33AF",
"radsquare": "\u33AD",
"rafe": "\u05BF",
"rafehebrew": "\u05BF",
"ragujarati": "\u0AB0",
"ragurmukhi": "\u0A30",
"rahiragana": "\u3089",
"rakatakana": "\u30E9",
"rakatakanahalfwidth": "\uFF97",
"ralowerdiagonalbengali": "\u09F1",
"ramiddlediagonalbengali": "\u09F0",
"ramshorn": "\u0264",
"ratio": "\u2236",
"rbopomofo": "\u3116",
"rcaron": "\u0159",
"rcedilla": "\u0157",
"rcircle": "\u24E1",
"rcommaaccent": "\u0157",
"rdblgrave": "\u0211",
"rdotaccent": "\u1E59",
"rdotbelow": "\u1E5B",
"rdotbelowmacron": "\u1E5D",
"referencemark": "\u203B",
"reflexsubset": "\u2286",
"reflexsuperset": "\u2287",
"registered": "\u00AE",
"registersans": "\uF8E8",
"registerserif": "\uF6DA",
"reharabic": "\u0631",
"reharmenian": "\u0580",
"rehfinalarabic": "\uFEAE",
"rehiragana": "\u308C",
"rehyehaleflamarabic": "\u0631\uFEF3\uFE8E\u0644",
"rekatakana": "\u30EC",
"rekatakanahalfwidth": "\uFF9A",
"resh": "\u05E8",
"reshdageshhebrew": "\uFB48",
"reshhatafpatah": "\u05E8\u05B2",
"reshhatafpatahhebrew": "\u05E8\u05B2",
"reshhatafsegol": "\u05E8\u05B1",
"reshhatafsegolhebrew": "\u05E8\u05B1",
"reshhebrew": "\u05E8",
"reshhiriq": "\u05E8\u05B4",
"reshhiriqhebrew": "\u05E8\u05B4",
"reshholam": "\u05E8\u05B9",
"reshholamhebrew": "\u05E8\u05B9",
"reshpatah": "\u05E8\u05B7",
"reshpatahhebrew": "\u05E8\u05B7",
"reshqamats": "\u05E8\u05B8",
"reshqamatshebrew": "\u05E8\u05B8",
"reshqubuts": "\u05E8\u05BB",
"reshqubutshebrew": "\u05E8\u05BB",
"reshsegol": "\u05E8\u05B6",
"reshsegolhebrew": "\u05E8\u05B6",
"reshsheva": "\u05E8\u05B0",
"reshshevahebrew": "\u05E8\u05B0",
"reshtsere": "\u05E8\u05B5",
"reshtserehebrew": "\u05E8\u05B5",
"reversedtilde": "\u223D",
"reviahebrew": "\u0597",
"reviamugrashhebrew": "\u0597",
"revlogicalnot": "\u2310",
"rfishhook": "\u027E",
"rfishhookreversed": "\u027F",
"rhabengali": "\u09DD",
"rhadeva": "\u095D",
"rho": "\u03C1",
"rhook": "\u027D",
"rhookturned": "\u027B",
"rhookturnedsuperior": "\u02B5",
"rhosymbolgreek": "\u03F1",
"rhotichookmod": "\u02DE",
"rieulacirclekorean": "\u3271",
"rieulaparenkorean": "\u3211",
"rieulcirclekorean": "\u3263",
"rieulhieuhkorean": "\u3140",
"rieulkiyeokkorean": "\u313A",
"rieulkiyeoksioskorean": "\u3169",
"rieulkorean": "\u3139",
"rieulmieumkorean": "\u313B",
"rieulpansioskorean": "\u316C",
"rieulparenkorean": "\u3203",
"rieulphieuphkorean": "\u313F",
"rieulpieupkorean": "\u313C",
"rieulpieupsioskorean": "\u316B",
"rieulsioskorean": "\u313D",
"rieulthieuthkorean": "\u313E",
"rieultikeutkorean": "\u316A",
"rieulyeorinhieuhkorean": "\u316D",
"rightangle": "\u221F",
"righttackbelowcmb": "\u0319",
"righttriangle": "\u22BF",
"rihiragana": "\u308A",
"rikatakana": "\u30EA",
"rikatakanahalfwidth": "\uFF98",
"ring": "\u02DA",
"ringbelowcmb": "\u0325",
"ringcmb": "\u030A",
"ringhalfleft": "\u02BF",
"ringhalfleftarmenian": "\u0559",
"ringhalfleftbelowcmb": "\u031C",
"ringhalfleftcentered": "\u02D3",
"ringhalfright": "\u02BE",
"ringhalfrightbelowcmb": "\u0339",
"ringhalfrightcentered": "\u02D2",
"rinvertedbreve": "\u0213",
"rittorusquare": "\u3351",
"rlinebelow": "\u1E5F",
"rlongleg": "\u027C",
"rlonglegturned": "\u027A",
"rmonospace": "\uFF52",
"rohiragana": "\u308D",
"rokatakana": "\u30ED",
"rokatakanahalfwidth": "\uFF9B",
"roruathai": "\u0E23",
"rparen": "\u24AD",
"rrabengali": "\u09DC",
"rradeva": "\u0931",
"rragurmukhi": "\u0A5C",
"rreharabic": "\u0691",
"rrehfinalarabic": "\uFB8D",
"rrvocalicbengali": "\u09E0",
"rrvocalicdeva": "\u0960",
"rrvocalicgujarati": "\u0AE0",
"rrvocalicvowelsignbengali": "\u09C4",
"rrvocalicvowelsigndeva": "\u0944",
"rrvocalicvowelsigngujarati": "\u0AC4",
"rsuperior": "\uF6F1",
"rtblock": "\u2590",
"rturned": "\u0279",
"rturnedsuperior": "\u02B4",
"ruhiragana": "\u308B",
"rukatakana": "\u30EB",
"rukatakanahalfwidth": "\uFF99",
"rupeemarkbengali": "\u09F2",
"rupeesignbengali": "\u09F3",
"rupiah": "\uF6DD",
"ruthai": "\u0E24",
"rvocalicbengali": "\u098B",
"rvocalicdeva": "\u090B",
"rvocalicgujarati": "\u0A8B",
"rvocalicvowelsignbengali": "\u09C3",
"rvocalicvowelsigndeva": "\u0943",
"rvocalicvowelsigngujarati": "\u0AC3",
"s": "\u0073",
"sabengali": "\u09B8",
"sacute": "\u015B",
"sacutedotaccent": "\u1E65",
"sadarabic": "\u0635",
"sadeva": "\u0938",
"sadfinalarabic": "\uFEBA",
"sadinitialarabic": "\uFEBB",
"sadmedialarabic": "\uFEBC",
"sagujarati": "\u0AB8",
"sagurmukhi": "\u0A38",
"sahiragana": "\u3055",
"sakatakana": "\u30B5",
"sakatakanahalfwidth": "\uFF7B",
"sallallahoualayhewasallamarabic": "\uFDFA",
"samekh": "\u05E1",
"samekhdagesh": "\uFB41",
"samekhdageshhebrew": "\uFB41",
"samekhhebrew": "\u05E1",
"saraaathai": "\u0E32",
"saraaethai": "\u0E41",
"saraaimaimalaithai": "\u0E44",
"saraaimaimuanthai": "\u0E43",
"saraamthai": "\u0E33",
"saraathai": "\u0E30",
"saraethai": "\u0E40",
"saraiileftthai": "\uF886",
"saraiithai": "\u0E35",
"saraileftthai": "\uF885",
"saraithai": "\u0E34",
"saraothai": "\u0E42",
"saraueeleftthai": "\uF888",
"saraueethai": "\u0E37",
"saraueleftthai": "\uF887",
"sarauethai": "\u0E36",
"sarauthai": "\u0E38",
"sarauuthai": "\u0E39",
"sbopomofo": "\u3119",
"scaron": "\u0161",
"scarondotaccent": "\u1E67",
"scedilla": "\u015F",
"schwa": "\u0259",
"schwacyrillic": "\u04D9",
"schwadieresiscyrillic": "\u04DB",
"schwahook": "\u025A",
"scircle": "\u24E2",
"scircumflex": "\u015D",
"scommaaccent": "\u0219",
"sdotaccent": "\u1E61",
"sdotbelow": "\u1E63",
"sdotbelowdotaccent": "\u1E69",
"seagullbelowcmb": "\u033C",
"second": "\u2033",
"secondtonechinese": "\u02CA",
"section": "\u00A7",
"seenarabic": "\u0633",
"seenfinalarabic": "\uFEB2",
"seeninitialarabic": "\uFEB3",
"seenmedialarabic": "\uFEB4",
"segol": "\u05B6",
"segol13": "\u05B6",
"segol1f": "\u05B6",
"segol2c": "\u05B6",
"segolhebrew": "\u05B6",
"segolnarrowhebrew": "\u05B6",
"segolquarterhebrew": "\u05B6",
"segoltahebrew": "\u0592",
"segolwidehebrew": "\u05B6",
"seharmenian": "\u057D",
"sehiragana": "\u305B",
"sekatakana": "\u30BB",
"sekatakanahalfwidth": "\uFF7E",
"semicolon": "\u003B",
"semicolonarabic": "\u061B",
"semicolonmonospace": "\uFF1B",
"semicolonsmall": "\uFE54",
"semivoicedmarkkana": "\u309C",
"semivoicedmarkkanahalfwidth": "\uFF9F",
"sentisquare": "\u3322",
"sentosquare": "\u3323",
"seven": "\u0037",
"sevenarabic": "\u0667",
"sevenbengali": "\u09ED",
"sevencircle": "\u2466",
"sevencircleinversesansserif": "\u2790",
"sevendeva": "\u096D",
"seveneighths": "\u215E",
"sevengujarati": "\u0AED",
"sevengurmukhi": "\u0A6D",
"sevenhackarabic": "\u0667",
"sevenhangzhou": "\u3027",
"sevenideographicparen": "\u3226",
"seveninferior": "\u2087",
"sevenmonospace": "\uFF17",
"sevenoldstyle": "\uF737",
"sevenparen": "\u247A",
"sevenperiod": "\u248E",
"sevenpersian": "\u06F7",
"sevenroman": "\u2176",
"sevensuperior": "\u2077",
"seventeencircle": "\u2470",
"seventeenparen": "\u2484",
"seventeenperiod": "\u2498",
"seventhai": "\u0E57",
"sfthyphen": "\u00AD",
"shaarmenian": "\u0577",
"shabengali": "\u09B6",
"shacyrillic": "\u0448",
"shaddaarabic": "\u0651",
"shaddadammaarabic": "\uFC61",
"shaddadammatanarabic": "\uFC5E",
"shaddafathaarabic": "\uFC60",
"shaddafathatanarabic": "\u0651\u064B",
"shaddakasraarabic": "\uFC62",
"shaddakasratanarabic": "\uFC5F",
"shade": "\u2592",
"shadedark": "\u2593",
"shadelight": "\u2591",
"shademedium": "\u2592",
"shadeva": "\u0936",
"shagujarati": "\u0AB6",
"shagurmukhi": "\u0A36",
"shalshelethebrew": "\u0593",
"shbopomofo": "\u3115",
"shchacyrillic": "\u0449",
"sheenarabic": "\u0634",
"sheenfinalarabic": "\uFEB6",
"sheeninitialarabic": "\uFEB7",
"sheenmedialarabic": "\uFEB8",
"sheicoptic": "\u03E3",
"sheqel": "\u20AA",
"sheqelhebrew": "\u20AA",
"sheva": "\u05B0",
"sheva115": "\u05B0",
"sheva15": "\u05B0",
"sheva22": "\u05B0",
"sheva2e": "\u05B0",
"shevahebrew": "\u05B0",
"shevanarrowhebrew": "\u05B0",
"shevaquarterhebrew": "\u05B0",
"shevawidehebrew": "\u05B0",
"shhacyrillic": "\u04BB",
"shimacoptic": "\u03ED",
"shin": "\u05E9",
"shindagesh": "\uFB49",
"shindageshhebrew": "\uFB49",
"shindageshshindot": "\uFB2C",
"shindageshshindothebrew": "\uFB2C",
"shindageshsindot": "\uFB2D",
"shindageshsindothebrew": "\uFB2D",
"shindothebrew": "\u05C1",
"shinhebrew": "\u05E9",
"shinshindot": "\uFB2A",
"shinshindothebrew": "\uFB2A",
"shinsindot": "\uFB2B",
"shinsindothebrew": "\uFB2B",
"shook": "\u0282",
"sigma": "\u03C3",
"sigma1": "\u03C2",
"sigmafinal": "\u03C2",
"sigmalunatesymbolgreek": "\u03F2",
"sihiragana": "\u3057",
"sikatakana": "\u30B7",
"sikatakanahalfwidth": "\uFF7C",
"siluqhebrew": "\u05BD",
"siluqlefthebrew": "\u05BD",
"similar": "\u223C",
"sindothebrew": "\u05C2",
"siosacirclekorean": "\u3274",
"siosaparenkorean": "\u3214",
"sioscieuckorean": "\u317E",
"sioscirclekorean": "\u3266",
"sioskiyeokkorean": "\u317A",
"sioskorean": "\u3145",
"siosnieunkorean": "\u317B",
"siosparenkorean": "\u3206",
"siospieupkorean": "\u317D",
"siostikeutkorean": "\u317C",
"six": "\u0036",
"sixarabic": "\u0666",
"sixbengali": "\u09EC",
"sixcircle": "\u2465",
"sixcircleinversesansserif": "\u278F",
"sixdeva": "\u096C",
"sixgujarati": "\u0AEC",
"sixgurmukhi": "\u0A6C",
"sixhackarabic": "\u0666",
"sixhangzhou": "\u3026",
"sixideographicparen": "\u3225",
"sixinferior": "\u2086",
"sixmonospace": "\uFF16",
"sixoldstyle": "\uF736",
"sixparen": "\u2479",
"sixperiod": "\u248D",
"sixpersian": "\u06F6",
"sixroman": "\u2175",
"sixsuperior": "\u2076",
"sixteencircle": "\u246F",
"sixteencurrencydenominatorbengali": "\u09F9",
"sixteenparen": "\u2483",
"sixteenperiod": "\u2497",
"sixthai": "\u0E56",
"slash": "\u002F",
"slashmonospace": "\uFF0F",
"slong": "\u017F",
"slongdotaccent": "\u1E9B",
"smileface": "\u263A",
"smonospace": "\uFF53",
"sofpasuqhebrew": "\u05C3",
"softhyphen": "\u00AD",
"softsigncyrillic": "\u044C",
"sohiragana": "\u305D",
"sokatakana": "\u30BD",
"sokatakanahalfwidth": "\uFF7F",
"soliduslongoverlaycmb": "\u0338",
"solidusshortoverlaycmb": "\u0337",
"sorusithai": "\u0E29",
"sosalathai": "\u0E28",
"sosothai": "\u0E0B",
"sosuathai": "\u0E2A",
"space": "\u0020",
"spacehackarabic": "\u0020",
"spade": "\u2660",
"spadesuitblack": "\u2660",
"spadesuitwhite": "\u2664",
"sparen": "\u24AE",
"squarebelowcmb": "\u033B",
"squarecc": "\u33C4",
"squarecm": "\u339D",
"squarediagonalcrosshatchfill": "\u25A9",
"squarehorizontalfill": "\u25A4",
"squarekg": "\u338F",
"squarekm": "\u339E",
"squarekmcapital": "\u33CE",
"squareln": "\u33D1",
"squarelog": "\u33D2",
"squaremg": "\u338E",
"squaremil": "\u33D5",
"squaremm": "\u339C",
"squaremsquared": "\u33A1",
"squareorthogonalcrosshatchfill": "\u25A6",
"squareupperlefttolowerrightfill": "\u25A7",
"squareupperrighttolowerleftfill": "\u25A8",
"squareverticalfill": "\u25A5",
"squarewhitewithsmallblack": "\u25A3",
"srsquare": "\u33DB",
"ssabengali": "\u09B7",
"ssadeva": "\u0937",
"ssagujarati": "\u0AB7",
"ssangcieuckorean": "\u3149",
"ssanghieuhkorean": "\u3185",
"ssangieungkorean": "\u3180",
"ssangkiyeokkorean": "\u3132",
"ssangnieunkorean": "\u3165",
"ssangpieupkorean": "\u3143",
"ssangsioskorean": "\u3146",
"ssangtikeutkorean": "\u3138",
"ssuperior": "\uF6F2",
"sterling": "\u00A3",
"sterlingmonospace": "\uFFE1",
"strokelongoverlaycmb": "\u0336",
"strokeshortoverlaycmb": "\u0335",
"subset": "\u2282",
"subsetnotequal": "\u228A",
"subsetorequal": "\u2286",
"succeeds": "\u227B",
"suchthat": "\u220B",
"suhiragana": "\u3059",
"sukatakana": "\u30B9",
"sukatakanahalfwidth": "\uFF7D",
"sukunarabic": "\u0652",
"summation": "\u2211",
"sun": "\u263C",
"superset": "\u2283",
"supersetnotequal": "\u228B",
"supersetorequal": "\u2287",
"svsquare": "\u33DC",
"syouwaerasquare": "\u337C",
"t": "\u0074",
"tabengali": "\u09A4",
"tackdown": "\u22A4",
"tackleft": "\u22A3",
"tadeva": "\u0924",
"tagujarati": "\u0AA4",
"tagurmukhi": "\u0A24",
"taharabic": "\u0637",
"tahfinalarabic": "\uFEC2",
"tahinitialarabic": "\uFEC3",
"tahiragana": "\u305F",
"tahmedialarabic": "\uFEC4",
"taisyouerasquare": "\u337D",
"takatakana": "\u30BF",
"takatakanahalfwidth": "\uFF80",
"tatweelarabic": "\u0640",
"tau": "\u03C4",
"tav": "\u05EA",
"tavdages": "\uFB4A",
"tavdagesh": "\uFB4A",
"tavdageshhebrew": "\uFB4A",
"tavhebrew": "\u05EA",
"tbar": "\u0167",
"tbopomofo": "\u310A",
"tcaron": "\u0165",
"tccurl": "\u02A8",
"tcedilla": "\u0163",
"tcheharabic": "\u0686",
"tchehfinalarabic": "\uFB7B",
"tchehinitialarabic": "\uFB7C",
"tchehmedialarabic": "\uFB7D",
"tchehmeeminitialarabic": "\uFB7C\uFEE4",
"tcircle": "\u24E3",
"tcircumflexbelow": "\u1E71",
"tcommaaccent": "\u0163",
"tdieresis": "\u1E97",
"tdotaccent": "\u1E6B",
"tdotbelow": "\u1E6D",
"tecyrillic": "\u0442",
"tedescendercyrillic": "\u04AD",
"teharabic": "\u062A",
"tehfinalarabic": "\uFE96",
"tehhahinitialarabic": "\uFCA2",
"tehhahisolatedarabic": "\uFC0C",
"tehinitialarabic": "\uFE97",
"tehiragana": "\u3066",
"tehjeeminitialarabic": "\uFCA1",
"tehjeemisolatedarabic": "\uFC0B",
"tehmarbutaarabic": "\u0629",
"tehmarbutafinalarabic": "\uFE94",
"tehmedialarabic": "\uFE98",
"tehmeeminitialarabic": "\uFCA4",
"tehmeemisolatedarabic": "\uFC0E",
"tehnoonfinalarabic": "\uFC73",
"tekatakana": "\u30C6",
"tekatakanahalfwidth": "\uFF83",
"telephone": "\u2121",
"telephoneblack": "\u260E",
"telishagedolahebrew": "\u05A0",
"telishaqetanahebrew": "\u05A9",
"tencircle": "\u2469",
"tenideographicparen": "\u3229",
"tenparen": "\u247D",
"tenperiod": "\u2491",
"tenroman": "\u2179",
"tesh": "\u02A7",
"tet": "\u05D8",
"tetdagesh": "\uFB38",
"tetdageshhebrew": "\uFB38",
"tethebrew": "\u05D8",
"tetsecyrillic": "\u04B5",
"tevirhebrew": "\u059B",
"tevirlefthebrew": "\u059B",
"thabengali": "\u09A5",
"thadeva": "\u0925",
"thagujarati": "\u0AA5",
"thagurmukhi": "\u0A25",
"thalarabic": "\u0630",
"thalfinalarabic": "\uFEAC",
"thanthakhatlowleftthai": "\uF898",
"thanthakhatlowrightthai": "\uF897",
"thanthakhatthai": "\u0E4C",
"thanthakhatupperleftthai": "\uF896",
"theharabic": "\u062B",
"thehfinalarabic": "\uFE9A",
"thehinitialarabic": "\uFE9B",
"thehmedialarabic": "\uFE9C",
"thereexists": "\u2203",
"therefore": "\u2234",
"theta": "\u03B8",
"theta1": "\u03D1",
"thetasymbolgreek": "\u03D1",
"thieuthacirclekorean": "\u3279",
"thieuthaparenkorean": "\u3219",
"thieuthcirclekorean": "\u326B",
"thieuthkorean": "\u314C",
"thieuthparenkorean": "\u320B",
"thirteencircle": "\u246C",
"thirteenparen": "\u2480",
"thirteenperiod": "\u2494",
"thonangmonthothai": "\u0E11",
"thook": "\u01AD",
"thophuthaothai": "\u0E12",
"thorn": "\u00FE",
"thothahanthai": "\u0E17",
"thothanthai": "\u0E10",
"thothongthai": "\u0E18",
"thothungthai": "\u0E16",
"thousandcyrillic": "\u0482",
"thousandsseparatorarabic": "\u066C",
"thousandsseparatorpersian": "\u066C",
"three": "\u0033",
"threearabic": "\u0663",
"threebengali": "\u09E9",
"threecircle": "\u2462",
"threecircleinversesansserif": "\u278C",
"threedeva": "\u0969",
"threeeighths": "\u215C",
"threegujarati": "\u0AE9",
"threegurmukhi": "\u0A69",
"threehackarabic": "\u0663",
"threehangzhou": "\u3023",
"threeideographicparen": "\u3222",
"threeinferior": "\u2083",
"threemonospace": "\uFF13",
"threenumeratorbengali": "\u09F6",
"threeoldstyle": "\uF733",
"threeparen": "\u2476",
"threeperiod": "\u248A",
"threepersian": "\u06F3",
"threequarters": "\u00BE",
"threequartersemdash": "\uF6DE",
"threeroman": "\u2172",
"threesuperior": "\u00B3",
"threethai": "\u0E53",
"thzsquare": "\u3394",
"tihiragana": "\u3061",
"tikatakana": "\u30C1",
"tikatakanahalfwidth": "\uFF81",
"tikeutacirclekorean": "\u3270",
"tikeutaparenkorean": "\u3210",
"tikeutcirclekorean": "\u3262",
"tikeutkorean": "\u3137",
"tikeutparenkorean": "\u3202",
"tilde": "\u02DC",
"tildebelowcmb": "\u0330",
"tildecmb": "\u0303",
"tildecomb": "\u0303",
"tildedoublecmb": "\u0360",
"tildeoperator": "\u223C",
"tildeoverlaycmb": "\u0334",
"tildeverticalcmb": "\u033E",
"timescircle": "\u2297",
"tipehahebrew": "\u0596",
"tipehalefthebrew": "\u0596",
"tippigurmukhi": "\u0A70",
"titlocyrilliccmb": "\u0483",
"tiwnarmenian": "\u057F",
"tlinebelow": "\u1E6F",
"tmonospace": "\uFF54",
"toarmenian": "\u0569",
"tohiragana": "\u3068",
"tokatakana": "\u30C8",
"tokatakanahalfwidth": "\uFF84",
"tonebarextrahighmod": "\u02E5",
"tonebarextralowmod": "\u02E9",
"tonebarhighmod": "\u02E6",
"tonebarlowmod": "\u02E8",
"tonebarmidmod": "\u02E7",
"tonefive": "\u01BD",
"tonesix": "\u0185",
"tonetwo": "\u01A8",
"tonos": "\u0384",
"tonsquare": "\u3327",
"topatakthai": "\u0E0F",
"tortoiseshellbracketleft": "\u3014",
"tortoiseshellbracketleftsmall": "\uFE5D",
"tortoiseshellbracketleftvertical": "\uFE39",
"tortoiseshellbracketright": "\u3015",
"tortoiseshellbracketrightsmall": "\uFE5E",
"tortoiseshellbracketrightvertical": "\uFE3A",
"totaothai": "\u0E15",
"tpalatalhook": "\u01AB",
"tparen": "\u24AF",
"trademark": "\u2122",
"trademarksans": "\uF8EA",
"trademarkserif": "\uF6DB",
"tretroflexhook": "\u0288",
"triagdn": "\u25BC",
"triaglf": "\u25C4",
"triagrt": "\u25BA",
"triagup": "\u25B2",
"ts": "\u02A6",
"tsadi": "\u05E6",
"tsadidagesh": "\uFB46",
"tsadidageshhebrew": "\uFB46",
"tsadihebrew": "\u05E6",
"tsecyrillic": "\u0446",
"tsere": "\u05B5",
"tsere12": "\u05B5",
"tsere1e": "\u05B5",
"tsere2b": "\u05B5",
"tserehebrew": "\u05B5",
"tserenarrowhebrew": "\u05B5",
"tserequarterhebrew": "\u05B5",
"tserewidehebrew": "\u05B5",
"tshecyrillic": "\u045B",
"tsuperior": "\uF6F3",
"ttabengali": "\u099F",
"ttadeva": "\u091F",
"ttagujarati": "\u0A9F",
"ttagurmukhi": "\u0A1F",
"tteharabic": "\u0679",
"ttehfinalarabic": "\uFB67",
"ttehinitialarabic": "\uFB68",
"ttehmedialarabic": "\uFB69",
"tthabengali": "\u09A0",
"tthadeva": "\u0920",
"tthagujarati": "\u0AA0",
"tthagurmukhi": "\u0A20",
"tturned": "\u0287",
"tuhiragana": "\u3064",
"tukatakana": "\u30C4",
"tukatakanahalfwidth": "\uFF82",
"tusmallhiragana": "\u3063",
"tusmallkatakana": "\u30C3",
"tusmallkatakanahalfwidth": "\uFF6F",
"twelvecircle": "\u246B",
"twelveparen": "\u247F",
"twelveperiod": "\u2493",
"twelveroman": "\u217B",
"twentycircle": "\u2473",
"twentyhangzhou": "\u5344",
"twentyparen": "\u2487",
"twentyperiod": "\u249B",
"two": "\u0032",
"twoarabic": "\u0662",
"twobengali": "\u09E8",
"twocircle": "\u2461",
"twocircleinversesansserif": "\u278B",
"twodeva": "\u0968",
"twodotenleader": "\u2025",
"twodotleader": "\u2025",
"twodotleadervertical": "\uFE30",
"twogujarati": "\u0AE8",
"twogurmukhi": "\u0A68",
"twohackarabic": "\u0662",
"twohangzhou": "\u3022",
"twoideographicparen": "\u3221",
"twoinferior": "\u2082",
"twomonospace": "\uFF12",
"twonumeratorbengali": "\u09F5",
"twooldstyle": "\uF732",
"twoparen": "\u2475",
"twoperiod": "\u2489",
"twopersian": "\u06F2",
"tworoman": "\u2171",
"twostroke": "\u01BB",
"twosuperior": "\u00B2",
"twothai": "\u0E52",
"twothirds": "\u2154",
"u": "\u0075",
"uacute": "\u00FA",
"ubar": "\u0289",
"ubengali": "\u0989",
"ubopomofo": "\u3128",
"ubreve": "\u016D",
"ucaron": "\u01D4",
"ucircle": "\u24E4",
"ucircumflex": "\u00FB",
"ucircumflexbelow": "\u1E77",
"ucyrillic": "\u0443",
"udattadeva": "\u0951",
"udblacute": "\u0171",
"udblgrave": "\u0215",
"udeva": "\u0909",
"udieresis": "\u00FC",
"udieresisacute": "\u01D8",
"udieresisbelow": "\u1E73",
"udieresiscaron": "\u01DA",
"udieresiscyrillic": "\u04F1",
"udieresisgrave": "\u01DC",
"udieresismacron": "\u01D6",
"udotbelow": "\u1EE5",
"ugrave": "\u00F9",
"ugujarati": "\u0A89",
"ugurmukhi": "\u0A09",
"uhiragana": "\u3046",
"uhookabove": "\u1EE7",
"uhorn": "\u01B0",
"uhornacute": "\u1EE9",
"uhorndotbelow": "\u1EF1",
"uhorngrave": "\u1EEB",
"uhornhookabove": "\u1EED",
"uhorntilde": "\u1EEF",
"uhungarumlaut": "\u0171",
"uhungarumlautcyrillic": "\u04F3",
"uinvertedbreve": "\u0217",
"ukatakana": "\u30A6",
"ukatakanahalfwidth": "\uFF73",
"ukcyrillic": "\u0479",
"ukorean": "\u315C",
"umacron": "\u016B",
"umacroncyrillic": "\u04EF",
"umacrondieresis": "\u1E7B",
"umatragurmukhi": "\u0A41",
"umonospace": "\uFF55",
"underscore": "\u005F",
"underscoredbl": "\u2017",
"underscoremonospace": "\uFF3F",
"underscorevertical": "\uFE33",
"underscorewavy": "\uFE4F",
"union": "\u222A",
"universal": "\u2200",
"uogonek": "\u0173",
"uparen": "\u24B0",
"upblock": "\u2580",
"upperdothebrew": "\u05C4",
"upsilon": "\u03C5",
"upsilondieresis": "\u03CB",
"upsilondieresistonos": "\u03B0",
"upsilonlatin": "\u028A",
"upsilontonos": "\u03CD",
"uptackbelowcmb": "\u031D",
"uptackmod": "\u02D4",
"uragurmukhi": "\u0A73",
"uring": "\u016F",
"ushortcyrillic": "\u045E",
"usmallhiragana": "\u3045",
"usmallkatakana": "\u30A5",
"usmallkatakanahalfwidth": "\uFF69",
"ustraightcyrillic": "\u04AF",
"ustraightstrokecyrillic": "\u04B1",
"utilde": "\u0169",
"utildeacute": "\u1E79",
"utildebelow": "\u1E75",
"uubengali": "\u098A",
"uudeva": "\u090A",
"uugujarati": "\u0A8A",
"uugurmukhi": "\u0A0A",
"uumatragurmukhi": "\u0A42",
"uuvowelsignbengali": "\u09C2",
"uuvowelsigndeva": "\u0942",
"uuvowelsigngujarati": "\u0AC2",
"uvowelsignbengali": "\u09C1",
"uvowelsigndeva": "\u0941",
"uvowelsigngujarati": "\u0AC1",
"v": "\u0076",
"vadeva": "\u0935",
"vagujarati": "\u0AB5",
"vagurmukhi": "\u0A35",
"vakatakana": "\u30F7",
"vav": "\u05D5",
"vavdagesh": "\uFB35",
"vavdagesh65": "\uFB35",
"vavdageshhebrew": "\uFB35",
"vavhebrew": "\u05D5",
"vavholam": "\uFB4B",
"vavholamhebrew": "\uFB4B",
"vavvavhebrew": "\u05F0",
"vavyodhebrew": "\u05F1",
"vcircle": "\u24E5",
"vdotbelow": "\u1E7F",
"vecyrillic": "\u0432",
"veharabic": "\u06A4",
"vehfinalarabic": "\uFB6B",
"vehinitialarabic": "\uFB6C",
"vehmedialarabic": "\uFB6D",
"vekatakana": "\u30F9",
"venus": "\u2640",
"verticalbar": "\u007C",
"verticallineabovecmb": "\u030D",
"verticallinebelowcmb": "\u0329",
"verticallinelowmod": "\u02CC",
"verticallinemod": "\u02C8",
"vewarmenian": "\u057E",
"vhook": "\u028B",
"vikatakana": "\u30F8",
"viramabengali": "\u09CD",
"viramadeva": "\u094D",
"viramagujarati": "\u0ACD",
"visargabengali": "\u0983",
"visargadeva": "\u0903",
"visargagujarati": "\u0A83",
"vmonospace": "\uFF56",
"voarmenian": "\u0578",
"voicediterationhiragana": "\u309E",
"voicediterationkatakana": "\u30FE",
"voicedmarkkana": "\u309B",
"voicedmarkkanahalfwidth": "\uFF9E",
"vokatakana": "\u30FA",
"vparen": "\u24B1",
"vtilde": "\u1E7D",
"vturned": "\u028C",
"vuhiragana": "\u3094",
"vukatakana": "\u30F4",
"w": "\u0077",
"wacute": "\u1E83",
"waekorean": "\u3159",
"wahiragana": "\u308F",
"wakatakana": "\u30EF",
"wakatakanahalfwidth": "\uFF9C",
"wakorean": "\u3158",
"wasmallhiragana": "\u308E",
"wasmallkatakana": "\u30EE",
"wattosquare": "\u3357",
"wavedash": "\u301C",
"wavyunderscorevertical": "\uFE34",
"wawarabic": "\u0648",
"wawfinalarabic": "\uFEEE",
"wawhamzaabovearabic": "\u0624",
"wawhamzaabovefinalarabic": "\uFE86",
"wbsquare": "\u33DD",
"wcircle": "\u24E6",
"wcircumflex": "\u0175",
"wdieresis": "\u1E85",
"wdotaccent": "\u1E87",
"wdotbelow": "\u1E89",
"wehiragana": "\u3091",
"weierstrass": "\u2118",
"wekatakana": "\u30F1",
"wekorean": "\u315E",
"weokorean": "\u315D",
"wgrave": "\u1E81",
"whitebullet": "\u25E6",
"whitecircle": "\u25CB",
"whitecircleinverse": "\u25D9",
"whitecornerbracketleft": "\u300E",
"whitecornerbracketleftvertical": "\uFE43",
"whitecornerbracketright": "\u300F",
"whitecornerbracketrightvertical": "\uFE44",
"whitediamond": "\u25C7",
"whitediamondcontainingblacksmalldiamond": "\u25C8",
"whitedownpointingsmalltriangle": "\u25BF",
"whitedownpointingtriangle": "\u25BD",
"whiteleftpointingsmalltriangle": "\u25C3",
"whiteleftpointingtriangle": "\u25C1",
"whitelenticularbracketleft": "\u3016",
"whitelenticularbracketright": "\u3017",
"whiterightpointingsmalltriangle": "\u25B9",
"whiterightpointingtriangle": "\u25B7",
"whitesmallsquare": "\u25AB",
"whitesmilingface": "\u263A",
"whitesquare": "\u25A1",
"whitestar": "\u2606",
"whitetelephone": "\u260F",
"whitetortoiseshellbracketleft": "\u3018",
"whitetortoiseshellbracketright": "\u3019",
"whiteuppointingsmalltriangle": "\u25B5",
"whiteuppointingtriangle": "\u25B3",
"wihiragana": "\u3090",
"wikatakana": "\u30F0",
"wikorean": "\u315F",
"wmonospace": "\uFF57",
"wohiragana": "\u3092",
"wokatakana": "\u30F2",
"wokatakanahalfwidth": "\uFF66",
"won": "\u20A9",
"wonmonospace": "\uFFE6",
"wowaenthai": "\u0E27",
"wparen": "\u24B2",
"wring": "\u1E98",
"wsuperior": "\u02B7",
"wturned": "\u028D",
"wynn": "\u01BF",
"x": "\u0078",
"xabovecmb": "\u033D",
"xbopomofo": "\u3112",
"xcircle": "\u24E7",
"xdieresis": "\u1E8D",
"xdotaccent": "\u1E8B",
"xeharmenian": "\u056D",
"xi": "\u03BE",
"xmonospace": "\uFF58",
"xparen": "\u24B3",
"xsuperior": "\u02E3",
"y": "\u0079",
"yaadosquare": "\u334E",
"yabengali": "\u09AF",
"yacute": "\u00FD",
"yadeva": "\u092F",
"yaekorean": "\u3152",
"yagujarati": "\u0AAF",
"yagurmukhi": "\u0A2F",
"yahiragana": "\u3084",
"yakatakana": "\u30E4",
"yakatakanahalfwidth": "\uFF94",
"yakorean": "\u3151",
"yamakkanthai": "\u0E4E",
"yasmallhiragana": "\u3083",
"yasmallkatakana": "\u30E3",
"yasmallkatakanahalfwidth": "\uFF6C",
"yatcyrillic": "\u0463",
"ycircle": "\u24E8",
"ycircumflex": "\u0177",
"ydieresis": "\u00FF",
"ydotaccent": "\u1E8F",
"ydotbelow": "\u1EF5",
"yeharabic": "\u064A",
"yehbarreearabic": "\u06D2",
"yehbarreefinalarabic": "\uFBAF",
"yehfinalarabic": "\uFEF2",
"yehhamzaabovearabic": "\u0626",
"yehhamzaabovefinalarabic": "\uFE8A",
"yehhamzaaboveinitialarabic": "\uFE8B",
"yehhamzaabovemedialarabic": "\uFE8C",
"yehinitialarabic": "\uFEF3",
"yehmedialarabic": "\uFEF4",
"yehmeeminitialarabic": "\uFCDD",
"yehmeemisolatedarabic": "\uFC58",
"yehnoonfinalarabic": "\uFC94",
"yehthreedotsbelowarabic": "\u06D1",
"yekorean": "\u3156",
"yen": "\u00A5",
"yenmonospace": "\uFFE5",
"yeokorean": "\u3155",
"yeorinhieuhkorean": "\u3186",
"yerahbenyomohebrew": "\u05AA",
"yerahbenyomolefthebrew": "\u05AA",
"yericyrillic": "\u044B",
"yerudieresiscyrillic": "\u04F9",
"yesieungkorean": "\u3181",
"yesieungpansioskorean": "\u3183",
"yesieungsioskorean": "\u3182",
"yetivhebrew": "\u059A",
"ygrave": "\u1EF3",
"yhook": "\u01B4",
"yhookabove": "\u1EF7",
"yiarmenian": "\u0575",
"yicyrillic": "\u0457",
"yikorean": "\u3162",
"yinyang": "\u262F",
"yiwnarmenian": "\u0582",
"ymonospace": "\uFF59",
"yod": "\u05D9",
"yoddagesh": "\uFB39",
"yoddageshhebrew": "\uFB39",
"yodhebrew": "\u05D9",
"yodyodhebrew": "\u05F2",
"yodyodpatahhebrew": "\uFB1F",
"yohiragana": "\u3088",
"yoikorean": "\u3189",
"yokatakana": "\u30E8",
"yokatakanahalfwidth": "\uFF96",
"yokorean": "\u315B",
"yosmallhiragana": "\u3087",
"yosmallkatakana": "\u30E7",
"yosmallkatakanahalfwidth": "\uFF6E",
"yotgreek": "\u03F3",
"yoyaekorean": "\u3188",
"yoyakorean": "\u3187",
"yoyakthai": "\u0E22",
"yoyingthai": "\u0E0D",
"yparen": "\u24B4",
"ypogegrammeni": "\u037A",
"ypogegrammenigreekcmb": "\u0345",
"yr": "\u01A6",
"yring": "\u1E99",
"ysuperior": "\u02B8",
"ytilde": "\u1EF9",
"yturned": "\u028E",
"yuhiragana": "\u3086",
"yuikorean": "\u318C",
"yukatakana": "\u30E6",
"yukatakanahalfwidth": "\uFF95",
"yukorean": "\u3160",
"yusbigcyrillic": "\u046B",
"yusbigiotifiedcyrillic": "\u046D",
"yuslittlecyrillic": "\u0467",
"yuslittleiotifiedcyrillic": "\u0469",
"yusmallhiragana": "\u3085",
"yusmallkatakana": "\u30E5",
"yusmallkatakanahalfwidth": "\uFF6D",
"yuyekorean": "\u318B",
"yuyeokorean": "\u318A",
"yyabengali": "\u09DF",
"yyadeva": "\u095F",
"z": "\u007A",
"zaarmenian": "\u0566",
"zacute": "\u017A",
"zadeva": "\u095B",
"zagurmukhi": "\u0A5B",
"zaharabic": "\u0638",
"zahfinalarabic": "\uFEC6",
"zahinitialarabic": "\uFEC7",
"zahiragana": "\u3056",
"zahmedialarabic": "\uFEC8",
"zainarabic": "\u0632",
"zainfinalarabic": "\uFEB0",
"zakatakana": "\u30B6",
"zaqefgadolhebrew": "\u0595",
"zaqefqatanhebrew": "\u0594",
"zarqahebrew": "\u0598",
"zayin": "\u05D6",
"zayindagesh": "\uFB36",
"zayindageshhebrew": "\uFB36",
"zayinhebrew": "\u05D6",
"zbopomofo": "\u3117",
"zcaron": "\u017E",
"zcircle": "\u24E9",
"zcircumflex": "\u1E91",
"zcurl": "\u0291",
"zdot": "\u017C",
"zdotaccent": "\u017C",
"zdotbelow": "\u1E93",
"zecyrillic": "\u0437",
"zedescendercyrillic": "\u0499",
"zedieresiscyrillic": "\u04DF",
"zehiragana": "\u305C",
"zekatakana": "\u30BC",
"zero": "\u0030",
"zeroarabic": "\u0660",
"zerobengali": "\u09E6",
"zerodeva": "\u0966",
"zerogujarati": "\u0AE6",
"zerogurmukhi": "\u0A66",
"zerohackarabic": "\u0660",
"zeroinferior": "\u2080",
"zeromonospace": "\uFF10",
"zerooldstyle": "\uF730",
"zeropersian": "\u06F0",
"zerosuperior": "\u2070",
"zerothai": "\u0E50",
"zerowidthjoiner": "\uFEFF",
"zerowidthnonjoiner": "\u200C",
"zerowidthspace": "\u200B",
"zeta": "\u03B6",
"zhbopomofo": "\u3113",
"zhearmenian": "\u056A",
"zhebrevecyrillic": "\u04C2",
"zhecyrillic": "\u0436",
"zhedescendercyrillic": "\u0497",
"zhedieresiscyrillic": "\u04DD",
"zihiragana": "\u3058",
"zikatakana": "\u30B8",
"zinorhebrew": "\u05AE",
"zlinebelow": "\u1E95",
"zmonospace": "\uFF5A",
"zohiragana": "\u305E",
"zokatakana": "\u30BE",
"zparen": "\u24B5",
"zretroflexhook": "\u0290",
"zstroke": "\u01B6",
"zuhiragana": "\u305A",
"zukatakana": "\u30BA",
}
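# A hedged usage sketch for a glyph-name table like the one above; `glyph_map`
# is a hypothetical stand-in parameter, not the module's real dictionary name.
def _demo_glyph_lookup(glyph_map):
    # Unknown glyph names fall back to an empty string instead of raising.
    return [glyph_map.get(name, "") for name in ("zeta", "zero", "no-such-glyph")]
# e.g. _demo_glyph_lookup({"zeta": "\u03B6", "zero": "\u0030"}) == ["\u03b6", "0", ""]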
# --end
/ansys_dpf_core-0.9.0.tar.gz/ansys_dpf_core-0.9.0/src/ansys/dpf/core/operators/result/elastic_strain_principal_3.py
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class elastic_strain_principal_3(Operator):
"""Read/compute element nodal component elastic strains 3rd principal
component by calling the readers defined by the datasources and
    computing its eigenvalues.
Parameters
----------
time_scoping : Scoping or int or float or Field, optional
Time/freq values (use doubles or field),
time/freq set ids (use ints or
scoping) or time/freq step ids (use
scoping with timefreq_steps location)
        required in output. To specify
        time/freq values at specific load
        steps, put a field (and not a list)
        in input with a scoping located on
        "timefreq_steps". Linear time/freq
        interpolation is performed if the
values are not in the result files
and the data at the max time or freq
is taken when time/freqs are higher
than available time/freqs in result
files.
mesh_scoping : ScopingsContainer or Scoping, optional
Nodes or elements scoping required in output.
        The output fields will be scoped on
        these node or element ids. To figure
        out the ordering of the fields data,
        look at their scoping ids as they
        might not be ordered as the input
        scoping was. The scoping's location
        indicates whether nodes or elements
        are asked for. Using a scopings
        container allows you to split the
        result fields container into domains.
fields_container : FieldsContainer, optional
        FieldsContainer already allocated, modified
        in place.
streams_container : StreamsContainer, optional
Result file container allowed to be kept open
to cache data
data_sources : DataSources
Result file path container, used if no
streams are set
bool_rotate_to_global : bool, optional
If true the field is rotated to global
coordinate system (default true)
mesh : MeshedRegion or MeshesContainer, optional
        Prevents reading the mesh from the result
        files.
requested_location : str, optional
read_cyclic : int, optional
If 0 cyclic symmetry is ignored, if 1 cyclic
sector is read, if 2 cyclic expansion
is done, if 3 cyclic expansion is
done and stages are merged (default
is 1)
read_beams : bool, optional
Elemental nodal beam results are read if this
pin is set to true (default is false)
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> # Make input connections
>>> my_time_scoping = dpf.Scoping()
>>> op.inputs.time_scoping.connect(my_time_scoping)
>>> my_mesh_scoping = dpf.ScopingsContainer()
>>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> my_streams_container = dpf.StreamsContainer()
>>> op.inputs.streams_container.connect(my_streams_container)
>>> my_data_sources = dpf.DataSources()
>>> op.inputs.data_sources.connect(my_data_sources)
>>> my_bool_rotate_to_global = bool()
>>> op.inputs.bool_rotate_to_global.connect(my_bool_rotate_to_global)
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
>>> my_requested_location = str()
>>> op.inputs.requested_location.connect(my_requested_location)
>>> my_read_cyclic = int()
>>> op.inputs.read_cyclic.connect(my_read_cyclic)
>>> my_read_beams = bool()
>>> op.inputs.read_beams.connect(my_read_beams)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.result.elastic_strain_principal_3(
... time_scoping=my_time_scoping,
... mesh_scoping=my_mesh_scoping,
... fields_container=my_fields_container,
... streams_container=my_streams_container,
... data_sources=my_data_sources,
... bool_rotate_to_global=my_bool_rotate_to_global,
... mesh=my_mesh,
... requested_location=my_requested_location,
... read_cyclic=my_read_cyclic,
... read_beams=my_read_beams,
... )
>>> # Get output data
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(
self,
time_scoping=None,
mesh_scoping=None,
fields_container=None,
streams_container=None,
data_sources=None,
bool_rotate_to_global=None,
mesh=None,
requested_location=None,
read_cyclic=None,
read_beams=None,
config=None,
server=None,
):
super().__init__(name="EPEL3", config=config, server=server)
self._inputs = InputsElasticStrainPrincipal3(self)
self._outputs = OutputsElasticStrainPrincipal3(self)
if time_scoping is not None:
self.inputs.time_scoping.connect(time_scoping)
if mesh_scoping is not None:
self.inputs.mesh_scoping.connect(mesh_scoping)
if fields_container is not None:
self.inputs.fields_container.connect(fields_container)
if streams_container is not None:
self.inputs.streams_container.connect(streams_container)
if data_sources is not None:
self.inputs.data_sources.connect(data_sources)
if bool_rotate_to_global is not None:
self.inputs.bool_rotate_to_global.connect(bool_rotate_to_global)
if mesh is not None:
self.inputs.mesh.connect(mesh)
if requested_location is not None:
self.inputs.requested_location.connect(requested_location)
if read_cyclic is not None:
self.inputs.read_cyclic.connect(read_cyclic)
if read_beams is not None:
self.inputs.read_beams.connect(read_beams)
@staticmethod
def _spec():
description = """Read/compute element nodal component elastic strains 3rd principal
component by calling the readers defined by the
            datasources and computing its eigenvalues."""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="time_scoping",
type_names=[
"scoping",
"int32",
"vector<int32>",
"double",
"field",
"vector<double>",
],
optional=True,
document="""Time/freq values (use doubles or field),
time/freq set ids (use ints or
scoping) or time/freq step ids (use
scoping with timefreq_steps location)
        required in output. To specify
        time/freq values at specific load
        steps, put a field (and not a list)
        in input with a scoping located on
        "timefreq_steps". Linear time/freq
        interpolation is performed if the
values are not in the result files
and the data at the max time or freq
is taken when time/freqs are higher
than available time/freqs in result
files.""",
),
1: PinSpecification(
name="mesh_scoping",
type_names=["scopings_container", "scoping"],
optional=True,
document="""Nodes or elements scoping required in output.
        The output fields will be scoped on
        these node or element ids. To figure
        out the ordering of the fields data,
        look at their scoping ids as they
        might not be ordered as the input
        scoping was. The scoping's location
        indicates whether nodes or elements
        are asked for. Using a scopings
        container allows you to split the
        result fields container into domains.""",
),
2: PinSpecification(
name="fields_container",
type_names=["fields_container"],
optional=True,
document="""Fieldscontainer already allocated modified
inplace""",
),
3: PinSpecification(
name="streams_container",
type_names=["streams_container"],
optional=True,
document="""Result file container allowed to be kept open
to cache data""",
),
4: PinSpecification(
name="data_sources",
type_names=["data_sources"],
optional=False,
document="""Result file path container, used if no
streams are set""",
),
5: PinSpecification(
name="bool_rotate_to_global",
type_names=["bool"],
optional=True,
document="""If true the field is rotated to global
coordinate system (default true)""",
),
7: PinSpecification(
name="mesh",
type_names=["abstract_meshed_region", "meshes_container"],
optional=True,
document="""Prevents from reading the mesh in the result
files""",
),
9: PinSpecification(
name="requested_location",
type_names=["string"],
optional=True,
document="""""",
),
14: PinSpecification(
name="read_cyclic",
type_names=["enum dataProcessing::ECyclicReading", "int32"],
optional=True,
document="""If 0 cyclic symmetry is ignored, if 1 cyclic
sector is read, if 2 cyclic expansion
is done, if 3 cyclic expansion is
done and stages are merged (default
is 1)""",
),
22: PinSpecification(
name="read_beams",
type_names=["bool"],
optional=True,
document="""Elemental nodal beam results are read if this
pin is set to true (default is false)""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="fields_container",
type_names=["fields_container"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
        This config can then be changed to the user's needs and be used to
        instantiate the operator. The Configuration allows customizing
        how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
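        Examples
        --------
        A hedged sketch: fetch the default config and hand it back to the
        operator constructor (both entry points are defined in this file).
        >>> from ansys.dpf import core as dpf
        >>> config = dpf.operators.result.elastic_strain_principal_3.default_config()
        >>> op = dpf.operators.result.elastic_strain_principal_3(config=config)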
"""
return Operator.default_config(name="EPEL3", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsElasticStrainPrincipal3
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluating it
Returns
--------
outputs : OutputsElasticStrainPrincipal3
"""
return super().outputs
class InputsElasticStrainPrincipal3(_Inputs):
"""Intermediate class used to connect user inputs to
elastic_strain_principal_3 operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> my_time_scoping = dpf.Scoping()
>>> op.inputs.time_scoping.connect(my_time_scoping)
>>> my_mesh_scoping = dpf.ScopingsContainer()
>>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> my_streams_container = dpf.StreamsContainer()
>>> op.inputs.streams_container.connect(my_streams_container)
>>> my_data_sources = dpf.DataSources()
>>> op.inputs.data_sources.connect(my_data_sources)
>>> my_bool_rotate_to_global = bool()
>>> op.inputs.bool_rotate_to_global.connect(my_bool_rotate_to_global)
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
>>> my_requested_location = str()
>>> op.inputs.requested_location.connect(my_requested_location)
>>> my_read_cyclic = int()
>>> op.inputs.read_cyclic.connect(my_read_cyclic)
>>> my_read_beams = bool()
>>> op.inputs.read_beams.connect(my_read_beams)
"""
def __init__(self, op: Operator):
super().__init__(elastic_strain_principal_3._spec().inputs, op)
self._time_scoping = Input(
elastic_strain_principal_3._spec().input_pin(0), 0, op, -1
)
self._inputs.append(self._time_scoping)
self._mesh_scoping = Input(
elastic_strain_principal_3._spec().input_pin(1), 1, op, -1
)
self._inputs.append(self._mesh_scoping)
self._fields_container = Input(
elastic_strain_principal_3._spec().input_pin(2), 2, op, -1
)
self._inputs.append(self._fields_container)
self._streams_container = Input(
elastic_strain_principal_3._spec().input_pin(3), 3, op, -1
)
self._inputs.append(self._streams_container)
self._data_sources = Input(
elastic_strain_principal_3._spec().input_pin(4), 4, op, -1
)
self._inputs.append(self._data_sources)
self._bool_rotate_to_global = Input(
elastic_strain_principal_3._spec().input_pin(5), 5, op, -1
)
self._inputs.append(self._bool_rotate_to_global)
self._mesh = Input(elastic_strain_principal_3._spec().input_pin(7), 7, op, -1)
self._inputs.append(self._mesh)
self._requested_location = Input(
elastic_strain_principal_3._spec().input_pin(9), 9, op, -1
)
self._inputs.append(self._requested_location)
self._read_cyclic = Input(
elastic_strain_principal_3._spec().input_pin(14), 14, op, -1
)
self._inputs.append(self._read_cyclic)
self._read_beams = Input(
elastic_strain_principal_3._spec().input_pin(22), 22, op, -1
)
self._inputs.append(self._read_beams)
@property
def time_scoping(self):
"""Allows to connect time_scoping input to the operator.
Time/freq values (use doubles or field),
time/freq set ids (use ints or
scoping) or time/freq step ids (use
scoping with timefreq_steps location)
        required in output. To specify
        time/freq values at specific load
        steps, put a field (and not a list)
        in input with a scoping located on
        "timefreq_steps". Linear time/freq
        interpolation is performed if the
values are not in the result files
and the data at the max time or freq
is taken when time/freqs are higher
than available time/freqs in result
files.
Parameters
----------
my_time_scoping : Scoping or int or float or Field
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> op.inputs.time_scoping.connect(my_time_scoping)
>>> # or
>>> op.inputs.time_scoping(my_time_scoping)
"""
return self._time_scoping
@property
def mesh_scoping(self):
"""Allows to connect mesh_scoping input to the operator.
Nodes or elements scoping required in output.
        The output fields will be scoped on
        these node or element ids. To figure
        out the ordering of the fields data,
        look at their scoping ids as they
        might not be ordered as the input
        scoping was. The scoping's location
        indicates whether nodes or elements
        are asked for. Using a scopings
        container allows you to split the
        result fields container into domains.
Parameters
----------
my_mesh_scoping : ScopingsContainer or Scoping
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
>>> # or
>>> op.inputs.mesh_scoping(my_mesh_scoping)
"""
return self._mesh_scoping
@property
def fields_container(self):
"""Allows to connect fields_container input to the operator.
Fieldscontainer already allocated modified
inplace
Parameters
----------
my_fields_container : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> # or
>>> op.inputs.fields_container(my_fields_container)
"""
return self._fields_container
@property
def streams_container(self):
"""Allows to connect streams_container input to the operator.
Result file container allowed to be kept open
to cache data
Parameters
----------
my_streams_container : StreamsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> op.inputs.streams_container.connect(my_streams_container)
>>> # or
>>> op.inputs.streams_container(my_streams_container)
"""
return self._streams_container
@property
def data_sources(self):
"""Allows to connect data_sources input to the operator.
Result file path container, used if no
streams are set
Parameters
----------
my_data_sources : DataSources
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> op.inputs.data_sources.connect(my_data_sources)
>>> # or
>>> op.inputs.data_sources(my_data_sources)
"""
return self._data_sources
@property
def bool_rotate_to_global(self):
"""Allows to connect bool_rotate_to_global input to the operator.
If true the field is rotated to global
coordinate system (default true)
Parameters
----------
my_bool_rotate_to_global : bool
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> op.inputs.bool_rotate_to_global.connect(my_bool_rotate_to_global)
>>> # or
>>> op.inputs.bool_rotate_to_global(my_bool_rotate_to_global)
"""
return self._bool_rotate_to_global
@property
def mesh(self):
"""Allows to connect mesh input to the operator.
Prevents from reading the mesh in the result
files
Parameters
----------
my_mesh : MeshedRegion or MeshesContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> op.inputs.mesh.connect(my_mesh)
>>> # or
>>> op.inputs.mesh(my_mesh)
"""
return self._mesh
@property
def requested_location(self):
"""Allows to connect requested_location input to the operator.
Parameters
----------
my_requested_location : str
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> op.inputs.requested_location.connect(my_requested_location)
>>> # or
>>> op.inputs.requested_location(my_requested_location)
"""
return self._requested_location
@property
def read_cyclic(self):
"""Allows to connect read_cyclic input to the operator.
If 0 cyclic symmetry is ignored, if 1 cyclic
sector is read, if 2 cyclic expansion
is done, if 3 cyclic expansion is
done and stages are merged (default
is 1)
Parameters
----------
my_read_cyclic : int
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> op.inputs.read_cyclic.connect(my_read_cyclic)
>>> # or
>>> op.inputs.read_cyclic(my_read_cyclic)
"""
return self._read_cyclic
@property
def read_beams(self):
"""Allows to connect read_beams input to the operator.
Elemental nodal beam results are read if this
pin is set to true (default is false)
Parameters
----------
my_read_beams : bool
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> op.inputs.read_beams.connect(my_read_beams)
>>> # or
>>> op.inputs.read_beams(my_read_beams)
"""
return self._read_beams
class OutputsElasticStrainPrincipal3(_Outputs):
"""Intermediate class used to get outputs from
elastic_strain_principal_3 operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(self, op: Operator):
super().__init__(elastic_strain_principal_3._spec().outputs, op)
self._fields_container = Output(
elastic_strain_principal_3._spec().output_pin(0), 0, op
)
self._outputs.append(self._fields_container)
@property
def fields_container(self):
"""Allows to get fields_container output of the operator
Returns
----------
my_fields_container : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.elastic_strain_principal_3()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
""" # noqa: E501
return self._fields_container
/instabot-bot-0.1.0.tar.gz/instabot-bot-0.1.0/instabot/bot/bot_filter.py
def filter_medias(self, media_items, filtration=True, quiet=False, is_comment=False):
if filtration:
if not quiet:
self.logger.info("Received {} medias.".format(len(media_items)))
if not is_comment:
media_items = _filter_medias_not_liked(media_items)
if self.max_likes_to_like:
media_items = _filter_medias_nlikes(
media_items, self.max_likes_to_like, self.min_likes_to_like
)
else:
media_items = _filter_medias_not_commented(self, media_items)
if not quiet:
msg = "After filtration {} medias left."
self.logger.info(msg.format(len(media_items)))
return _get_media_ids(media_items)
def _filter_medias_not_liked(media_items):
not_liked_medias = []
for media in media_items:
if "has_liked" in media and not media["has_liked"]:
not_liked_medias.append(media)
return not_liked_medias
def _filter_medias_not_commented(self, media_items):
not_commented_medias = []
for media in media_items:
if media.get("comment_count", 0) > 0 and media.get("comments"):
my_comments = [
comment
for comment in media["comments"]
if comment["user_id"] == self.user_id
]
if my_comments:
continue
not_commented_medias.append(media)
return not_commented_medias
def _filter_medias_nlikes(media_items, max_likes_to_like, min_likes_to_like):
filtered_medias = []
for media in media_items:
if "like_count" in media:
if (
media["like_count"] < max_likes_to_like
and media["like_count"] > min_likes_to_like
):
filtered_medias.append(media)
return filtered_medias
def _get_media_ids(media_items):
result = []
for media in media_items:
if "id" in media:
result.append(media["id"])
elif "pk" in media:
result.append(media["pk"])
return result
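# A hedged, self-contained sketch of how the module-level helpers above
# compose; the media dicts are hypothetical stand-ins for API responses.
def _demo_filter_pipeline():
    medias = [
        {"id": "1", "has_liked": False, "like_count": 50},
        {"id": "2", "has_liked": True, "like_count": 50},
        {"pk": "3", "has_liked": False, "like_count": 5000},
    ]
    medias = _filter_medias_not_liked(medias)  # drops "2" (already liked)
    medias = _filter_medias_nlikes(medias, 1000, 10)  # drops "3" (too popular)
    return _get_media_ids(medias)  # -> ["1"]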
def check_media(self, media_id):
if self.api.media_info(media_id):
medias = self.api.last_json["items"]
if search_blacklist_hashtags_in_media(self, media_id):
msg = "Blacklist hashtag found in media, skipping!"
self.console_print(msg, "red")
return False
if self.filter_medias(medias, quiet=True):
return check_user(self, self.get_media_owner(media_id))
return False
msg = "Media ID error!"
self.console_print(msg, "red")
return False
# Filter users
def search_stop_words_in_user(self, user_info):
text = ""
if "biography" in user_info:
text += user_info["biography"].lower()
if "username" in user_info:
text += user_info["username"].lower()
if "full_name" in user_info:
text += user_info["full_name"].lower()
for stop_word in self.stop_words:
if stop_word in text:
return True
return False
def search_blacklist_hashtags_in_media(self, media_id):
media_info = self.get_media_info(media_id)
text = media_info[0]["caption"]["text"] if media_info[0]["caption"] else ""
media_comments = self.get_media_comments(media_id)
comments_number = min(6, len(media_comments))
for i in range(0, comments_number):
text += "".join(media_comments[i]["text"])
return any((h in text) for h in self.blacklist_hashtags)
def check_user(self, user_id, unfollowing=False): # noqa: C901
if not self.filter_users and not unfollowing:
return True
self.small_delay()
user_id = self.convert_to_user_id(user_id)
if not user_id:
self.console_print("not user_id, skipping!", "red")
return False
if user_id in self.whitelist:
self.console_print("`user_id` in `self.whitelist`.", "green")
return True
if user_id in self.blacklist:
self.console_print("`user_id` in `self.blacklist`.", "red")
return False
if user_id == str(self.user_id):
self.console_print(("`user_id` equals bot's `user_id`, skipping!"), "green")
return False
if user_id in self.following:
if not unfollowing:
# Log to Console
self.console_print("Already following, skipping!", "red")
return False
user_info = self.get_user_info(user_id)
if not user_info:
self.console_print("not `user_info`, skipping!", "red")
return False
msg = "USER_NAME: {username}, FOLLOWER: {followers}, FOLLOWING: {following}"
follower_count = user_info["follower_count"]
following_count = user_info["following_count"]
self.console_print(
msg.format(
username=user_info["username"],
followers=follower_count,
following=following_count,
)
)
skipped = self.skipped_file
followed = self.followed_file
if not unfollowing:
if self.filter_previously_followed and user_id in followed.list:
self.console_print(("info: account previously followed, skipping!"), "red")
return False
if (
"has_anonymous_profile_picture" in user_info
and self.filter_users_without_profile_photo
):
if user_info["has_anonymous_profile_picture"]:
self.console_print(
("info: account DOES NOT HAVE " "A PROFILE PHOTO, skipping! "), "red"
)
skipped.append(user_id)
return False
if "is_private" in user_info and self.filter_private_users:
if user_info["is_private"]:
self.console_print("info: account is PRIVATE, skipping! ", "red")
skipped.append(user_id)
return False
if "is_business" in user_info and self.filter_business_accounts:
if user_info["is_business"]:
self.console_print("info: is BUSINESS, skipping!", "red")
skipped.append(user_id)
return False
if "is_verified" in user_info and self.filter_verified_accounts:
if user_info["is_verified"]:
self.console_print("info: is VERIFIED, skipping !", "red")
skipped.append(user_id)
return False
if follower_count < self.min_followers_to_follow:
msg = "follower_count < bot.min_followers_to_follow, skipping!"
self.console_print(msg, "red")
skipped.append(user_id)
return False
if follower_count > self.max_followers_to_follow:
msg = "follower_count > bot.max_followers_to_follow, skipping!"
self.console_print(msg, "red")
skipped.append(user_id)
return False
if user_info["following_count"] < self.min_following_to_follow:
msg = "following_count < bot.min_following_to_follow, skipping!"
self.console_print(msg, "red")
skipped.append(user_id)
return False
if user_info["following_count"] > self.max_following_to_follow:
msg = "following_count > bot.max_following_to_follow, skipping!"
self.console_print(msg, "red")
skipped.append(user_id)
return False
try:
if (
(following_count > 0)
and follower_count / following_count > self.max_followers_to_following_ratio
):
msg = (
"follower_count / following_count > "
"bot.max_followers_to_following_ratio, skipping!"
)
self.console_print(msg, "red")
skipped.append(user_id)
return False
if (
(follower_count > 0)
and following_count / follower_count > self.max_following_to_followers_ratio
):
msg = (
"following_count / follower_count > "
"bot.max_following_to_followers_ratio, skipping!"
)
self.console_print(msg, "red")
skipped.append(user_id)
return False
except ZeroDivisionError:
self.console_print("ZeroDivisionError: division by zero", "red")
return False
if (
"media_count" in user_info
and user_info["media_count"] < self.min_media_count_to_follow
):
msg = (
"media_count < bot.min_media_count_to_follow, " "BOT or INACTIVE, skipping!"
)
self.console_print(msg, "red")
skipped.append(user_id)
return False
if search_stop_words_in_user(self, user_info):
msg = "`bot.search_stop_words_in_user` found in user, skipping!"
self.console_print(msg, "red")
skipped.append(user_id)
return False
return True
def check_not_bot(self, user_id):
""" Filter bot from real users. """
self.small_delay()
user_id = self.convert_to_user_id(user_id)
if not user_id:
return False
if user_id in self.whitelist:
return True
if user_id in self.blacklist:
return False
user_info = self.get_user_info(user_id)
if not user_info:
        return True  # closed account
skipped = self.skipped_file
if (
"following_count" in user_info
and user_info["following_count"] > self.max_following_to_block
):
msg = "following_count > bot.max_following_to_block, skipping!"
self.console_print(msg, "red")
skipped.append(user_id)
        return False  # mass follower
if search_stop_words_in_user(self, user_info):
msg = "`bot.search_stop_words_in_user` found in user, skipping!"
skipped.append(user_id)
return False
return True
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flashblade/FB_2_0/models/certificate_patch.py
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_0 import models
class CertificatePatch(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'id': 'str',
'certificate': 'str',
'intermediate_certificate': 'str',
'passphrase': 'str',
'private_key': 'str'
}
attribute_map = {
'name': 'name',
'id': 'id',
'certificate': 'certificate',
'intermediate_certificate': 'intermediate_certificate',
'passphrase': 'passphrase',
'private_key': 'private_key'
}
required_args = {
}
def __init__(
self,
name=None, # type: str
id=None, # type: str
certificate=None, # type: str
intermediate_certificate=None, # type: str
passphrase=None, # type: str
private_key=None, # type: str
):
"""
Keyword args:
name (str): Name of the object (e.g., a file system or snapshot).
id (str): A non-modifiable, globally unique ID chosen by the system.
certificate (str): The text of the certificate.
intermediate_certificate (str): Intermediate certificate chains.
passphrase (str): The passphrase used to encrypt `private_key`.
private_key (str): The private key used to sign the certificate.
"""
if name is not None:
self.name = name
if id is not None:
self.id = id
if certificate is not None:
self.certificate = certificate
if intermediate_certificate is not None:
self.intermediate_certificate = intermediate_certificate
if passphrase is not None:
self.passphrase = passphrase
if private_key is not None:
self.private_key = private_key
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `CertificatePatch`".format(key))
self.__dict__[key] = value
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Attributes that resolve to Property placeholder objects were never
        # set; surface them as None instead of leaking the internal sentinel.
        if isinstance(value, Property):
            return None
        else:
            return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CertificatePatch, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CertificatePatch):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
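# A hedged usage sketch: build a patch carrying only the fields to change and
# serialize it; the values below are placeholders, not real certificate data.
def _demo_certificate_patch():
    patch = CertificatePatch(name="external", passphrase="example-passphrase")
    # to_dict() only emits attributes that were actually set.
    return patch.to_dict()  # -> {'name': 'external', 'passphrase': 'example-passphrase'}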
/ticktick_py-2.0.3-py3-none-any.whl/ticktick/oauth2.py
import requests
import webbrowser
import time
import logging
import ast
import os
from urllib.parse import urlparse, urlencode, parse_qsl
from ticktick.cache import CacheHandler
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
log = logging.getLogger(__name__)
def requests_retry_session(retries=3,
backoff_factor=1,
status_forcelist=(405, 500, 502, 504),
session=None,
allowed_methods=frozenset(['GET', 'POST', 'PUT', 'DELETE'])):
"""
    Builds a requests session that retries failed HTTP requests with exponential backoff.
"""
session = session or requests.session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
allowed_methods=allowed_methods
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
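# A hedged usage sketch for requests_retry_session; the URL is a placeholder
# and no request is made unless this helper is called explicitly.
def _demo_retry_session():
    session = requests_retry_session(retries=5, backoff_factor=0.5)
    # GETs through this session transparently retry on 405/500/502/504.
    return session.get("https://example.com")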
class OAuth2:
"""
Implements the Authorization flow for TickTick's Open API
"""
OAUTH_AUTHORIZE_URL = "https://ticktick.com/oauth/authorize"
OBTAIN_TOKEN_URL = "https://ticktick.com/oauth/token"
def __init__(self,
client_id: str,
client_secret: str,
redirect_uri: str,
scope: str = "tasks:write tasks:read", # only available options right now
state: str = None,
session=None,
env_key: str = None,
cache_path: str = '.token-oauth',
check_cache: bool = True
):
"""
Initialize the object.
Arguments:
client_id: Client ID string
client_secret: Client secret string
redirect_uri: Redirect uri
scope: Scope for the permissions. Current options are only the default.
state (str): State parameter
session (requests session): Requests session
env_key: The environment variable name where the access token dictionary is stored as a string literal.
cache_path: The desired path of the file where the access token information will be stored.
check_cache: Whether to check the cache file for the access token information
!!! examples
=== "Standard Method"
                This runs through the steps to get a new access token, or just retrieves the cached one.
```python
oauth = OAuth2(client_id=cliend_id,
client_secret=client_secret,
redirect_uri=redirect_uri)
```
=== "Check Environment Method"
If you are in a situation where you don't want to keep the cached token file, you can save the
access token dictionary as a string literal in your environment, and pass the name of the variable to
prevent having to request a new access token.
``` python
auth_client = OAuth2(client_id=client_id,
client_secret=client_secret,
redirect_uri=redirect_uri,
env_key='ACCESS_TOKEN_DICT')
```
Where in the environment you have declared `ACCESS_TOKEN_DICT` to be
the string literal of the token dictionary:
```
                "{'access_token': '628ff081-5331-4a37-8ddk-021974c9f43g',
                'token_type': 'bearer', 'expires_in': 14772375,
                'scope': 'tasks:read tasks:write',
                'expire_time': 1637192935,
                'readable_expire_time':
                'Wed Nov 17 15:48:55 2021'}"
```
"""
# If a proper session is passed then we will just use the existing session
self.session = session or requests_retry_session()
# Set the client_id
self._client_id = client_id
# Set the client_secret
self._client_secret = client_secret
# Set the redirect_uri
self._redirect_uri = redirect_uri
# Set the scope
self._scope = scope
# Set the state
self._state = state
# Initialize code parameter
self._code = None
# Set the cache handler
self.cache = CacheHandler(cache_path)
# Set the access token
self.access_token_info = None
# get access token
self.get_access_token(check_cache=check_cache, check_env=env_key)
def _get_auth_url(self):
"""
Returns the url for authentication
"""
payload = {
"client_id": self._client_id,
"scope": self._scope,
"response_type": "code",
"redirect_uri": self._redirect_uri,
"state": self._state,
}
parameters = urlencode(payload)
return "%s?%s" % (self.OAUTH_AUTHORIZE_URL, parameters)
def _open_auth_url_in_browser(self):
"""
Opens the authorization url in the browser
Docs link: https://developer.ticktick.com/api#/openapi?id=first-step
"""
log.info("Providing authentication requires interacting with the web browser. Once you accept the "
"authorization you will be redirected to the redirect url that you provided with extra parameters "
"provided in the url. Paste the url that you "
"were redirected to into the console")
url = self._get_auth_url()
webbrowser.open(url)
def _get_redirected_url(self):
"""
Prompts the user for the redirected url to parse the token and state
"""
prompt = "Enter the URL you were redirected to: "
url = self._get_user_input(prompt)
# get the parsed parameters from the url
self._code, self._state = self._get_auth_response_parameters(url)
@staticmethod
def _get_user_input(prompt: str = ''):
"""
Prompts the user for input from the console based on the prompt
"""
return input(prompt)
@staticmethod
def _get_auth_response_parameters(url):
"""
Gets the code and state members contained in the redirected url.
Docs link: https://developer.ticktick.com/api#/openapi?id=second-step
:param url:
:return:
"""
# isolates "code" and "state" parameters in a string
parsed = urlparse(url).query
# creates a dictionary containing the "code" and "state" parameters returned from the url
isolated = dict(parse_qsl(parsed))
# return the parameters
return isolated["code"], isolated["state"]
def _request_access_token(self):
"""
Makes the POST request to get the token and returns the token info dictionary
Docs link: https://developer.ticktick.com/api#/openapi?id=third-step
:return:
"""
# Get the manual authentication from the user, and prompt for the redirected url
self._open_auth_url_in_browser()
self._get_redirected_url()
# create the payload
payload = {
"client_id": self._client_id,
"client_secret": self._client_secret,
"code": self._code,
"grant_type": "authorization_code", # currently only option
"scope": self._scope,
"redirect_uri": self._redirect_uri
}
# make the request
token_info = self._post(self.OBTAIN_TOKEN_URL, params=payload)
token_info = self._set_expire_time(token_info)
self.cache.write_token_to_cache(token_info)
return token_info
def _post(self, url, **kwargs):
"""
Sends an http post request with the specified url and keyword arguments.
Arguments:
url (str): Url to send the request.
**kwargs: Arguments to send with the request.
Returns:
dict: The json parsed response if possible or just a string of the response text if not.
Raises:
            RuntimeError: If the request could not be completed.
"""
response = self.session.post(url, **kwargs)
if response.status_code != 200:
raise RuntimeError("POST request could not be completed")
try:
return response.json()
except ValueError:
return response.text
def get_access_token(self, check_cache: bool = True, check_env: str = None):
"""
Retrieves the authorization token from cache or makes a new request for it.
!!! note
This method does not need to be called explicitly.
Arguments:
check_cache (bool): Boolean on whether to check if the access token is in a cache file.
check_env (str): The environment variable name where the token dictionary is saved as a string literal.
Priority order for getting the access token:
1) From an already set class member in the current running instance
2) From an environment variable where the token dictionary is in a string literal form,
and the name of the environment variable name is the value passed to the "check_env" parameter
3) From a cache file that contains the access token dictionary (normal case)
4) From a new token request (which will create a new cache file that contains the access
token dictionary) (initial case if never setup)
"""
# check the local state for if the access token exists
if self.access_token_info is not None:
token_info = self.validate_token(self.access_token_info)
if token_info is not None:
self.access_token_info = token_info
return token_info["access_token"]
# check if in the environment the access token is set
if check_env is not None:
# get the access token string
token_dict_string = os.getenv(check_env)
try:
converted_token_dict = ast.literal_eval(token_dict_string)
            # TypeError covers an unset environment variable (token_dict_string
            # is None); ValueError/SyntaxError are what ast.literal_eval raises.
            except (TypeError, ValueError, SyntaxError):
raise ValueError("Access token in the environment must be a python dictionary contained"
" in a string literal")
token_info = self.validate_token(converted_token_dict)
if token_info is not None:
self.cache.write_token_to_cache(token_info)
self.access_token_info = token_info
return token_info["access_token"]
# check if the cache file exists with the token
if check_cache:
token_info = self.validate_token(self.cache.get_cached_token())
            # validate_token returns a usable token dict, or None if the cache is empty
if token_info is not None:
self.access_token_info = token_info
return token_info["access_token"]
# access token is not stored anywhere, request a new token
token_info = self._request_access_token()
self.access_token_info = token_info
return token_info["access_token"]
@staticmethod
def _set_expire_time(token_dict):
"""
Adds two members to the access_token_info dictionary containing the expire time of the token
self._access_token_info["expire_time"]: The integer representation of the token expiration
self._access_token_info["readable_expire_time"]: The readable date in the form like 'Wed Nov 17 15:48:57 2021'
:return:
"""
token_dict["expire_time"] = int(time.time()) + token_dict["expires_in"]
token_dict["readable_expire_time"] = time.asctime(time.localtime(time.time() +
token_dict["expires_in"]))
return token_dict
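    @staticmethod
    def _demo_expiry_math():
        # Hedged sketch of the bookkeeping shared by _set_expire_time and
        # is_token_expired, with a fixed clock instead of time.time(): a token
        # issued at t=1000 with expires_in=3600 expires at t=4600 and counts
        # as expired once fewer than 60 seconds of lifetime remain.
        issued_at, expires_in = 1000, 3600
        expire_time = issued_at + expires_in
        return (expire_time - 4500 < 60, expire_time - 4600 < 60)  # (False, True)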
@staticmethod
def is_token_expired(token_dict):
"""
Returns a boolean for if the access token is expired
Arguments:
token_dict (dict): Access token dictionary
Returns:
bool: Whether the access token is expired
"""
current_time = int(time.time())
return token_dict["expire_time"] - current_time < 60
def validate_token(self, token_dict):
"""
Validates whether the access token is valid
Arguments:
token_dict (dict): Access token dictionary
Returns:
None or dict: None if the token_dict is not valid, else token_dict
"""
# if the token info dictionary does not exist then bounce
if token_dict is None:
return None
# check if the token is expired
if self.is_token_expired(token_dict):
# make a new request for a valid token since there is currently no refresh token
new_token_dict = self._request_access_token()
return new_token_dict
return token_dict # original token_dict is valid
/gamification-engine-0.4.0.tar.gz/gamification-engine-0.4.0/gengine/app/jsscripts/node_modules/intl/locale-data/jsonp/en-GU.js
IntlPolyfill.__addLocaleData({locale:"en-GU",date:{ca:["gregory","buddhist","chinese","coptic","dangi","ethioaa","ethiopic","generic","hebrew","indian","islamic","islamicc","japanese","persian","roc"],hourNo0:true,hour12:true,formats:{short:"{1}, {0}",medium:"{1}, {0}",full:"{1} 'at' {0}",long:"{1} 'at' {0}",availableFormats:{"d":"d","E":"ccc",Ed:"d E",Ehm:"E h:mm a",EHm:"E HH:mm",Ehms:"E h:mm:ss a",EHms:"E HH:mm:ss",Gy:"y G",GyMMM:"MMM y G",GyMMMd:"MMM d, y G",GyMMMEd:"E, MMM d, y G","h":"h a","H":"HH",hm:"h:mm a",Hm:"HH:mm",hms:"h:mm:ss a",Hms:"HH:mm:ss",hmsv:"h:mm:ss a v",Hmsv:"HH:mm:ss v",hmv:"h:mm a v",Hmv:"HH:mm v","M":"L",Md:"M/d",MEd:"E, M/d",MMM:"LLL",MMMd:"MMM d",MMMEd:"E, MMM d",MMMMd:"MMMM d",ms:"mm:ss","y":"y",yM:"M/y",yMd:"M/d/y",yMEd:"E, M/d/y",yMMM:"MMM y",yMMMd:"MMM d, y",yMMMEd:"E, MMM d, y",yMMMM:"MMMM y",yQQQ:"QQQ y",yQQQQ:"QQQQ y"},dateFormats:{yMMMMEEEEd:"EEEE, MMMM d, y",yMMMMd:"MMMM d, y",yMMMd:"MMM d, y",yMd:"M/d/yy"},timeFormats:{hmmsszzzz:"h:mm:ss a zzzz",hmsz:"h:mm:ss a z",hms:"h:mm:ss a",hm:"h:mm a"}},calendars:{buddhist:{months:{narrow:["J","F","M","A","M","J","J","A","S","O","N","D"],short:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],long:["January","February","March","April","May","June","July","August","September","October","November","December"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["BE"],short:["BE"],long:["BE"]},dayPeriods:{am:"AM",pm:"PM"}},chinese:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Mo1","Mo2","Mo3","Mo4","Mo5","Mo6","Mo7","Mo8","Mo9","Mo10","Mo11","Mo12"],long:["Month1","Month2","Month3","Month4","Month5","Month6","Month7","Month8","Month9","Month10","Month11","Month12"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},dayPeriods:{am:"AM",pm:"PM"}},coptic:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12","13"],short:["Tout","Baba","Hator","Kiahk","Toba","Amshir","Baramhat","Baramouda","Bashans","Paona","Epep","Mesra","Nasie"],long:["Tout","Baba","Hator","Kiahk","Toba","Amshir","Baramhat","Baramouda","Bashans","Paona","Epep","Mesra","Nasie"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["ERA0","ERA1"],short:["ERA0","ERA1"],long:["ERA0","ERA1"]},dayPeriods:{am:"AM",pm:"PM"}},dangi:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Mo1","Mo2","Mo3","Mo4","Mo5","Mo6","Mo7","Mo8","Mo9","Mo10","Mo11","Mo12"],long:["Month1","Month2","Month3","Month4","Month5","Month6","Month7","Month8","Month9","Month10","Month11","Month12"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},dayPeriods:{am:"AM",pm:"PM"}},ethiopic:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12","13"],short:["Meskerem","Tekemt","Hedar","Tahsas","Ter","Yekatit","Megabit","Miazia","Genbot","Sene","Hamle","Nehasse","Pagumen"],long:["Meskerem","Tekemt","Hedar","Tahsas","Ter","Yekatit","Megabit","Miazia","Genbot","Sene","Hamle","Nehasse","Pagumen"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["ERA0","ERA1"],short:["ERA0","ERA1"],long:["ERA0","ERA1"]},dayPeriods:{am:"AM",pm:"PM"}},ethioaa:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12","13"],short:["Meskerem","Tekemt","Hedar","Tahsas","Ter","Yekatit","Megabit","Miazia","Genbot","Sene","Hamle","Nehasse","Pagumen"],long:["Meskerem","Tekemt","Hedar","Tahsas","Ter","Yekatit","Megabit","Miazia","Genbot","Sene","Hamle","Nehasse","Pagumen"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["ERA0"],short:["ERA0"],long:["ERA0"]},dayPeriods:{am:"AM",pm:"PM"}},generic:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["M01","M02","M03","M04","M05","M06","M07","M08","M09","M10","M11","M12"],long:["M01","M02","M03","M04","M05","M06","M07","M08","M09","M10","M11","M12"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["ERA0","ERA1"],short:["ERA0","ERA1"],long:["ERA0","ERA1"]},dayPeriods:{am:"AM",pm:"PM"}},gregory:{months:{narrow:["J","F","M","A","M","J","J","A","S","O","N","D"],short:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],long:["January","February","March","April","May","June","July","August","September","October","November","December"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["B","A","BCE","CE"],short:["BC","AD","BCE","CE"],long:["Before Christ","Anno Domini","Before Common Era","Common Era"]},dayPeriods:{am:"AM",pm:"PM"}},hebrew:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12","13","7"],short:["Tishri","Heshvan","Kislev","Tevet","Shevat","Adar I","Adar","Nisan","Iyar","Sivan","Tamuz","Av","Elul","Adar II"],long:["Tishri","Heshvan","Kislev","Tevet","Shevat","Adar I","Adar","Nisan","Iyar","Sivan","Tamuz","Av","Elul","Adar II"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["AM"],short:["AM"],long:["AM"]},dayPeriods:{am:"AM",pm:"PM"}},indian:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Chaitra","Vaisakha","Jyaistha","Asadha","Sravana","Bhadra","Asvina","Kartika","Agrahayana","Pausa","Magha","Phalguna"],long:["Chaitra","Vaisakha","Jyaistha","Asadha","Sravana","Bhadra","Asvina","Kartika","Agrahayana","Pausa","Magha","Phalguna"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["Saka"],short:["Saka"],long:["Saka"]},dayPeriods:{am:"AM",pm:"PM"}},islamic:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Muh.","Saf.","Rab. I","Rab. II","Jum. I","Jum. II","Raj.","Sha.","Ram.","Shaw.","Dhuʻl-Q.","Dhuʻl-H."],long:["Muharram","Safar","Rabiʻ I","Rabiʻ II","Jumada I","Jumada II","Rajab","Shaʻban","Ramadan","Shawwal","Dhuʻl-Qiʻdah","Dhuʻl-Hijjah"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["AH"],short:["AH"],long:["AH"]},dayPeriods:{am:"AM",pm:"PM"}},islamicc:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Muh.","Saf.","Rab. I","Rab. II","Jum. I","Jum. II","Raj.","Sha.","Ram.","Shaw.","Dhuʻl-Q.","Dhuʻl-H."],long:["Muharram","Safar","Rabiʻ I","Rabiʻ II","Jumada I","Jumada II","Rajab","Shaʻban","Ramadan","Shawwal","Dhuʻl-Qiʻdah","Dhuʻl-Hijjah"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["AH"],short:["AH"],long:["AH"]},dayPeriods:{am:"AM",pm:"PM"}},japanese:{months:{narrow:["J","F","M","A","M","J","J","A","S","O","N","D"],short:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],long:["January","February","March","April","May","June","July","August","September","October","November","December"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["Taika (645–650)","Hakuchi (650–671)","Hakuhō (672–686)","Shuchō (686–701)","Taihō (701–704)","Keiun (704–708)","Wadō (708–715)","Reiki (715–717)","Yōrō (717–724)","Jinki (724–729)","Tenpyō (729–749)","Tenpyō-kampō (749-749)","Tenpyō-shōhō (749-757)","Tenpyō-hōji (757-765)","Tenpyō-jingo (765-767)","Jingo-keiun (767-770)","Hōki (770–780)","Ten-ō (781-782)","Enryaku (782–806)","Daidō (806–810)","Kōnin (810–824)","Tenchō (824–834)","Jōwa (834–848)","Kajō (848–851)","Ninju (851–854)","Saikō (854–857)","Ten-an (857-859)","Jōgan (859–877)","Gangyō (877–885)","Ninna (885–889)","Kanpyō (889–898)","Shōtai (898–901)","Engi (901–923)","Enchō (923–931)","Jōhei (931–938)","Tengyō (938–947)","Tenryaku (947–957)","Tentoku (957–961)","Ōwa (961–964)","Kōhō (964–968)","Anna (968–970)","Tenroku (970–973)","Ten’en (973–976)","Jōgen (976–978)","Tengen (978–983)","Eikan (983–985)","Kanna (985–987)","Eien (987–989)","Eiso (989–990)","Shōryaku (990–995)","Chōtoku (995–999)","Chōhō (999–1004)","Kankō (1004–1012)","Chōwa (1012–1017)","Kannin (1017–1021)","Jian (1021–1024)","Manju (1024–1028)","Chōgen (1028–1037)","Chōryaku (1037–1040)","Chōkyū (1040–1044)","Kantoku (1044–1046)","Eishō (1046–1053)","Tengi (1053–1058)","Kōhei (1058–1065)","Jiryaku (1065–1069)","Enkyū (1069–1074)","Shōho (1074–1077)","Shōryaku (1077–1081)","Eihō (1081–1084)","Ōtoku (1084–1087)","Kanji (1087–1094)","Kahō (1094–1096)","Eichō (1096–1097)","Jōtoku (1097–1099)","Kōwa (1099–1104)","Chōji (1104–1106)","Kashō (1106–1108)","Tennin (1108–1110)","Ten-ei (1110-1113)","Eikyū (1113–1118)","Gen’ei (1118–1120)","Hōan (1120–1124)","Tenji (1124–1126)","Daiji (1126–1131)","Tenshō (1131–1132)","Chōshō (1132–1135)","Hōen (1135–1141)","Eiji (1141–1142)","Kōji (1142–1144)","Ten’yō (1144–1145)","Kyūan (1145–1151)","Ninpei (1151–1154)","Kyūju (1154–1156)","Hōgen (1156–1159)","Heiji (1159–1160)","Eiryaku (1160–1161)","Ōho (1161–1163)","Chōkan (1163–1165)","Eiman (1165–1166)","Nin’an (1166–1169)","Kaō (1169–1171)","Shōan (1171–1175)","Angen (1175–1177)","Jishō (1177–1181)","Yōwa (1181–1182)","Juei 
(1182–1184)","Genryaku (1184–1185)","Bunji (1185–1190)","Kenkyū (1190–1199)","Shōji (1199–1201)","Kennin (1201–1204)","Genkyū (1204–1206)","Ken’ei (1206–1207)","Jōgen (1207–1211)","Kenryaku (1211–1213)","Kenpō (1213–1219)","Jōkyū (1219–1222)","Jōō (1222–1224)","Gennin (1224–1225)","Karoku (1225–1227)","Antei (1227–1229)","Kanki (1229–1232)","Jōei (1232–1233)","Tenpuku (1233–1234)","Bunryaku (1234–1235)","Katei (1235–1238)","Ryakunin (1238–1239)","En’ō (1239–1240)","Ninji (1240–1243)","Kangen (1243–1247)","Hōji (1247–1249)","Kenchō (1249–1256)","Kōgen (1256–1257)","Shōka (1257–1259)","Shōgen (1259–1260)","Bun’ō (1260–1261)","Kōchō (1261–1264)","Bun’ei (1264–1275)","Kenji (1275–1278)","Kōan (1278–1288)","Shōō (1288–1293)","Einin (1293–1299)","Shōan (1299–1302)","Kengen (1302–1303)","Kagen (1303–1306)","Tokuji (1306–1308)","Enkyō (1308–1311)","Ōchō (1311–1312)","Shōwa (1312–1317)","Bunpō (1317–1319)","Genō (1319–1321)","Genkō (1321–1324)","Shōchū (1324–1326)","Karyaku (1326–1329)","Gentoku (1329–1331)","Genkō (1331–1334)","Kenmu (1334–1336)","Engen (1336–1340)","Kōkoku (1340–1346)","Shōhei (1346–1370)","Kentoku (1370–1372)","Bunchū (1372–1375)","Tenju (1375–1379)","Kōryaku (1379–1381)","Kōwa (1381–1384)","Genchū (1384–1392)","Meitoku (1384–1387)","Kakei (1387–1389)","Kōō (1389–1390)","Meitoku (1390–1394)","Ōei (1394–1428)","Shōchō (1428–1429)","Eikyō (1429–1441)","Kakitsu (1441–1444)","Bun’an (1444–1449)","Hōtoku (1449–1452)","Kyōtoku (1452–1455)","Kōshō (1455–1457)","Chōroku (1457–1460)","Kanshō (1460–1466)","Bunshō (1466–1467)","Ōnin (1467–1469)","Bunmei (1469–1487)","Chōkyō (1487–1489)","Entoku (1489–1492)","Meiō (1492–1501)","Bunki (1501–1504)","Eishō (1504–1521)","Taiei (1521–1528)","Kyōroku (1528–1532)","Tenbun (1532–1555)","Kōji (1555–1558)","Eiroku (1558–1570)","Genki (1570–1573)","Tenshō (1573–1592)","Bunroku (1592–1596)","Keichō (1596–1615)","Genna (1615–1624)","Kan’ei (1624–1644)","Shōho (1644–1648)","Keian (1648–1652)","Jōō (1652–1655)","Meireki (1655–1658)","Manji (1658–1661)","Kanbun (1661–1673)","Enpō (1673–1681)","Tenna (1681–1684)","Jōkyō (1684–1688)","Genroku (1688–1704)","Hōei (1704–1711)","Shōtoku (1711–1716)","Kyōhō (1716–1736)","Genbun (1736–1741)","Kanpō (1741–1744)","Enkyō (1744–1748)","Kan’en (1748–1751)","Hōreki (1751–1764)","Meiwa (1764–1772)","An’ei (1772–1781)","Tenmei (1781–1789)","Kansei (1789–1801)","Kyōwa (1801–1804)","Bunka (1804–1818)","Bunsei (1818–1830)","Tenpō (1830–1844)","Kōka (1844–1848)","Kaei (1848–1854)","Ansei (1854–1860)","Man’en (1860–1861)","Bunkyū (1861–1864)","Genji (1864–1865)","Keiō (1865–1868)","M","T","S","H"],short:["Taika (645–650)","Hakuchi (650–671)","Hakuhō (672–686)","Shuchō (686–701)","Taihō (701–704)","Keiun (704–708)","Wadō (708–715)","Reiki (715–717)","Yōrō (717–724)","Jinki (724–729)","Tenpyō (729–749)","Tenpyō-kampō (749-749)","Tenpyō-shōhō (749-757)","Tenpyō-hōji (757-765)","Tenpyō-jingo (765-767)","Jingo-keiun (767-770)","Hōki (770–780)","Ten-ō (781-782)","Enryaku (782–806)","Daidō (806–810)","Kōnin (810–824)","Tenchō (824–834)","Jōwa (834–848)","Kajō (848–851)","Ninju (851–854)","Saikō (854–857)","Ten-an (857-859)","Jōgan (859–877)","Gangyō (877–885)","Ninna (885–889)","Kanpyō (889–898)","Shōtai (898–901)","Engi (901–923)","Enchō (923–931)","Jōhei (931–938)","Tengyō (938–947)","Tenryaku (947–957)","Tentoku (957–961)","Ōwa (961–964)","Kōhō (964–968)","Anna (968–970)","Tenroku (970–973)","Ten’en (973–976)","Jōgen (976–978)","Tengen (978–983)","Eikan (983–985)","Kanna (985–987)","Eien (987–989)","Eiso (989–990)","Shōryaku 
(990–995)","Chōtoku (995–999)","Chōhō (999–1004)","Kankō (1004–1012)","Chōwa (1012–1017)","Kannin (1017–1021)","Jian (1021–1024)","Manju (1024–1028)","Chōgen (1028–1037)","Chōryaku (1037–1040)","Chōkyū (1040–1044)","Kantoku (1044–1046)","Eishō (1046–1053)","Tengi (1053–1058)","Kōhei (1058–1065)","Jiryaku (1065–1069)","Enkyū (1069–1074)","Shōho (1074–1077)","Shōryaku (1077–1081)","Eihō (1081–1084)","Ōtoku (1084–1087)","Kanji (1087–1094)","Kahō (1094–1096)","Eichō (1096–1097)","Jōtoku (1097–1099)","Kōwa (1099–1104)","Chōji (1104–1106)","Kashō (1106–1108)","Tennin (1108–1110)","Ten-ei (1110-1113)","Eikyū (1113–1118)","Gen’ei (1118–1120)","Hōan (1120–1124)","Tenji (1124–1126)","Daiji (1126–1131)","Tenshō (1131–1132)","Chōshō (1132–1135)","Hōen (1135–1141)","Eiji (1141–1142)","Kōji (1142–1144)","Ten’yō (1144–1145)","Kyūan (1145–1151)","Ninpei (1151–1154)","Kyūju (1154–1156)","Hōgen (1156–1159)","Heiji (1159–1160)","Eiryaku (1160–1161)","Ōho (1161–1163)","Chōkan (1163–1165)","Eiman (1165–1166)","Nin’an (1166–1169)","Kaō (1169–1171)","Shōan (1171–1175)","Angen (1175–1177)","Jishō (1177–1181)","Yōwa (1181–1182)","Juei (1182–1184)","Genryaku (1184–1185)","Bunji (1185–1190)","Kenkyū (1190–1199)","Shōji (1199–1201)","Kennin (1201–1204)","Genkyū (1204–1206)","Ken’ei (1206–1207)","Jōgen (1207–1211)","Kenryaku (1211–1213)","Kenpō (1213–1219)","Jōkyū (1219–1222)","Jōō (1222–1224)","Gennin (1224–1225)","Karoku (1225–1227)","Antei (1227–1229)","Kanki (1229–1232)","Jōei (1232–1233)","Tenpuku (1233–1234)","Bunryaku (1234–1235)","Katei (1235–1238)","Ryakunin (1238–1239)","En’ō (1239–1240)","Ninji (1240–1243)","Kangen (1243–1247)","Hōji (1247–1249)","Kenchō (1249–1256)","Kōgen (1256–1257)","Shōka (1257–1259)","Shōgen (1259–1260)","Bun’ō (1260–1261)","Kōchō (1261–1264)","Bun’ei (1264–1275)","Kenji (1275–1278)","Kōan (1278–1288)","Shōō (1288–1293)","Einin (1293–1299)","Shōan (1299–1302)","Kengen (1302–1303)","Kagen (1303–1306)","Tokuji (1306–1308)","Enkyō (1308–1311)","Ōchō (1311–1312)","Shōwa (1312–1317)","Bunpō (1317–1319)","Genō (1319–1321)","Genkō (1321–1324)","Shōchū (1324–1326)","Karyaku (1326–1329)","Gentoku (1329–1331)","Genkō (1331–1334)","Kenmu (1334–1336)","Engen (1336–1340)","Kōkoku (1340–1346)","Shōhei (1346–1370)","Kentoku (1370–1372)","Bunchū (1372–1375)","Tenju (1375–1379)","Kōryaku (1379–1381)","Kōwa (1381–1384)","Genchū (1384–1392)","Meitoku (1384–1387)","Kakei (1387–1389)","Kōō (1389–1390)","Meitoku (1390–1394)","Ōei (1394–1428)","Shōchō (1428–1429)","Eikyō (1429–1441)","Kakitsu (1441–1444)","Bun’an (1444–1449)","Hōtoku (1449–1452)","Kyōtoku (1452–1455)","Kōshō (1455–1457)","Chōroku (1457–1460)","Kanshō (1460–1466)","Bunshō (1466–1467)","Ōnin (1467–1469)","Bunmei (1469–1487)","Chōkyō (1487–1489)","Entoku (1489–1492)","Meiō (1492–1501)","Bunki (1501–1504)","Eishō (1504–1521)","Taiei (1521–1528)","Kyōroku (1528–1532)","Tenbun (1532–1555)","Kōji (1555–1558)","Eiroku (1558–1570)","Genki (1570–1573)","Tenshō (1573–1592)","Bunroku (1592–1596)","Keichō (1596–1615)","Genna (1615–1624)","Kan’ei (1624–1644)","Shōho (1644–1648)","Keian (1648–1652)","Jōō (1652–1655)","Meireki (1655–1658)","Manji (1658–1661)","Kanbun (1661–1673)","Enpō (1673–1681)","Tenna (1681–1684)","Jōkyō (1684–1688)","Genroku (1688–1704)","Hōei (1704–1711)","Shōtoku (1711–1716)","Kyōhō (1716–1736)","Genbun (1736–1741)","Kanpō (1741–1744)","Enkyō (1744–1748)","Kan’en (1748–1751)","Hōreki (1751–1764)","Meiwa (1764–1772)","An’ei (1772–1781)","Tenmei (1781–1789)","Kansei (1789–1801)","Kyōwa (1801–1804)","Bunka (1804–1818)","Bunsei 
(1818–1830)","Tenpō (1830–1844)","Kōka (1844–1848)","Kaei (1848–1854)","Ansei (1854–1860)","Man’en (1860–1861)","Bunkyū (1861–1864)","Genji (1864–1865)","Keiō (1865–1868)","Meiji","Taishō","Shōwa","Heisei"],long:["Taika (645–650)","Hakuchi (650–671)","Hakuhō (672–686)","Shuchō (686–701)","Taihō (701–704)","Keiun (704–708)","Wadō (708–715)","Reiki (715–717)","Yōrō (717–724)","Jinki (724–729)","Tenpyō (729–749)","Tenpyō-kampō (749-749)","Tenpyō-shōhō (749-757)","Tenpyō-hōji (757-765)","Tenpyō-jingo (765-767)","Jingo-keiun (767-770)","Hōki (770–780)","Ten-ō (781-782)","Enryaku (782–806)","Daidō (806–810)","Kōnin (810–824)","Tenchō (824–834)","Jōwa (834–848)","Kajō (848–851)","Ninju (851–854)","Saikō (854–857)","Ten-an (857-859)","Jōgan (859–877)","Gangyō (877–885)","Ninna (885–889)","Kanpyō (889–898)","Shōtai (898–901)","Engi (901–923)","Enchō (923–931)","Jōhei (931–938)","Tengyō (938–947)","Tenryaku (947–957)","Tentoku (957–961)","Ōwa (961–964)","Kōhō (964–968)","Anna (968–970)","Tenroku (970–973)","Ten’en (973–976)","Jōgen (976–978)","Tengen (978–983)","Eikan (983–985)","Kanna (985–987)","Eien (987–989)","Eiso (989–990)","Shōryaku (990–995)","Chōtoku (995–999)","Chōhō (999–1004)","Kankō (1004–1012)","Chōwa (1012–1017)","Kannin (1017–1021)","Jian (1021–1024)","Manju (1024–1028)","Chōgen (1028–1037)","Chōryaku (1037–1040)","Chōkyū (1040–1044)","Kantoku (1044–1046)","Eishō (1046–1053)","Tengi (1053–1058)","Kōhei (1058–1065)","Jiryaku (1065–1069)","Enkyū (1069–1074)","Shōho (1074–1077)","Shōryaku (1077–1081)","Eihō (1081–1084)","Ōtoku (1084–1087)","Kanji (1087–1094)","Kahō (1094–1096)","Eichō (1096–1097)","Jōtoku (1097–1099)","Kōwa (1099–1104)","Chōji (1104–1106)","Kashō (1106–1108)","Tennin (1108–1110)","Ten-ei (1110-1113)","Eikyū (1113–1118)","Gen’ei (1118–1120)","Hōan (1120–1124)","Tenji (1124–1126)","Daiji (1126–1131)","Tenshō (1131–1132)","Chōshō (1132–1135)","Hōen (1135–1141)","Eiji (1141–1142)","Kōji (1142–1144)","Ten’yō (1144–1145)","Kyūan (1145–1151)","Ninpei (1151–1154)","Kyūju (1154–1156)","Hōgen (1156–1159)","Heiji (1159–1160)","Eiryaku (1160–1161)","Ōho (1161–1163)","Chōkan (1163–1165)","Eiman (1165–1166)","Nin’an (1166–1169)","Kaō (1169–1171)","Shōan (1171–1175)","Angen (1175–1177)","Jishō (1177–1181)","Yōwa (1181–1182)","Juei (1182–1184)","Genryaku (1184–1185)","Bunji (1185–1190)","Kenkyū (1190–1199)","Shōji (1199–1201)","Kennin (1201–1204)","Genkyū (1204–1206)","Ken’ei (1206–1207)","Jōgen (1207–1211)","Kenryaku (1211–1213)","Kenpō (1213–1219)","Jōkyū (1219–1222)","Jōō (1222–1224)","Gennin (1224–1225)","Karoku (1225–1227)","Antei (1227–1229)","Kanki (1229–1232)","Jōei (1232–1233)","Tenpuku (1233–1234)","Bunryaku (1234–1235)","Katei (1235–1238)","Ryakunin (1238–1239)","En’ō (1239–1240)","Ninji (1240–1243)","Kangen (1243–1247)","Hōji (1247–1249)","Kenchō (1249–1256)","Kōgen (1256–1257)","Shōka (1257–1259)","Shōgen (1259–1260)","Bun’ō (1260–1261)","Kōchō (1261–1264)","Bun’ei (1264–1275)","Kenji (1275–1278)","Kōan (1278–1288)","Shōō (1288–1293)","Einin (1293–1299)","Shōan (1299–1302)","Kengen (1302–1303)","Kagen (1303–1306)","Tokuji (1306–1308)","Enkyō (1308–1311)","Ōchō (1311–1312)","Shōwa (1312–1317)","Bunpō (1317–1319)","Genō (1319–1321)","Genkō (1321–1324)","Shōchū (1324–1326)","Karyaku (1326–1329)","Gentoku (1329–1331)","Genkō (1331–1334)","Kenmu (1334–1336)","Engen (1336–1340)","Kōkoku (1340–1346)","Shōhei (1346–1370)","Kentoku (1370–1372)","Bunchū (1372–1375)","Tenju (1375–1379)","Kōryaku (1379–1381)","Kōwa (1381–1384)","Genchū (1384–1392)","Meitoku (1384–1387)","Kakei 
(1387–1389)","Kōō (1389–1390)","Meitoku (1390–1394)","Ōei (1394–1428)","Shōchō (1428–1429)","Eikyō (1429–1441)","Kakitsu (1441–1444)","Bun’an (1444–1449)","Hōtoku (1449–1452)","Kyōtoku (1452–1455)","Kōshō (1455–1457)","Chōroku (1457–1460)","Kanshō (1460–1466)","Bunshō (1466–1467)","Ōnin (1467–1469)","Bunmei (1469–1487)","Chōkyō (1487–1489)","Entoku (1489–1492)","Meiō (1492–1501)","Bunki (1501–1504)","Eishō (1504–1521)","Taiei (1521–1528)","Kyōroku (1528–1532)","Tenbun (1532–1555)","Kōji (1555–1558)","Eiroku (1558–1570)","Genki (1570–1573)","Tenshō (1573–1592)","Bunroku (1592–1596)","Keichō (1596–1615)","Genna (1615–1624)","Kan’ei (1624–1644)","Shōho (1644–1648)","Keian (1648–1652)","Jōō (1652–1655)","Meireki (1655–1658)","Manji (1658–1661)","Kanbun (1661–1673)","Enpō (1673–1681)","Tenna (1681–1684)","Jōkyō (1684–1688)","Genroku (1688–1704)","Hōei (1704–1711)","Shōtoku (1711–1716)","Kyōhō (1716–1736)","Genbun (1736–1741)","Kanpō (1741–1744)","Enkyō (1744–1748)","Kan’en (1748–1751)","Hōreki (1751–1764)","Meiwa (1764–1772)","An’ei (1772–1781)","Tenmei (1781–1789)","Kansei (1789–1801)","Kyōwa (1801–1804)","Bunka (1804–1818)","Bunsei (1818–1830)","Tenpō (1830–1844)","Kōka (1844–1848)","Kaei (1848–1854)","Ansei (1854–1860)","Man’en (1860–1861)","Bunkyū (1861–1864)","Genji (1864–1865)","Keiō (1865–1868)","Meiji","Taishō","Shōwa","Heisei"]},dayPeriods:{am:"AM",pm:"PM"}},persian:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Farvardin","Ordibehesht","Khordad","Tir","Mordad","Shahrivar","Mehr","Aban","Azar","Dey","Bahman","Esfand"],long:["Farvardin","Ordibehesht","Khordad","Tir","Mordad","Shahrivar","Mehr","Aban","Azar","Dey","Bahman","Esfand"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["AP"],short:["AP"],long:["AP"]},dayPeriods:{am:"AM",pm:"PM"}},roc:{months:{narrow:["J","F","M","A","M","J","J","A","S","O","N","D"],short:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],long:["January","February","March","April","May","June","July","August","September","October","November","December"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["Before R.O.C.","Minguo"],short:["Before R.O.C.","Minguo"],long:["Before R.O.C.","Minguo"]},dayPeriods:{am:"AM",pm:"PM"}}}},number:{nu:["latn"],patterns:{decimal:{positivePattern:"{number}",negativePattern:"{minusSign}{number}"},currency:{positivePattern:"{currency}{number}",negativePattern:"{minusSign}{currency}{number}"},percent:{positivePattern:"{number}{percentSign}",negativePattern:"{minusSign}{number}{percentSign}"}},symbols:{latn:{decimal:".",group:",",nan:"NaN",plusSign:"+",minusSign:"-",percentSign:"%",infinity:"∞"}},currencies:{AUD:"A$",BRL:"R$",CAD:"CA$",CNY:"CN¥",EUR:"€",GBP:"£",HKD:"HK$",ILS:"₪",INR:"₹",JPY:"¥",KRW:"₩",MXN:"MX$",NZD:"NZ$",TWD:"NT$",USD:"$",VND:"₫",XAF:"FCFA",XCD:"EC$",XOF:"CFA",XPF:"CFPF"}}});
|
PypiClean
|
/server_mess_is_that_joke-0.1.2.tar.gz/server_mess_is_that_joke-0.1.2/server/server.py
|
import argparse
import configparser
import os
import select
import sys
import threading
import time
from collections import deque
from logging import getLogger
from socket import socket, AF_INET, SOCK_STREAM
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication, QMessageBox
from additionals.server_meta import ServerVerifier
from additionals.server_descriptor import PortVerifier, HostVerifier
from additionals.settings import RESPONSE_200, RESPONSE_400, ACTION, ACCOUNT_NAME, TIME, PRESENCE, USER, MESSAGE, \
MESSAGE_TXT, SENDER, ERROR, TO, \
EXIT, ADD_CONTACT, REMOVE_CONTACT, RESPONSE_202, GET_CONTACTS, USERS_REQUEST, RESPONSE_203, PASSWORD, REGISTER,\
RESPONSE_412, PUB_KEY, PUBLIC_KEY_REQUEST, RESPONSE_511, DATA, RESPONSE_401, RESPONSE_403
from additionals.utils import send_msg, receive_msg
from additionals.decos import Log
from server.server_database import ServerDB
from server.server_gui import MainGui, gui_create_model, create_stat_model, ConfigWindow, HistoryWindow
new_connection = False
conflag_lock = threading.Lock()
@Log()
def arg_parser(default_address, default_port):
"""
argument parser for server_.py
"""
parser = argparse.ArgumentParser()
parser.add_argument('-a', default=default_address, nargs='?', help='ip address')
parser.add_argument('-p', default=default_port, type=int, nargs='?', help=':port')
namespace = parser.parse_args(sys.argv[1:])
addr = namespace.a
port = namespace.p
return addr, port
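# Usage sketch (the address/port values below are hypothetical):
#   python server.py -a 127.0.0.1 -p 7777
# overrides the DEFAULT_IP/DEFAULT_PORT values read from server.ini.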
class Server(threading.Thread, metaclass=ServerVerifier):
"""
Server Main Class
"""
port = PortVerifier()
ip = HostVerifier()
def __init__(self, ip_addr, ip_port, database):
"""
server_ initialization
@param ip_addr: ip address
@param ip_port: ip port
@param database: connected database
"""
self.database = database
self.logger = getLogger('app.server_')
self.ip = ip_addr
self.port = ip_port
self.connected_clients = deque()
self.messages_lst = []
self.r_clients = deque()
self.w_clients = deque()
self.e_clients = deque()
self.names = {}
self.authentication_lst = {}
super().__init__()
@Log()
def init_socket(self):
"""
create socket connection
"""
self.logger.warning(f'server_ starts with {self.ip}:{self.port}')
self.sock = socket(AF_INET, SOCK_STREAM)
self.logger.debug(f'server_ successfully started socket {self.sock}')
self.sock.bind((self.ip, self.port))
self.sock.settimeout(0.5)
self.logger.debug('server_ successfully got socket')
self.sock.listen()
self.logger.debug('server_ started to listen socket')
def run(self):
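# Event loop: accept new connections (the listening socket has a 0.5s
# timeout), collect readable/writable sockets with select(), dispatch every
# incoming message through message_handler(), then flush queued messages
# to their recipients.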
global new_connection
self.init_socket()
while True:
try:
self.client, self.addr = self.sock.accept()
self.authentication_lst[self.client] = round(time.time())
self.logger.debug(f'clients connected {self.client}')
except OSError:
pass
else:
self.connected_clients.append(self.client)
try:
if self.connected_clients:
self.r_clients, self.w_clients, self.e_clients = select.select(self.connected_clients,
self.connected_clients,
[], 0)
except OSError:
pass
if self.r_clients:
for client_to_receive in self.r_clients:
try:
self.message_handler(receive_msg(client_to_receive), client_to_receive)
except Exception as e:
self.logger.error(f'message handling failed for {client_to_receive}: {e}')
self.get_name(client_to_receive)
with conflag_lock:
new_connection = True
for message in self.messages_lst:
try:
self.process_message(message, self.w_clients)
except (ConnectionAbortedError, ConnectionError, ConnectionResetError, ConnectionRefusedError):
self.logger.info(f'Connection with {message[TO]} has been lost')
self.connected_clients.remove(self.names[message[TO]])
self.database.user_logout(message[TO])
del self.names[message[TO]]
with conflag_lock:
new_connection = True
self.messages_lst.clear()
@Log()
def authenticate_request(self, name, pwd):
"""
check the entered user's password against the stored one
@param name: username
@param pwd: password
@return: bool
"""
query_pwd = self.database.check_pwd(name)
return pwd == query_pwd[0]
@Log()
def process_message(self, message, listen_socks):
"""
send messages func
@param message: dict
@param listen_socks: connected sockets
@return:
"""
if message[TO] in self.names and self.names[message[TO]] in listen_socks:
send_msg(self.names[message[TO]], message)
self.logger.info(f'Message to {message[TO]} has been sent from {message[SENDER]}.')
elif message[TO] in self.names and self.names[message[TO]] not in listen_socks:
raise ConnectionError
else:
self.logger.error(
f'User {message[TO]} is not registered on server_, send message failed.')
@Log()
def message_handler(self, message, client):
"""
incoming messages handler
@param message: dict
@param client: username
@return:
"""
global new_connection
self.logger.debug(f'incoming message: {message}')
if ACTION in message and message[ACTION] == REGISTER and TIME in message \
and USER in message and client in self.authentication_lst:
if message[USER][ACCOUNT_NAME] not in self.names \
and not self.database.check_user(message[USER][ACCOUNT_NAME]):
self.names[message[USER][ACCOUNT_NAME]] = client
del self.authentication_lst[client]
client_ip, client_port = client.getpeername()
self.database.user_login(message[USER][ACCOUNT_NAME], client_ip, client_port,
message[USER][PASSWORD], message[USER][PUB_KEY])
self.send(client, RESPONSE_200)
with conflag_lock:
new_connection = True
return
else:
response = RESPONSE_412
response[ERROR] = f'client_ with name "{message[USER][ACCOUNT_NAME]}" is registered already'
self.send(client, response)
self.connected_clients.remove(client)
del self.authentication_lst[client]
client.close()
return
elif ACTION in message and message[ACTION] == PRESENCE and TIME in message \
and USER in message and client in self.authentication_lst and \
self.authenticate_request(message[USER][ACCOUNT_NAME], message[USER][PASSWORD]) and \
self.database.check_user(message[USER][ACCOUNT_NAME]):
if message[USER][ACCOUNT_NAME] not in self.names:
self.names[message[USER][ACCOUNT_NAME]] = client
del self.authentication_lst[client]
client_ip, client_port = client.getpeername()
self.database.user_login(message[USER][ACCOUNT_NAME], client_ip, client_port,
message[USER][PASSWORD], message[USER][PUB_KEY])
self.send(client, RESPONSE_200)
with conflag_lock:
new_connection = True
return
else:
response = RESPONSE_401
response[ERROR] = f'client_ with name "{message[USER][ACCOUNT_NAME]}" is in chat already'
self.send(client, response)
self.connected_clients.remove(client)
del self.authentication_lst[client]
client.close()
return
elif ACTION in message and message[ACTION] == PRESENCE and TIME in message \
and USER in message and client in self.authentication_lst and not\
self.database.check_user(message[USER][ACCOUNT_NAME]):
response = RESPONSE_403
response[ERROR] = f'client_ with name "{message[USER][ACCOUNT_NAME]}" is not registered'
self.send(client, response)
self.connected_clients.remove(client)
del self.authentication_lst[client]
client.close()
return
elif ACTION in message and message[ACTION] == PRESENCE and TIME in message \
and USER in message and client in self.authentication_lst and \
self.database.check_user(message[USER][ACCOUNT_NAME]) and not \
self.authenticate_request(message[USER][ACCOUNT_NAME], message[USER][PASSWORD]):
response = RESPONSE_401
response[ERROR] = 'wrong password'
self.send(client, response)
self.connected_clients.remove(client)
del self.authentication_lst[client]
client.close()
return
elif ACTION in message and message[ACTION] == MESSAGE and TIME in message \
and MESSAGE_TXT in message and SENDER in message and TO in message \
and message[SENDER] not in self.authentication_lst:
if message[TO] in self.names:
self.messages_lst.append(message)
self.database.messages_count(message[SENDER], message[TO])
self.send(client, RESPONSE_200)
else:
response = RESPONSE_400
response[ERROR] = 'client_ is not authorized'
self.send(client, response)
return
elif ACTION in message and message[ACTION] == EXIT and TIME in message \
and SENDER in message and self.names[message[SENDER]] == client \
and message[SENDER] not in self.authentication_lst:
self.database.user_logout(message[SENDER])
self.connected_clients.remove(client)
self.names.pop(message[SENDER])
self.r_clients.remove(client)
self.w_clients.remove(client)
client.close()
with conflag_lock:
new_connection = True
return
elif ACTION in message and message[ACTION] == GET_CONTACTS and SENDER in message and \
self.names[message[SENDER]] == client and message[SENDER] not in self.authentication_lst:
self.response = RESPONSE_203
self.response[GET_CONTACTS] = self.database.get_contact(message[SENDER])
self.send(client, self.response)
return
elif ACTION in message and message[ACTION] == ADD_CONTACT and ACCOUNT_NAME in message and SENDER in message \
and self.names[message[SENDER]] == client and message[SENDER] not in self.authentication_lst:
self.database.add_contact(message[SENDER], message[ACCOUNT_NAME])
self.send(client, RESPONSE_200)
return
elif ACTION in message and message[ACTION] == REMOVE_CONTACT and ACCOUNT_NAME in message and SENDER in message \
and self.names[message[SENDER]] == client and message[SENDER] not in self.authentication_lst:
self.database.delete_contact(message[SENDER], message[ACCOUNT_NAME])
self.send(client, RESPONSE_200)
return
elif ACTION in message and message[ACTION] == USERS_REQUEST and SENDER in message and \
self.names[message[SENDER]] == client and message[SENDER] not in self.authentication_lst:
self.response = RESPONSE_202
self.response[USERS_REQUEST] = [user[0] for user in self.database.users_list()]
self.send(client, self.response)
return
elif ACTION in message and message[ACTION] == PUBLIC_KEY_REQUEST and ACCOUNT_NAME in message:
response = RESPONSE_511
response[DATA] = self.database.get_keys(message[ACCOUNT_NAME])
if response[DATA]:
try:
send_msg(client, response)
except OSError:
self.get_name(client)
else:
response = RESPONSE_400
response[ERROR] = 'No public key for this user'
try:
send_msg(client, response)
except OSError:
self.get_name(client)
else:
self.logger.error(f'created answer to client_ - {RESPONSE_400}')
self.send(client, RESPONSE_400)
return
@Log()
def send(self, *args):
"""
send message function
@param args: socket, message
@return:
"""
send_msg(*args)
@Log()
def get_name(self, client):
"""
handler to detect and delete disconnected users
@param client: username
@return:
"""
try:
if client in self.connected_clients:
self.connected_clients.remove(client)
elif client in self.r_clients:
self.r_clients.remove(client)
elif client in self.w_clients:
self.w_clients.remove(client)
# iterate over a copy: popping while iterating the dict would raise RuntimeError
for name, sock in list(self.names.items()):
if sock == client:
self.names.pop(name)
self.database.user_logout(name)
break
except Exception as e:
self.logger.warning(f'get_name exception {e}')
def main():
"""
main function, create server_ GUI, get server_ settings
@return:
"""
server_config = configparser.ConfigParser()
path = os.path.dirname(os.path.realpath(__file__))
server_config.read(os.path.join(path, 'server.ini'))
ip_addr, ip_port = arg_parser(server_config['SETTINGS']['DEFAULT_IP'], server_config['SETTINGS']['DEFAULT_PORT'])
database = ServerDB(os.path.join(server_config['SETTINGS']['db_path'], server_config['SETTINGS']['db_file_name']))
app = Server(ip_addr, ip_port, database)
app.daemon = True
app.start()
gui_app = QApplication(sys.argv)
main_window = MainGui()
main_window.statusBar().showMessage('Server Working')
main_window.active_users_table.setModel(gui_create_model(database))
main_window.active_users_table.resizeColumnsToContents()
main_window.active_users_table.resizeRowsToContents()
def list_update():
"""
func shows active users in GUI
@return:
"""
global new_connection
if new_connection:
main_window.active_users_table.setModel(
gui_create_model(database))
main_window.active_users_table.resizeColumnsToContents()
main_window.active_users_table.resizeRowsToContents()
with conflag_lock:
new_connection = False
def show_statistics():
"""
func shows messages history in GUI
@return:
"""
global stat_window
stat_window = HistoryWindow()
stat_window.history_table.setModel(create_stat_model(database))
stat_window.history_table.resizeColumnsToContents()
stat_window.history_table.resizeRowsToContents()
stat_window.show()
def server_configuration():
"""
func shows settings window in GUI
@return:
"""
global config_window
config_window = ConfigWindow()
config_window.db_path.insert(server_config['SETTINGS']['db_path'])
config_window.db_file.insert(server_config['SETTINGS']['db_file_name'])
config_window.port.insert(server_config['SETTINGS']['DEFAULT_PORT'])
config_window.ip.insert(server_config['SETTINGS']['DEFAULT_IP'])
config_window.save_btn.clicked.connect(save_server_config)
def save_server_config():
"""
validate settings and save them to the INI file
@return:
"""
global config_window
message = QMessageBox()
server_config['SETTINGS']['db_path'] = config_window.db_path.text()
server_config['SETTINGS']['db_file_name'] = config_window.db_file.text()
try:
port = int(config_window.port.text())
except ValueError:
message.warning(config_window, 'Error', 'Port should be an integer')
else:
server_config['SETTINGS']['DEFAULT_IP'] = config_window.ip.text()
if 1023 < port < 65536:
server_config['SETTINGS']['DEFAULT_PORT'] = str(port)
with open(os.path.join(path, 'server.ini'), 'w') as conf:
server_config.write(conf)
message.information(
config_window, 'OK', 'Settings saved')
else:
message.warning(
config_window,
'Error',
'Port should be in the range 1024 - 65535')
timer = QTimer()
timer.timeout.connect(list_update)
timer.start(1000)
main_window.refresh.triggered.connect(list_update)
main_window.history.triggered.connect(show_statistics)
main_window.server_config.triggered.connect(server_configuration)
gui_app.exec_()
if __name__ == '__main__':
main()
|
PypiClean
|
/sciexp2-expdef-2.0.13.tar.gz/sciexp2-expdef-2.0.13/sciexp2/common/text.py
|
__author__ = "Lluís Vilanova"
__copyright__ = "Copyright 2019-2023, Lluís Vilanova"
__license__ = "GPL version 3 or later"
from collections import OrderedDict
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import pystache
import re
from .utils import OrderedSet
import six
import sys
class ParseError(Exception):
pass
class VariableError(Exception):
pass
class ExtractError(Exception):
pass
def _get_parsed_elems(parsed):
return parsed._parse_tree
def _parse(text, allow_nested=True, allow_inverted=True):
try:
pystache.render(text, {})
except pystache.parser.ParsingError as e:
_, _, exc_traceback = sys.exc_info()
new_e = ParseError(str(e))
six.reraise(new_e.__class__, new_e, exc_traceback)
parsed = pystache.parse(text)
elems = _get_parsed_elems(parsed)
if len(elems) == 0 and len(text) > 0:
raise ParseError("section start tag mismatch")
def traverse(elems, nested):
seen_node = False
for elem in elems:
if not isinstance(elem, six.string_types):
seen_node = True
if isinstance(elem, six.string_types):
pass
elif isinstance(elem, (pystache.parser._EscapeNode,
pystache.parser._ChangeNode)):
pass
elif isinstance(elem, pystache.parser._SectionNode):
nested = traverse(_get_parsed_elems(elem.parsed), True)
if nested and not allow_nested:
raise ParseError(
"nested tags not allowed in section %r" % elem.key)
elif isinstance(elem, pystache.parser._InvertedNode):
if not allow_inverted:
raise ParseError("inverted sections not allowed: %s" % elem.key)
nested = traverse(_get_parsed_elems(elem.parsed_section), True)
if nested and not allow_nested:
raise ParseError(
"nested tags not allowed in inverted section %r" % elem.key)
elif isinstance(elem, pystache.parser._PartialNode):
raise ParseError(
"partial tags not allowed")
else:
raise ParseError("tag not allowed %r" % elem.__class__)
return seen_node
traverse(elems, False)
return parsed
def get_variables(text, nested=False):
"""Get the variables referenced in the given text.
Parameters
----------
text : str
Text to get variables from.
nested : optional
Whether to allow nested variables. Can have values "all" for all the
variables, or "inner" for just the inner variables.
Examples
--------
>>> get_variables("{{a}}")
['a']
>>> get_variables("{{#a}} {{b}} {{/a}}", nested="inner")
['b']
>>> get_variables("{{#a}} {{b}} {{/a}}", nested="all")
['a', 'b']
"""
if nested not in [False, "all", "inner"]:
raise ValueError("invalid nested value:", nested)
parsed = _parse(text, allow_nested=bool(nested))
if not nested: # equivalent due to exception raised above
nested = "all"
def traverse(elems, variables):
added_variables = False
for elem in elems:
if isinstance(elem, pystache.parser._SectionNode):
traversed_variables = traverse(_get_parsed_elems(elem.parsed),
variables)
if nested == "all":
variables.add(elem.key)
added_variables = True
elif nested == "inner" and not traversed_variables:
variables.add(elem.key)
added_variables = True
elif isinstance(elem, pystache.parser._InvertedNode):
traversed_variables = traverse(_get_parsed_elems(elem.parsed_section),
variables)
if nested == "all":
variables.add(elem.key)
added_variables = True
elif nested == "inner" and not traversed_variables:
variables.add(elem.key)
added_variables = True
elif isinstance(elem, (pystache.parser._EscapeNode, pystache.parser._PartialNode)):
variables.add(elem.key)
added_variables = True
else:
assert isinstance(elem, six.string_types), elem
return added_variables
elems = _get_parsed_elems(parsed)
variables = set()
traverse(elems, variables)
return sorted(variables)
class Translator(object):
"""Translate a template text with given variables."""
def __init__(self, template):
"""
Parameters
----------
template : str
Template text to translate.
"""
self._template = template
self._parsed = _parse(self._template, allow_nested=True)
def identity(arg):
return arg
self._renderer = pystache.renderer.Renderer(search_dirs=[], file_extension=False,
partials=None, escape=identity,
missing_tags="strict")
def translate(self, env, recursive=True):
"""Apply translation with given variables.
Parameters
----------
env : dict
Mapping of variable names to their values.
recursive : bool, optional
Whether to apply translations recursively.
Examples
--------
You can perform simple text translations:
>>> t = Translator('Hello {{a}}')
>>> t.translate({'a': 'you'})
'Hello you'
>>> t.translate({'a': [1, 2]})
'Hello [1, 2]'
And also recursive ones:
>>> t.translate({'a': '{{b}}', 'b': 'them'})
'Hello them'
More complex cases like conditionals are also possible:
>>> t = Translator('{{#a}}is true{{/a}}{{^a}}is false{{/a}}')
>>> t.translate({'a': 1})
'is true'
>>> t.translate({'a': 0})
'is false'
Or even calls to functions (arguments are the unexpanded text on the template):
>>> Translator('{{a}}').translate({'a': lambda: 'value'})
'value'
>>> Translator('{{#a}}{{b}}{{/a}}').translate(
... {'a': lambda arg: 2*arg, 'b': 4})
'44'
>>> Translator('{{#a}}{{b}}{{/a}}').translate(
... {'a': lambda arg: " ".join(list(arg))})
'{ { b } }'
And expansion of nested variables with multiple values is also possible:
>>> Translator('{{#a}}A.B=={{b}} {{/a}}').translate({'a': [{'b': 1}, {'b': 2}]})
'A.B==1 A.B==2 '
"""
if not isinstance(env, Mapping):
raise TypeError("not a mapping: %r" % env)
template_new = self._template
parsed_new = self._parsed
while True:
template_old = template_new
parsed_old = parsed_new
try:
template_new = self._renderer.render(parsed_new, env)
except pystache.context.KeyNotFoundError as e:
_, _, exc_traceback = sys.exc_info()
new_e = VariableError("missing variable %s" % e.key)
six.reraise(new_e.__class__, new_e, exc_traceback)
except pystache.common.TemplateNotFoundError as e:
_, _, exc_traceback = sys.exc_info()
new_e = VariableError(str(e))
six.reraise(new_e.__class__, new_e, exc_traceback)
if not recursive:
break
elif template_old == template_new:
break
parsed_new = _parse(template_new, allow_nested=True)
return template_new
def translate_many(self, envs, recursive=True, ignore_variable_error=False,
with_envs=False):
"""Apply translation with given set of variables.
Parameters
----------
envs : sequence of dict
Sequence of variable names to value mappings to apply the
translation for.
recursive : bool, optional
Whether to apply translations recursively.
ignore_variable_error : bool, optional
Ignore translations for variable maps that have missing variables.
with_envs : bool, optional
Get the set of maps that led to each translation.
Returns
-------
list of str
Translations when ``with_envs`` is ``False``.
list of (str, [env])
Translations with their corresponding variable maps when
``with_envs`` is ``True``.
Notes
-----
The result is guaranteed to maintain the order of the elements of
`envs`.
Examples
--------
You can very easily translate a sequence of variable maps:
>>> t = Translator('Hello {{a}}')
>>> t.translate_many([{'a': 'you'}, {'a': 'them'}])
['Hello you', 'Hello them']
Multiple maps can also translate into the same text:
>>> t.translate_many([{'a': 'you'}, {'a': 'them', 'b': 1}, {'a': 'them', 'b': 2}])
['Hello you', 'Hello them']
But you can also get the maps that led to each translation:
>>> t = Translator('Hello {{a}}')
>>> translated = t.translate_many([{'a': 'you'}, {'a': 'them', 'b': 1},
... {'a': 'them', 'b': 2}], with_envs=True)
>>> translated == [('Hello you', [{'a': 'you'}]),
... ('Hello them', [{'a': 'them', 'b': 1},
... {'a': 'them', 'b': 2}])]
True
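Maps with missing variables can be skipped instead of raising an error
(this follows from the documented ``ignore_variable_error`` behaviour):
>>> t.translate_many([{'a': 'you'}, {'b': 1}], ignore_variable_error=True)
['Hello you']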
"""
if with_envs:
result = OrderedDict()
def add(key, val):
if key not in result:
result[key] = []
result[key].append(val)
else:
result_track = OrderedSet()
result = []
def add(key, val):
if key not in result_track:
result_track.add(key)
result.append(key)
for env in envs:
try:
text = self.translate(env)
except VariableError:
if not ignore_variable_error:
raise
else:
add(text, env)
if with_envs:
return list(result.items())
else:
return result
def translate(template, env, **kwargs):
"""Shorthand for ``Translator(template).translate(env, **kwargs)``."""
return Translator(template=template).translate(env=env, **kwargs)
def translate_many(template, envs, **kwargs):
"""Shorthand for ``Translator(template).translate_many(envs, **kwargs)``."""
return Translator(template=template).translate_many(envs=envs, **kwargs)
class Extractor(object):
"""Extract a dict with the variable values that match a given template.
Variables and sections on the template are used to define regular
expressions, following Python's `syntax
<http://docs.python.org/library/re.html#regular-expression-syntax>`_.
"""
def __init__(self, template):
"""
Parameters
----------
template : str
Template text to extract from.
"""
self._template = template
parsed = _parse(template, allow_nested=False, allow_inverted=False)
regex = ""
variables = {}
for elem in _get_parsed_elems(parsed):
if isinstance(elem, six.string_types):
regex += elem
elif isinstance(elem, pystache.parser._SectionNode):
if elem.key in variables:
raise ParseError(
"regex for variable %s has already been set: %s" % (
elem.key, variables[elem.key]))
elem_regex = _get_parsed_elems(elem.parsed)
if len(elem_regex) == 0:
raise ParseError(
"regex for variable %s cannot be empty" % elem.key)
elem_regex = elem_regex[0]
assert len(elem_regex) > 0, template
variables[elem.key] = elem_regex
regex += "(?P<%s>%s)" % (elem.key, elem_regex)
elif isinstance(elem, pystache.parser._EscapeNode):
if elem.key in variables:
regex += "(?P=%s)" % elem.key
else:
elem_regex = ".+"
variables[elem.key] = elem_regex
regex += "(?P<%s>%s)" % (elem.key, elem_regex)
else:
# silently ignore
pass
self._cre = re.compile(regex)
def extract(self, text):
"""Apply extraction to given text.
Parameters
----------
text : str
Text to extract from.
Examples
--------
You can perform simple text extractions, where variables correspond to
the simple regex ``.+``:
>>> e = Extractor('Hello {{a}}')
>>> e.extract('Hello world')
{'a': 'world'}
>>> e.extract('Hello 123!')
{'a': '123!'}
More complex regexes can be specified using section tags:
>>> Extractor('Hello {{#a}}[0-9]+{{/a}}.*').extract('Hello 123!')
{'a': '123'}
And using the same variable on multiple tags ensures they all match the
same contents:
>>> extracted = Extractor('{{#a}}[0-9]+{{/a}}.*{{a}}{{b}}').extract('123-123456')
>>> extracted == {'a': '123', 'b': '456'}
True
"""
match = self._cre.match(text)
if match is None:
raise ExtractError(
"could not extract variables from template %r (regex: %r)" % (
self._template, self._cre.pattern))
return match.groupdict()
def extract(template, text):
"""Shorthand for ``Extractor(template).extract(text)``."""
return Extractor(template).extract(text)
__all__ = [
"ParseError", "VariableError", "ExtractError",
"get_variables",
"Translator", "translate", "translate_many",
"Extractor", "extract",
]
|
PypiClean
|
/venture_ai-0.9.9-py3-none-any.whl/venture/logic/model_selector_logic.py
|
import venture.logic.token_count_logic as token_count_logic
from venture.metadata import Metadata
from venture.config import Config
from typing import List
def get_model_based_on_token_count(token_count:int):
token_count = int(token_count)
lowest_model_name = None
lowest_model_token_count = max(Config.MODEL_NAME_TO_MAX_TOKENS.values()) * 2
for model_name, max_tokens in Config.MODEL_NAME_TO_MAX_TOKENS.items():
if token_count < max_tokens and max_tokens < lowest_model_token_count:
lowest_model_name = model_name
lowest_model_token_count = max_tokens
return lowest_model_name
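# Illustrative sketch (the limit map below is hypothetical, not part of Config):
# with Config.MODEL_NAME_TO_MAX_TOKENS == {"small": 4096, "large": 16384},
# the selector returns the smallest model whose context window fits the request:
#   get_model_based_on_token_count(3000)   -> "small"
#   get_model_based_on_token_count(8000)   -> "large"
#   get_model_based_on_token_count(20000)  -> None  (no model fits)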
def get_model_name_for_function_name_extract(metadata:Metadata, summary_token_count:int) -> str:
token_count = metadata.token_counts.cosmos_function_name_all_prompt_count + summary_token_count
token_count = token_count + Config.FUNCTION_NAME_RESERVED_RESPONSE_TOKEN_COUNT
model_name = get_model_based_on_token_count(token_count)
return model_name
def get_model_name_for_ownership_extract(metadata:Metadata, processed_doc_token_count:int) -> str:
token_count = metadata.token_counts.cosmos_system_ownership_all_prompt_count + processed_doc_token_count
token_count = token_count + Config.OWNERSHIP_RESERVED_RESPONSE_TOKEN_COUNT
model_name = get_model_based_on_token_count(token_count)
return model_name
def get_model_name_for_summary_extract(metadata:Metadata, processed_doc_token_count:int) -> str:
token_count = metadata.token_counts.cosmos_system_summary_count + processed_doc_token_count
token_count = token_count + Config.SUMMARY_RESERVED_RESPONSE_TOKEN_COUNT
model_name = get_model_based_on_token_count(token_count)
return model_name
def get_model_name_for_retrieval_caller(metadata:Metadata, user_query:str, functions:List[object]) -> str:
token_count = metadata.token_counts.retrieval_caller_system_count + \
token_count_logic.count_tokens(user_query) + \
token_count_logic.count_tokens(Config.EXTRA_ROLE) + \
sum(metadata.token_counts.retrieval_function_name_to_count[f['name']] for f in functions)
token_count = token_count + Config.RESERVED_RESPONSE_TOKEN_COUNT
model_name = get_model_based_on_token_count(token_count)
return model_name
def get_model_name_for_retrieval_verdict(metadata:Metadata, user_message:str) -> str:
token_count = metadata.token_counts.retrieval_verdict_system_count + \
token_count_logic.count_tokens(user_message) + \
token_count_logic.count_tokens(Config.EXTRA_ROLE)
token_count = token_count + Config.RESERVED_RESPONSE_TOKEN_COUNT
model_name = get_model_based_on_token_count(token_count)
return model_name
def get_model_name_for_llm_response(metadata:Metadata, user_query:str, context_file_names_to_use:List[str]) -> str:
file_name_to_parsed_doc = metadata.file_name_to_parsed_doc
token_count = metadata.token_counts.explore_all_prompt_token_count + \
token_count_logic.count_tokens(Config.EXTRA_ROLE) + \
sum(file_name_to_parsed_doc[file_name].content_token_count for file_name in context_file_names_to_use) + \
token_count_logic.count_tokens(user_query)
token_count = token_count + Config.RESERVED_RESPONSE_TOKEN_COUNT
model_name = get_model_based_on_token_count(token_count)
return model_name
|
PypiClean
|
/thumbor-7.5.1.tar.gz/thumbor-7.5.1/docs/upscale.rst
|
Upscale
=======
Usage: `upscale()`
Description
-----------
This filter tells thumbor to upscale your images. This only makes sense with
"fit-in" or "adaptive-fit-in".
This means that if an original image is :math:`300px` wide by :math:`200px` high and you
ask for a :math:`600x500` image, the filter will resize it to :math:`600x400`.
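(The fit-in scale factor is :math:`s = \min(600/300, 500/200) = \min(2, 2.5) = 2`, so both dimensions are doubled, which yields :math:`600x400`.)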
Arguments
---------
No arguments allowed.
Example
-------
::
http://localhost:8888/unsafe/fit-in/600x500/filters:upscale()/https://raw.githubusercontent.com/thumbor/thumbor/e86324e49d7e53acc2a8057e43f3fdd2ca5cea75/docs/images/dice_transparent_background.png
|
PypiClean
|
/tencentcloud-sdk-python-intl-en-3.0.786.tar.gz/tencentcloud-sdk-python-intl-en-3.0.786/tencentcloud/lighthouse/v20200324/errorcodes.py
|
# Error with CAM signature/authentication.
AUTHFAILURE = 'AuthFailure'
# Invalid region.
AUTHFAILURE_INVALIDREGION = 'AuthFailure.InvalidRegion'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Failed to create the image.
FAILEDOPERATION_CREATEBLUEPRINTFAILED = 'FailedOperation.CreateBlueprintFailed'
# Failed to create the instance.
FAILEDOPERATION_CREATEINSTANCESFAILED = 'FailedOperation.CreateInstancesFailed'
# Failed to create the key pair.
FAILEDOPERATION_CREATEKEYPAIRFAILED = 'FailedOperation.CreateKeyPairFailed'
# Failed to delete the key pair.
FAILEDOPERATION_DELETEKEYPAIRFAILED = 'FailedOperation.DeleteKeyPairFailed'
# Failed to query the image. Try again later.
FAILEDOPERATION_DESCRIBEBLUEPRINTSFAILED = 'FailedOperation.DescribeBlueprintsFailed'
# An error occurred when querying the instance status.
FAILEDOPERATION_DESCRIBEINSTANCESTATUS = 'FailedOperation.DescribeInstanceStatus'
# Failed to query the instance configuration adjustment.
FAILEDOPERATION_DESCRIBEINSTANCESMODIFICATIONERROR = 'FailedOperation.DescribeInstancesModificationError'
# An error occurred when querying whether the instance is returnable.
FAILEDOPERATION_DESCRIBEINSTANCESRETURNABLEERROR = 'FailedOperation.DescribeInstancesReturnableError'
# Failed to query traffic packages.
FAILEDOPERATION_DESCRIBEINSTANCESTRAFFICPACKAGESFAILED = 'FailedOperation.DescribeInstancesTrafficPackagesFailed'
# An error occurred when querying whether the resource is returnable.
FAILEDOPERATION_DESCRIBERESOURCESRETURNABLEERROR = 'FailedOperation.DescribeResourcesReturnableError'
# Unable to terminate the resource. Please retry later.
FAILEDOPERATION_DESTROYRESOURCESFAILED = 'FailedOperation.DestroyResourcesFailed'
# Failed to manipulate the firewall rule.
FAILEDOPERATION_FIREWALLRULESOPERATIONFAILED = 'FailedOperation.FirewallRulesOperationFailed'
# Failed to import the key pair.
FAILEDOPERATION_IMPORTKEYPAIRFAILED = 'FailedOperation.ImportKeyPairFailed'
# Failed to manipulate the instance.
FAILEDOPERATION_INSTANCEOPERATIONFAILED = 'FailedOperation.InstanceOperationFailed'
# The command could not be found.
FAILEDOPERATION_INVALIDCOMMANDNOTFOUND = 'FailedOperation.InvalidCommandNotFound'
# Failed to return the resource.
FAILEDOPERATION_ISOLATERESOURCESFAILED = 'FailedOperation.IsolateResourcesFailed'
# Failed to change the instance bundle.
FAILEDOPERATION_MODIFYINSTANCESBUNDLEFAILED = 'FailedOperation.ModifyInstancesBundleFailed'
# Failed to modify the resource attributes.
FAILEDOPERATION_MODIFYRESOURCESATTRIBUTEFAILED = 'FailedOperation.ModifyResourcesAttributeFailed'
# A request error occurred.
FAILEDOPERATION_REQUESTERROR = 'FailedOperation.RequestError'
# Failed to manipulate the snapshot.
FAILEDOPERATION_SNAPSHOTOPERATIONFAILED = 'FailedOperation.SnapshotOperationFailed'
# Failed to call the billing gateway service.
FAILEDOPERATION_TRADECALLBILLINGGATEWAYFAILED = 'FailedOperation.TradeCallBillingGatewayFailed'
# Failed to query the price.
FAILEDOPERATION_TRADEGETPRICEFAILED = 'FailedOperation.TradeGetPriceFailed'
# The operation failed. The custom image could not be created.
FAILEDOPERATION_UNABLETOCREATEBLUEPRINT = 'FailedOperation.UnableToCreateBlueprint'
# Failed to create the instance
FAILEDOPERATION_UNABLETOCREATEINSTANCES = 'FailedOperation.UnableToCreateInstances'
# Internal error.
INTERNALERROR = 'InternalError'
# The disk query returned invalid content.
INTERNALERROR_DESCRIBEDISKSRETURNABLEERROR = 'InternalError.DescribeDisksReturnableError'
# Failed to query the instance status. Please try again later.
INTERNALERROR_DESCRIBEINSTANCESTATUS = 'InternalError.DescribeInstanceStatus'
# Failed to query whether the configuration of the instance can be modified.
INTERNALERROR_DESCRIBEINSTANCESMODIFICATION = 'InternalError.DescribeInstancesModification'
# Failed to query whether the configuration of the instance can be modified.
INTERNALERROR_DESCRIBEINSTANCESMODIFICATIONERROR = 'InternalError.DescribeInstancesModificationError'
# Failed to query whether the instance can be returned.
INTERNALERROR_DESCRIBEINSTANCESRETURNABLEERROR = 'InternalError.DescribeInstancesReturnableError'
# An error occurred while querying the instance traffic package.
INTERNALERROR_DESCRIBEINSTANCESTRAFFICPACKAGESFAILED = 'InternalError.DescribeInstancesTrafficPackagesFailed'
# Error querying the resource
INTERNALERROR_DESCRIBERESOURCESRETURNABLEERROR = 'InternalError.DescribeResourcesReturnableError'
# Failed to get the snapshot quota lock.
INTERNALERROR_GETSNAPSHOTALLOCQUOTALOCKERROR = 'InternalError.GetSnapshotAllocQuotaLockError'
# Failed to find this API
INTERNALERROR_INVALIDACTIONNOTFOUND = 'InternalError.InvalidActionNotFound'
# The package price is incorrect.
INTERNALERROR_INVALIDBUNDLEPRICE = 'InternalError.InvalidBundlePrice'
# The command `DescribeInstanceLoginKeyPair` could not be found.
INTERNALERROR_INVALIDCOMMANDNOTFOUND = 'InternalError.InvalidCommandNotFound'
# There was an error in the request.
INTERNALERROR_REQUESTERROR = 'InternalError.RequestError'
# Failed to call billing gateway service
INTERNALERROR_TRADECALLBILLINGGATEWAYFAILED = 'InternalError.TradeCallBillingGatewayFailed'
# Failed to get the price.
INTERNALERROR_TRADEGETPRICEFAILED = 'InternalError.TradeGetPriceFailed'
# Incorrect parameter.
INVALIDPARAMETER = 'InvalidParameter'
# The package and the image do not match.
INVALIDPARAMETER_BUNDLEANDBLUEPRINTNOTMATCH = 'InvalidParameter.BundleAndBlueprintNotMatch'
# Undefined service package ID.
INVALIDPARAMETER_BUNDLEIDNOTFOUND = 'InvalidParameter.BundleIdNotFound'
# The parameters conflict.
INVALIDPARAMETER_CONFLICT = 'InvalidParameter.Conflict'
# Invalid parameter: the number of `Values` in the `Filter` parameter exceeds the allowed maximum number.
INVALIDPARAMETER_FILTERVALUELIMITEXCEEDED = 'InvalidParameter.FilterValueLimitExceeded'
# Invalid parameter: the firewall rule is duplicated.
INVALIDPARAMETER_FIREWALLRULESDUPLICATED = 'InvalidParameter.FirewallRulesDuplicated'
# Invalid parameter: the firewall rule already exists.
INVALIDPARAMETER_FIREWALLRULESEXIST = 'InvalidParameter.FirewallRulesExist'
# Invalid parameter: the `Filter` parameter is invalid.
INVALIDPARAMETER_INVALIDFILTER = 'InvalidParameter.InvalidFilter'
# Invalid parameter: the value of `Name` in the `Filter` parameter is invalid.
INVALIDPARAMETER_INVALIDFILTERINVALIDKEY = 'InvalidParameter.InvalidFilterInvalidKey'
# Invalid parameter: the value of `Name` in the `Filter` parameter is not a string.
INVALIDPARAMETER_INVALIDFILTERINVALIDNAMENOTSTR = 'InvalidParameter.InvalidFilterInvalidNameNotStr'
# Invalid parameter: the `Values` in the `Filter` parameter is not a list.
INVALIDPARAMETER_INVALIDFILTERINVALIDVALUESNOTLIST = 'InvalidParameter.InvalidFilterInvalidValuesNotList'
# Invalid parameter: the `Filter` parameter is not a dictionary.
INVALIDPARAMETER_INVALIDFILTERNOTDICT = 'InvalidParameter.InvalidFilterNotDict'
# Invalid parameter: there are unsupported `Name` values in the `Filter` parameter.
INVALIDPARAMETER_INVALIDFILTERNOTSUPPORTEDNAME = 'InvalidParameter.InvalidFilterNotSupportedName'
# Exactly one attribute to modify must be specified.
INVALIDPARAMETER_MUSTSPECIFYONEATTRIBUTETOMODIFY = 'InvalidParameter.MustSpecifyOneAttributeToModify'
# Invalid parameter: only one attribute can be modified at a time.
INVALIDPARAMETER_ONLYALLOWMODIFYONEATTRIBUTE = 'InvalidParameter.OnlyAllowModifyOneAttribute'
# Invalid parameter: the parameters conflict.
INVALIDPARAMETER_PARAMETERCONFLICT = 'InvalidParameter.ParameterConflict'
# Incorrect parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# The configuration of this instance does not meet the requirements of the specified image.
INVALIDPARAMETERVALUE_BLUEPRINTCONFIGNOTMATCH = 'InvalidParameterValue.BlueprintConfigNotMatch'
# The image ID is invalid, as instance reinstallation does not allow switching the OS type.
INVALIDPARAMETERVALUE_BLUEPRINTID = 'InvalidParameterValue.BlueprintId'
# Invalid parameter value: the image ID format is invalid.
INVALIDPARAMETERVALUE_BLUEPRINTIDMALFORMED = 'InvalidParameterValue.BlueprintIdMalformed'
# The package and the image do not match.
INVALIDPARAMETERVALUE_BUNDLEANDBLUEPRINTNOTMATCH = 'InvalidParameterValue.BundleAndBlueprintNotMatch'
# The ID format of the CCN instance is invalid.
INVALIDPARAMETERVALUE_CCNIDMALFORMED = 'InvalidParameterValue.CcnIdMalformed'
# Invalid parameter value: the client token is too long.
INVALIDPARAMETERVALUE_CLIENTTOKENTOOLONG = 'InvalidParameterValue.ClientTokenTooLong'
# The disk name is too long.
INVALIDPARAMETERVALUE_DISKNAMETOOLONG = 'InvalidParameterValue.DiskNameTooLong'
# The disk size has changed.
INVALIDPARAMETERVALUE_DISKSIZENOTMATCH = 'InvalidParameterValue.DiskSizeNotMatch'
# The parameter `KeyName` already exists and is duplicated.
INVALIDPARAMETERVALUE_DUPLICATEPARAMETERVALUE = 'InvalidParameterValue.DuplicateParameterValue'
# Invalid parameter value: duplicate values are not allowed.
INVALIDPARAMETERVALUE_DUPLICATED = 'InvalidParameterValue.Duplicated'
# The length of the firewall rule description exceeds the limit.
INVALIDPARAMETERVALUE_FIREWALLRULEDESCRIPTIONTOOLONG = 'InvalidParameterValue.FirewallRuleDescriptionTooLong'
# Invalid parameter value: the instance ID format is invalid.
INVALIDPARAMETERVALUE_INSTANCEIDMALFORMED = 'InvalidParameterValue.InstanceIdMalformed'
# Invalid parameter value: the length of the instance name exceeds the upper limit.
INVALIDPARAMETERVALUE_INSTANCENAMETOOLONG = 'InvalidParameterValue.InstanceNameTooLong'
# The image ID is invalid.
INVALIDPARAMETERVALUE_INVALIDBLUEPRINTID = 'InvalidParameterValue.InvalidBlueprintId'
# The type of the image OS is invalid.
INVALIDPARAMETERVALUE_INVALIDBLUEPRINTPLATFORMTYPE = 'InvalidParameterValue.InvalidBlueprintPlatformType'
# Invalid image status value.
INVALIDPARAMETERVALUE_INVALIDBLUEPRINTSTATE = 'InvalidParameterValue.InvalidBlueprintState'
# The image type is invalid.
INVALIDPARAMETERVALUE_INVALIDBLUEPRINTTYPE = 'InvalidParameterValue.InvalidBlueprintType'
# Invalid package parameter.
INVALIDPARAMETERVALUE_INVALIDBUNDLE = 'InvalidParameterValue.InvalidBundle'
# The console display type is invalid.
INVALIDPARAMETERVALUE_INVALIDCONSOLEDISPLAYTYPES = 'InvalidParameterValue.InvalidConsoleDisplayTypes'
# Invalid parameter value: the disk ID format is invalid.
INVALIDPARAMETERVALUE_INVALIDDISKIDMALFORMED = 'InvalidParameterValue.InvalidDiskIdMalformed'
# The value of setting whether to use the default key pair for login is incorrect.
INVALIDPARAMETERVALUE_INVALIDINSTANCELOGINKEYPAIRPERMITLOGIN = 'InvalidParameterValue.InvalidInstanceLoginKeyPairPermitLogin'
# Invalid parameter value: the IP address format is invalid.
INVALIDPARAMETERVALUE_INVALIDIPFORMAT = 'InvalidParameterValue.InvalidIpFormat'
# Invalid parameter value: the key pair name is empty.
INVALIDPARAMETERVALUE_INVALIDKEYPAIRNAMEEMPTY = 'InvalidParameterValue.InvalidKeyPairNameEmpty'
# The key pair name is invalid.
INVALIDPARAMETERVALUE_INVALIDKEYPAIRNAMEINCLUDEILLEGALCHAR = 'InvalidParameterValue.InvalidKeyPairNameIncludeIllegalChar'
# The key pair name is too long.
INVALIDPARAMETERVALUE_INVALIDKEYPAIRNAMETOOLONG = 'InvalidParameterValue.InvalidKeyPairNameTooLong'
# Invalid parameter combination.
INVALIDPARAMETERVALUE_INVALIDPARAMETERCOMBINATION = 'InvalidParameterValue.InvalidParameterCombination'
# The password in the parameter is invalid.
INVALIDPARAMETERVALUE_INVALIDPASSWORD = 'InvalidParameterValue.InvalidPassword'
# Incorrect quota resource name.
INVALIDPARAMETERVALUE_INVALIDRESOURCEQUOTARESOURCENAME = 'InvalidParameterValue.InvalidResourceQuotaResourceName'
# Invalid parameter value: the scene ID format is invalid.
INVALIDPARAMETERVALUE_INVALIDSCENEIDMALFORMED = 'InvalidParameterValue.InvalidSceneIdMalformed'
# Invalid `Zone` value.
INVALIDPARAMETERVALUE_INVALIDZONE = 'InvalidParameterValue.InvalidZone'
# Invalid parameter value: the key pair ID format is invalid.
INVALIDPARAMETERVALUE_KEYPAIRIDMALFORMED = 'InvalidParameterValue.KeyPairIdMalformed'
# The public key of this key pair already exists in the system and cannot be reused.
INVALIDPARAMETERVALUE_KEYPAIRPUBLICKEYDUPLICATED = 'InvalidParameterValue.KeyPairPublicKeyDuplicated'
# The format of the specified public key is incorrect.
INVALIDPARAMETERVALUE_KEYPAIRPUBLICKEYMALFORMED = 'InvalidParameterValue.KeyPairPublicKeyMalformed'
# Invalid parameter value: the number of parameter values exceeds the upper limit.
INVALIDPARAMETERVALUE_LIMITEXCEEDED = 'InvalidParameterValue.LimitExceeded'
# Invalid parameter value: it cannot be negative.
INVALIDPARAMETERVALUE_NEGATIVE = 'InvalidParameterValue.Negative'
# It is not allowed to change the OS type.
INVALIDPARAMETERVALUE_NOTALLOWTOCHANGEPLATFORMTYPE = 'InvalidParameterValue.NotAllowToChangePlatformType'
# Invalid parameter value: it is not within the valid range.
INVALIDPARAMETERVALUE_OUTOFRANGE = 'InvalidParameterValue.OutOfRange'
# The region does not exist.
INVALIDPARAMETERVALUE_REGIONNOTFOUND = 'InvalidParameterValue.RegionNotFound'
# The regions do not match.
INVALIDPARAMETERVALUE_REGIONNOTMATCH = 'InvalidParameterValue.RegionNotMatch'
# Unsupported region.
INVALIDPARAMETERVALUE_REGIONNOTSUPPORTED = 'InvalidParameterValue.RegionNotSupported'
# The region is unavailable.
INVALIDPARAMETERVALUE_REGIONUNAVAILABLE = 'InvalidParameterValue.RegionUnavailable'
# Invalid parameter value: the snapshot ID format is invalid.
INVALIDPARAMETERVALUE_SNAPSHOTIDMALFORMED = 'InvalidParameterValue.SnapshotIdMalformed'
# Invalid parameter value: the length of the snapshot name exceeds the upper limit.
INVALIDPARAMETERVALUE_SNAPSHOTNAMETOOLONG = 'InvalidParameterValue.SnapshotNameTooLong'
# The length of the parameter value exceeds the upper limit.
INVALIDPARAMETERVALUE_TOOLONG = 'InvalidParameterValue.TooLong'
# Invalid AZ.
INVALIDPARAMETERVALUE_ZONEINVALID = 'InvalidParameterValue.ZoneInvalid'
# Reached the quota limit.
LIMITEXCEEDED = 'LimitExceeded'
# Reached the upper limit of data disks attached to the instance.
LIMITEXCEEDED_ATTACHDATADISKQUOTALIMITEXCEEDED = 'LimitExceeded.AttachDataDiskQuotaLimitExceeded'
# The image quota is exceeded.
LIMITEXCEEDED_BLUEPRINTQUOTALIMITEXCEEDED = 'LimitExceeded.BlueprintQuotaLimitExceeded'
# The firewall rule quota is exceeded.
LIMITEXCEEDED_FIREWALLRULESLIMITEXCEEDED = 'LimitExceeded.FirewallRulesLimitExceeded'
# The instance quota has been used up.
LIMITEXCEEDED_INSTANCEQUOTALIMITEXCEEDED = 'LimitExceeded.InstanceQuotaLimitExceeded'
# Reached the upper limit of resources that can be returned.
LIMITEXCEEDED_ISOLATERESOURCESLIMITEXCEEDED = 'LimitExceeded.IsolateResourcesLimitExceeded'
# The key pair quota is exceeded.
LIMITEXCEEDED_KEYPAIRLIMITEXCEEDED = 'LimitExceeded.KeyPairLimitExceeded'
# The snapshot quota is exceeded.
LIMITEXCEEDED_SNAPSHOTQUOTALIMITEXCEEDED = 'LimitExceeded.SnapshotQuotaLimitExceeded'
# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'
# This instance does not support upgrading packages.
OPERATIONDENIED_BUNDLENOTSUPPORTMODIFY = 'OperationDenied.BundleNotSupportModify'
# The disk is being created.
OPERATIONDENIED_DISKCREATING = 'OperationDenied.DiskCreating'
# The disk is being manipulated. Try again later.
OPERATIONDENIED_DISKOPERATIONINPROGRESS = 'OperationDenied.DiskOperationInProgress'
# The cloud disk type of the disk does not support this operation.
OPERATIONDENIED_DISKUSAGENOTSUPPORTOPERATION = 'OperationDenied.DiskUsageNotSupportOperation'
# It is not allowed to manipulate this instance, as it is being created.
OPERATIONDENIED_INSTANCECREATING = 'OperationDenied.InstanceCreating'
# It is not allowed to manipulate this instance, as the last operation is still in progress.
OPERATIONDENIED_INSTANCEOPERATIONINPROGRESS = 'OperationDenied.InstanceOperationInProgress'
# The operation to create a snapshot is denied.
OPERATIONDENIED_OPERATIONDENIEDCREATESNAPSHOT = 'OperationDenied.OperationDeniedCreateSnapshot'
# Instances using storage packages do not support snapshot creation.
OPERATIONDENIED_OPERATIONDENIEDCREATESNAPSHOTFORSTORAGEBUNDLE = 'OperationDenied.OperationDeniedCreateSnapshotForStorageBundle'
# The key pair is in use.
RESOURCEINUSE_KEYPAIRINUSE = 'ResourceInUse.KeyPairInUse'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# The image ID does not exist.
RESOURCENOTFOUND_BLUEPRINTIDNOTFOUND = 'ResourceNotFound.BlueprintIdNotFound'
# The specified image does not exist. Please check whether the `BlueprintId` of the image is correct.
RESOURCENOTFOUND_BLUEPRINTNOTFOUND = 'ResourceNotFound.BlueprintNotFound'
# The disk ID does not exist.
RESOURCENOTFOUND_DISKIDNOTFOUND = 'ResourceNotFound.DiskIdNotFound'
# The disk does not exist.
RESOURCENOTFOUND_DISKNOTFOUND = 'ResourceNotFound.DiskNotFound'
# The firewall does not exist.
RESOURCENOTFOUND_FIREWALLNOTFOUND = 'ResourceNotFound.FirewallNotFound'
# The firewall rule does not exist.
RESOURCENOTFOUND_FIREWALLRULESNOTFOUND = 'ResourceNotFound.FirewallRulesNotFound'
# There are no data disks mounted to the instance.
RESOURCENOTFOUND_INSTANCEDATADISKNOTFOUND = 'ResourceNotFound.InstanceDataDiskNotFound'
# The instance ID does not exist.
RESOURCENOTFOUND_INSTANCEIDNOTFOUND = 'ResourceNotFound.InstanceIdNotFound'
# The instance does not exist.
RESOURCENOTFOUND_INSTANCENOTFOUND = 'ResourceNotFound.InstanceNotFound'
# The key pair ID does not exist.
RESOURCENOTFOUND_KEYIDNOTFOUND = 'ResourceNotFound.KeyIdNotFound'
# The custom image does not exist.
RESOURCENOTFOUND_PRIVATEBLUEPRINTNOTFOUND = 'ResourceNotFound.PrivateBlueprintNotFound'
# The service role does not exist. Please add it to the account.
RESOURCENOTFOUND_ROLENOTFOUND = 'ResourceNotFound.RoleNotFound'
# Scene ID not found.
RESOURCENOTFOUND_SCENEIDNOTFOUND = 'ResourceNotFound.SceneIdNotFound'
# The snapshot ID does not exist.
RESOURCENOTFOUND_SNAPSHOTIDNOTFOUND = 'ResourceNotFound.SnapshotIdNotFound'
# The snapshot does not exist.
RESOURCENOTFOUND_SNAPSHOTNOTFOUND = 'ResourceNotFound.SnapshotNotFound'
# The resource is unavailable.
RESOURCEUNAVAILABLE = 'ResourceUnavailable'
# The image resource is not available.
RESOURCEUNAVAILABLE_BLUEPRINTUNAVAILABLE = 'ResourceUnavailable.BlueprintUnavailable'
# The package is not available.
RESOURCEUNAVAILABLE_BUNDLEUNAVAILABLE = 'ResourceUnavailable.BundleUnavailable'
# There is no available configuration in the package.
RESOURCESSOLDOUT_PURCHASESOURCEHASNOBUNDLECONFIGS = 'ResourcesSoldOut.PurchaseSourceHasNoBundleConfigs'
# There is no available configuration in the package.
RESOURCESSOLDOUT_ZONESHASNOBUNDLECONFIGS = 'ResourcesSoldOut.ZonesHasNoBundleConfigs'
# MFA has expired.
UNAUTHORIZEDOPERATION_MFAEXPIRED = 'UnauthorizedOperation.MFAExpired'
# MFA does not exist.
UNAUTHORIZEDOPERATION_MFANOTFOUND = 'UnauthorizedOperation.MFANotFound'
# No permission.
UNAUTHORIZEDOPERATION_NOPERMISSION = 'UnauthorizedOperation.NoPermission'
# Unknown parameter error.
UNKNOWNPARAMETER = 'UnknownParameter'
# Unsupported operation.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
# Unable to associate with CCN: there is no instance in this region.
UNSUPPORTEDOPERATION_ATTACHCCNCONDITIONUNSATISFIED = 'UnsupportedOperation.AttachCcnConditionUnsatisfied'
# Failed to associate the CCN instance. Please check the CCN status and try again later.
UNSUPPORTEDOPERATION_ATTACHCCNFAILED = 'UnsupportedOperation.AttachCcnFailed'
# The current status of the image does not support this operation.
UNSUPPORTEDOPERATION_BLUEPRINTCURSTATEINVALID = 'UnsupportedOperation.BlueprintCurStateInvalid'
# The image is in use, so this operation is not supported.
UNSUPPORTEDOPERATION_BLUEPRINTOCCUPIED = 'UnsupportedOperation.BlueprintOccupied'
# The CCN instance is already associated, and reassociation is not supported.
UNSUPPORTEDOPERATION_CCNALREADYATTACHED = 'UnsupportedOperation.CcnAlreadyAttached'
# No CCN instance has been associated yet, so this operation is not supported.
UNSUPPORTEDOPERATION_CCNNOTATTACHED = 'UnsupportedOperation.CcnNotAttached'
# Failed to query the status of the associated CCN instance. Please try again later.
UNSUPPORTEDOPERATION_DESCRIBECCNATTACHEDINSTANCESFAILED = 'UnsupportedOperation.DescribeCcnAttachedInstancesFailed'
# Failed to unassociate the CCN instance. Please check the CCN status and try again later.
UNSUPPORTEDOPERATION_DETACHCCNFAILED = 'UnsupportedOperation.DetachCcnFailed'
# The disk is busy.
UNSUPPORTEDOPERATION_DISKBUSY = 'UnsupportedOperation.DiskBusy'
# Unsupported operation: the last operation of the disk has not been completed.
UNSUPPORTEDOPERATION_DISKLATESTOPERATIONUNFINISHED = 'UnsupportedOperation.DiskLatestOperationUnfinished'
# The firewall is busy.
UNSUPPORTEDOPERATION_FIREWALLBUSY = 'UnsupportedOperation.FirewallBusy'
# The specified firewall version number does not match the current version.
UNSUPPORTEDOPERATION_FIREWALLVERSIONMISMATCH = 'UnsupportedOperation.FirewallVersionMismatch'
# Unsupported operation: the instance has expired.
UNSUPPORTEDOPERATION_INSTANCEEXPIRED = 'UnsupportedOperation.InstanceExpired'
# The password cannot be set upon creation of a Linux/Unix instance.
UNSUPPORTEDOPERATION_INSTANCELINUXUNIXCREATINGNOTSUPPORTPASSWORD = 'UnsupportedOperation.InstanceLinuxUnixCreatingNotSupportPassword'
# The disk's status does not support this operation.
UNSUPPORTEDOPERATION_INVALIDDISKSTATE = 'UnsupportedOperation.InvalidDiskState'
# Unsupported operation: the instance status is invalid.
UNSUPPORTEDOPERATION_INVALIDINSTANCESTATE = 'UnsupportedOperation.InvalidInstanceState'
# Unsupported operation: the snapshot status is invalid.
UNSUPPORTEDOPERATION_INVALIDSNAPSHOTSTATE = 'UnsupportedOperation.InvalidSnapshotState'
# Unsupported operation: one key pair cannot be bound to the same instance repeatedly.
UNSUPPORTEDOPERATION_KEYPAIRBINDDUPLICATE = 'UnsupportedOperation.KeyPairBindDuplicate'
# Unsupported operation: the `KeyPair` has a binding relationship with the image. Before performing this operation, please delete the custom image bound to the key pair.
UNSUPPORTEDOPERATION_KEYPAIRBINDTOBLUEPRINTS = 'UnsupportedOperation.KeyPairBindToBlueprints'
# Unsupported operation: key pairs that are not bound to instances cannot be unbound from instances.
UNSUPPORTEDOPERATION_KEYPAIRNOTBOUNDTOINSTANCE = 'UnsupportedOperation.KeyPairNotBoundToInstance'
# Unsupported operation: the last operation of the instance has not been completed.
UNSUPPORTEDOPERATION_LATESTOPERATIONUNFINISHED = 'UnsupportedOperation.LatestOperationUnfinished'
# The shared image does not support this operation.
UNSUPPORTEDOPERATION_NOTSUPPORTSHAREDBLUEPRINT = 'UnsupportedOperation.NotSupportSharedBlueprint'
# Terminating a resource in the resource center failed.
UNSUPPORTEDOPERATION_POSTDESTROYRESOURCEFAILED = 'UnsupportedOperation.PostDestroyResourceFailed'
# Failed to reapply to associate a CCN instance. Please check the CCN status and try again later.
UNSUPPORTEDOPERATION_RESETATTACHCCNFAILED = 'UnsupportedOperation.ResetAttachCcnFailed'
# The resource cannot be returned.
UNSUPPORTEDOPERATION_RESOURCENOTRETURNABLE = 'UnsupportedOperation.ResourceNotReturnable'
# The snapshot is busy.
UNSUPPORTEDOPERATION_SNAPSHOTBUSY = 'UnsupportedOperation.SnapshotBusy'
# The system is busy.
UNSUPPORTEDOPERATION_SYSTEMBUSY = 'UnsupportedOperation.SystemBusy'
# Windows instances do not support binding key pairs.
UNSUPPORTEDOPERATION_WINDOWSNOTALLOWTOASSOCIATEKEYPAIR = 'UnsupportedOperation.WindowsNotAllowToAssociateKeyPair'
# SSH key pairs are not available for Windows instances.
UNSUPPORTEDOPERATION_WINDOWSNOTSUPPORTKEYPAIR = 'UnsupportedOperation.WindowsNotSupportKeyPair'
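# --- Usage sketch (illustrative, not part of the generated constants).
# These constants are meant to be compared against the error code carried by
# an SDK exception instead of hard-coding raw strings. `call_api` and an
# exception exposing a `.code` attribute are assumptions for illustration;
# substitute the real client call and exception class of this SDK.
#
# def safe_describe(call_api):
#     try:
#         return call_api()
#     except Exception as err:  # assumed: SDK errors expose `.code`
#         code = getattr(err, 'code', None)
#         if code == RESOURCENOTFOUND_INSTANCENOTFOUND:
#             return None  # a missing instance is treated as "no result"
#         if code == UNSUPPORTEDOPERATION_LATESTOPERATIONUNFINISHED:
#             raise RuntimeError('previous operation unfinished, retry later')
#         raise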
|
PypiClean
|
/baiduads_sdk_auto-2023.1.0-py3-none-any.whl/baiduads/rtafeed/model/del_rta_setting_response_wrapper.py
|
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
def lazy_import():
from baiduads.common.model.api_response_header import ApiResponseHeader
from baiduads.rtafeed.model.del_rta_setting_response_wrapper_body import DelRtaSettingResponseWrapperBody
globals()['ApiResponseHeader'] = ApiResponseHeader
globals()['DelRtaSettingResponseWrapperBody'] = DelRtaSettingResponseWrapperBody
class DelRtaSettingResponseWrapper(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self; it must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self; it must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'header': (ApiResponseHeader,), # noqa: E501
'body': (DelRtaSettingResponseWrapperBody,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'header': 'header', # noqa: E501
'body': 'body', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""DelRtaSettingResponseWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted.
If omitted, no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiResponseHeader): [optional] # noqa: E501
body (DelRtaSettingResponseWrapperBody): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""DelRtaSettingResponseWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted.
If omitted, no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiResponseHeader): [optional] # noqa: E501
body (DelRtaSettingResponseWrapperBody): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/resources/v20190301/get_resource.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetResourceResult',
'AwaitableGetResourceResult',
'get_resource',
]
@pulumi.output_type
class GetResourceResult:
"""
Resource information.
"""
def __init__(__self__, id=None, identity=None, kind=None, location=None, managed_by=None, name=None, plan=None, properties=None, sku=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if managed_by and not isinstance(managed_by, str):
raise TypeError("Expected argument 'managed_by' to be a str")
pulumi.set(__self__, "managed_by", managed_by)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if plan and not isinstance(plan, dict):
raise TypeError("Expected argument 'plan' to be a dict")
pulumi.set(__self__, "plan", plan)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
The kind of the resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> Optional[str]:
"""
ID of the resource that manages this resource.
"""
return pulumi.get(self, "managed_by")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def plan(self) -> Optional['outputs.PlanResponse']:
"""
The plan of the resource.
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter
def properties(self) -> Any:
"""
The resource properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The SKU of the resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetResourceResult(GetResourceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetResourceResult(
id=self.id,
identity=self.identity,
kind=self.kind,
location=self.location,
managed_by=self.managed_by,
name=self.name,
plan=self.plan,
properties=self.properties,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_resource(parent_resource_path: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
resource_provider_namespace: Optional[str] = None,
resource_type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetResourceResult:
"""
Use this data source to access information about an existing resource.
:param str parent_resource_path: The parent resource identity.
:param str resource_group_name: The name of the resource group containing the resource to get. The name is case insensitive.
:param str resource_name: The name of the resource to get.
:param str resource_provider_namespace: The namespace of the resource provider.
:param str resource_type: The resource type of the resource.
"""
__args__ = dict()
__args__['parentResourcePath'] = parent_resource_path
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
__args__['resourceProviderNamespace'] = resource_provider_namespace
__args__['resourceType'] = resource_type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:resources/v20190301:getResource', __args__, opts=opts, typ=GetResourceResult).value
return AwaitableGetResourceResult(
id=__ret__.id,
identity=__ret__.identity,
kind=__ret__.kind,
location=__ret__.location,
managed_by=__ret__.managed_by,
name=__ret__.name,
plan=__ret__.plan,
properties=__ret__.properties,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
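# --- Usage sketch (illustrative only): looking up an existing resource from
# inside a Pulumi program. The resource group, provider namespace, type and
# name below are placeholders, not real resources.
#
# res = get_resource(
#     resource_group_name="example-rg",
#     resource_provider_namespace="Microsoft.Storage",
#     parent_resource_path="",
#     resource_type="storageAccounts",
#     resource_name="examplestorage",
# )
# pulumi.export("resource_id", res.id)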
|
PypiClean
|
/torch-cgd-0.0.1.tar.gz/torch-cgd-0.0.1/torch_cgd/solvers/gmres.py
|
import torch
class GMRES:
def __init__ (self, tol=1e-10, atol=1e-16):
''' Generalized minimal residual (GMRES) method, implementation based on
https://en.wikipedia.org/wiki/Generalized_minimal_residual_method.
Works for general non-singular matrices A; unlike conjugate gradient,
A is not required to be symmetric positive definite. '''
self.tol = tol
self.atol = atol
def solve (self, A, b, x0=None, max_iter=1000):
''' Extremely important that none of the vectors requires grad. '''
# Transform inputs
b = torch.reshape(b, (-1, 1))
# Initial guess
if x0 is not None:
x = x0.clone()
else:
x = torch.zeros(b.shape, device=b.device)
r = b - A @ x
r_tol = self.tol * torch.norm(b)
new_v, rnorm = _safe_normalize(r)
beta = torch.zeros(max_iter + 1, device=b.device)
beta[0] = rnorm
V = []
V.append(new_v)
H = torch.zeros((max_iter + 1, max_iter + 1), device=b.device)
cs = torch.zeros(max_iter, device=b.device) # cosine values at each step
ss = torch.zeros(max_iter, device=b.device) # sine values at each step
for j in range(max_iter):
p = A @ V[j]
new_v = arnoldi(p, V, H, j + 1) # Arnoldi iteration to get the (j+1)-th orthonormal basis vector
V.append(new_v)
H, cs, ss = apply_given_rotation(H, cs, ss, j)
beta[j + 1] = ss[j] * beta[j]
beta[j] = cs[j] * beta[j]
residual = torch.abs(beta[j + 1])
if residual < r_tol or residual < self.atol:
break
if j == max_iter - 1:
print('Warning: GMRES did not converge in {} iterations'.format(max_iter))
# y = torch.linalg.solve_triangular(H[0:j + 1, 0:j + 1], beta[0:j + 1].unsqueeze(-1), upper=True) # j x j
# WARNING! The version above does not work on older PyTorch versions, so we use the deprecated one.
y, _ = torch.triangular_solve(beta[0:j + 1].unsqueeze(-1), H[0:j + 1, 0:j + 1])
V = torch.stack(V[:-1], dim=0)[:,:,0]
sol = x + V.T @ y
return sol
def _safe_normalize (x, threshold=None):
norm = torch.norm(x)
if threshold is None:
threshold = torch.finfo(norm.dtype).eps
normalized_x = x / norm if norm > threshold else torch.zeros_like(x)
return normalized_x, norm
def arnoldi (vec, V, H, j):
'''
Arnoldi iteration to find the j th l2-orthonormal vector
compute the j-1 th column of Hessenberg matrix
'''
for i in range(j):
H[i, j - 1] = vec.T @ V[i]
vec = vec - H[i, j-1] * V[i]
new_v, vnorm = _safe_normalize(vec)
H[j, j - 1] = vnorm
return new_v
def apply_given_rotation (H, cs, ss, j):
# apply the previously computed Givens rotations (0..j-1) to the new column j of H
for i in range(j):
tmp = cs[i] * H[i, j] - ss[i] * H[i + 1, j]
H[i + 1, j] = cs[i] * H[i+1, j] + ss[i] * H[i, j]
H[i, j] = tmp
cs[j], ss[j] = cal_rotation(H[j, j], H[j + 1, j])
H[j, j] = cs[j] * H[j, j] - ss[j] * H[j + 1, j]
H[j + 1, j] = 0
return H, cs, ss
def cal_rotation (a, b):
'''
Args:
a: element h in position j
b: element h in position j+1
Returns:
cosine = a / \sqrt{a^2 + b^2}
sine = - b / \sqrt{a^2 + b^2}
'''
c = torch.sqrt(a * a + b * b)
return a / c, - b / c
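# --- Minimal self-check (illustrative sketch, not part of the library).
# GMRES handles general non-singular matrices, so we test on a random
# non-symmetric system and compare against torch.linalg.solve.
if __name__ == '__main__':
    torch.manual_seed(0)
    n = 8
    # Diagonally dominated random matrix: non-symmetric but well conditioned.
    A = torch.randn(n, n) + n * torch.eye(n)
    b = torch.randn(n, 1)
    x_gmres = GMRES(tol=1e-5).solve(A, b, max_iter=50)
    x_ref = torch.linalg.solve(A, b)
    print('max abs error:', (x_gmres - x_ref).abs().max().item())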
|
PypiClean
|
/paddlepaddle_gpu-2.5.1-cp38-cp38-win_amd64.whl/paddle/distributed/transpiler/geo_sgd_transpiler.py
|
import collections
from paddle import framework
from paddle.distributed.distribute_lookup_table import (
find_distributed_lookup_table,
)
from paddle.distributed.transpiler.details import (
VarsDistributed,
wait_server_ready,
)
from paddle.framework import Program, core
from paddle.incubate.distributed.fleet.parameter_server.ir.ps_dispatcher import (
PSDispatcher,
RoundRobin,
)
from paddle.incubate.distributed.fleet.parameter_server.mode import (
DistributedMode,
)
from paddle.static import (
Parameter,
default_main_program,
default_startup_program,
)
from .distribute_transpiler import (
DistributeTranspiler,
DistributeTranspilerConfig,
slice_variable,
)
RPC_OP_ROLE_ATTR_NAME = (
op_role_attr_name
) = core.op_proto_and_checker_maker.kOpRoleAttrName()
RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC
class GeoSgdTranspiler(DistributeTranspiler):
def __init__(self, config=None):
if config is not None:
self.config = config
else:
self.config = DistributeTranspilerConfig()
self._set_server_config()
if self.config.split_method is None:
self.config.split_method = RoundRobin
assert self.config.min_block_size >= 8192
assert self.config.split_method.__bases__[0] == PSDispatcher
def transpile(
self,
trainer_id,
program=None,
pservers="127.0.0.1:6174",
trainers=1,
sync_mode=False,
startup_program=None,
current_endpoint="127.0.0.1:6174",
):
if program is None:
program = default_main_program()
if startup_program is None:
startup_program = default_startup_program()
self.origin_program = program
self.startup_program = startup_program
self.origin_startup_program = self.startup_program.clone()
self.trainer_num = trainers
# geo-sgd only supports async mode
self.sync_mode = False
self.trainer_id = trainer_id
pserver_endpoints = pservers.split(",")
self.pserver_endpoints = pserver_endpoints
self.vars_overview = VarsDistributed()
self.optimize_ops, self.params_grads = self._get_optimize_pass()
ps_dispatcher = self.config.split_method(self.pserver_endpoints)
self.param_name_to_grad_name = {}
self.grad_name_to_param_name = {}
for param_var, grad_var in self.params_grads:
self.param_name_to_grad_name[param_var.name] = grad_var.name
self.grad_name_to_param_name[grad_var.name] = param_var.name
# distribute lookup table
self.table_name = find_distributed_lookup_table(self.origin_program)
self.has_distributed_lookup_table = self.table_name is not None
self.origin_program._distributed_lookup_table = (
self.table_name if self.table_name else None
)
# add distributed attrs to program
self.origin_program._is_distributed = True
self.origin_program._endpoints = self.pserver_endpoints
self.origin_program._ps_endpoint = current_endpoint
self.origin_program._is_chief = self.trainer_id == 0
# program info send to geo-sgd communicator
self.vars_info = collections.OrderedDict()
self.split_to_origin_mapping = collections.OrderedDict()
self.delta_vars_list = []
self.sparse_var_list = []
self.sparse_var_splited_list = []
# step 1. split and create vars, then put split vars in dicts for later use.
self._init_splited_vars()
# step 3. create send recv var (param after optimize)
send_vars = []
ps_dispatcher.reset()
param_var_mapping_items = list(self.param_var_mapping.items())
# send_vars are the parameters split by the communicator and sent to the pserver, not the origin parameters
for _, splited_vars in param_var_mapping_items:
for _, var in enumerate(splited_vars):
send_vars.append(var)
recv_vars = send_vars
ps_dispatcher.reset()
eplist = ps_dispatcher.dispatch(recv_vars)
for i, ep in enumerate(eplist):
self.param_opt_ep_mapping[ep]["params"].append(recv_vars[i])
distributed_var = self.vars_overview.get_distributed_var_by_slice(
recv_vars[i].name
)
distributed_var.endpoint = ep
origin_name = self.split_to_origin_mapping[recv_vars[i].name]
self.vars_info[origin_name]["epmap"].append(ep)
self.origin_program._parameters_on_pservers = self.vars_overview
# send sparse id to communicator
self.sparse_var = []
self.sparse_tables = []
unique_sparse_var = {}
for op in self.origin_program.global_block().ops:
if "is_sparse" in op.all_attrs():
if op.type == "lookup_table":
op._set_attr('remote_prefetch', False)
for input_var_name, sparse_var_name in zip(
op.input("Ids"), op.input("W")
):
if sparse_var_name in self.sparse_var_list:
if input_var_name in unique_sparse_var:
if (
unique_sparse_var[input_var_name]
== sparse_var_name
):
continue
input_var = program.global_block().var(input_var_name)
self.sparse_var.append(input_var)
self.sparse_tables.append(sparse_var_name)
unique_sparse_var[input_var_name] = sparse_var_name
# batch training loop end flag
dummy_output = program.global_block().create_var(
name=framework.generate_control_dev_var_name()
)
program.global_block().append_op(
type="send",
inputs={"X": self.sparse_var},
outputs={"Out": dummy_output},
attrs={"send_varnames": self.sparse_tables},
)
# add param_init flag in trainer startup program
self.trainer_startup_program = self._get_trainer_startup_program(
recv_vars=recv_vars, eplist=eplist
)
for delta_var in self.delta_vars_list:
self.trainer_startup_program.global_block().create_var(
name=delta_var.name,
persistable=delta_var.persistable,
dtype=delta_var.dtype,
type=delta_var.type,
shape=delta_var.shape,
)
dummy_output = self.trainer_startup_program.global_block().create_var(
name=framework.generate_control_dev_var_name()
)
param_init = self.trainer_startup_program.global_block().create_var(
name="param_init"
)
self.trainer_startup_program.global_block().append_op(
type="send",
inputs={"X": [param_init]},
outputs={"Out": dummy_output},
attrs={"send_varnames": [param_init.name]},
)
def _get_vars_info(self):
return self.vars_info
def get_trainer_program(self, wait_port=True):
if wait_port:
wait_server_ready(self.pserver_endpoints)
return self.origin_program
def get_pserver_programs(self, endpoint):
pserver_prog = self.get_pserver_program(endpoint)
self.param_grad_ep_mapping = self.param_opt_ep_mapping
pserver_startup = self.get_startup_program(
endpoint, pserver_program=pserver_prog
)
return pserver_prog, pserver_startup
def get_pserver_program(self, endpoint):
# step1
pserver_program = Program()
pserver_program.random_seed = self.origin_program.random_seed
pserver_program._copy_dist_param_info_from(self.origin_program)
# step2: Create vars to receive vars at parameter servers.
recv_inputs = []
for v in self.param_opt_ep_mapping[endpoint]["params"]:
self._clone_var(pserver_program.global_block(), v)
optimize_block = []
param_to_block_id = []
sparse_grad_to_param = []
# append op to the current block
pre_block_idx = pserver_program.num_blocks - 1
for var in self.param_opt_ep_mapping[endpoint]["params"]:
per_opt_block = pserver_program._create_block(pre_block_idx)
optimize_block.append(per_opt_block)
var_name = var.name
pserver_block = per_opt_block.program.global_block()
param = pserver_block.vars[var_name]
delta_var_name = "%s.delta" % (param.name)
if var.name in self.sparse_var_splited_list:
delta_type = core.VarDesc.VarType.SELECTED_ROWS
sparse_grad_to_param.append(
":".join([delta_var_name, param.name])
)
else:
delta_type = param.type
delta_var = pserver_block.create_var(
name=delta_var_name,
persistable=False,
type=delta_type,
dtype=param.dtype,
shape=param.shape,
)
per_opt_block.append_op(
type="sum",
inputs={"X": [param, delta_var]},
outputs={"Out": param},
)
param_to_block_id.append(
delta_var_name + ":" + str(per_opt_block.idx)
)
attrs = {
"optimize_blocks": optimize_block,
"endpoint": endpoint,
"Fanin": self.trainer_num,
"distributed_mode": DistributedMode.GEO,
"grad_to_block_id": param_to_block_id,
"sparse_grad_to_param": sparse_grad_to_param,
"rpc_get_thread_num": self.server_config._rpc_get_thread_num,
"rpc_send_thread_num": self.server_config._rpc_send_thread_num,
"rpc_prefetch_thread_num": self.server_config._rpc_prefetch_thread_num,
}
# step5 append the listen_and_serv op
pserver_program.global_block().append_op(
type="listen_and_serv",
inputs={'X': recv_inputs},
outputs={},
attrs=attrs,
)
pserver_program._sync_with_cpp()
# save pserver program to generate pserver side startup relatively.
self.pserver_program = pserver_program
return pserver_program
def _init_splited_vars(self):
param_list = []
grad_list = []
param_grad_set = set()
# step 1. create param_list
for p, g in self.params_grads:
if type(p) == Parameter and p.trainable is False:
continue
if p.name not in param_grad_set:
param_list.append(p)
param_grad_set.add(p.name)
if g.name not in param_grad_set:
grad_list.append(g)
param_grad_set.add(g.name)
if g.type == core.VarDesc.VarType.SELECTED_ROWS:
self.sparse_var_list.append(p.name)
# step 2. Slice vars into a number of pieces using min_block_size
# when we slice var up into blocks, we will slice the var according to
# pserver services' count. A pserver may have two or more listening ports.
param_blocks = slice_variable(
param_list, len(self.pserver_endpoints), self.config.min_block_size
)
# step 3. Create split param from split blocks
# origin_param_name -> [splited_param_vars]
# Todo: update _create_vars_from_blocklist
self.param_var_mapping = self._create_vars_from_blocklist(
self.origin_program, param_blocks
)
# step 4. Create mapping of endpoint -> split var to create pserver side program
self.param_opt_ep_mapping = collections.OrderedDict()
[
self.param_opt_ep_mapping.update(
{
ep: {
"params": [],
}
}
)
for ep in self.pserver_endpoints
]
# step 5. Create delta var of Geo-Sgd & record vars information
for origin_name, splited_vars in self.param_var_mapping.items():
origin_var = self.origin_program.global_block().var(origin_name)
self.vars_info[origin_name] = collections.OrderedDict()
self.vars_info[origin_name]["var_names"] = []
vars_section = self._get_splited_var_sections(splited_vars)
self.vars_info[origin_name]["sections"] = [
str(i) for i in vars_section
]
self.vars_info[origin_name]["epmap"] = []
self.vars_info[origin_name]["is_sparse"] = []
# todo: add var shape (may not be needed, because the recv scope has it)
if origin_name in self.sparse_var_list:
delta_type = core.VarDesc.VarType.SELECTED_ROWS
self.vars_info[origin_name]["is_sparse"].append("True")
else:
delta_type = origin_var.type
self.vars_info[origin_name]["is_sparse"].append("False")
delta_var = self.origin_program.global_block().create_var(
name=".".join([origin_name, "delta"]),
persistable=False,
dtype=origin_var.dtype,
type=delta_type,
shape=origin_var.shape,
)
self.delta_vars_list.append(delta_var)
for splited_var in splited_vars:
is_slice, block_id, offset = self._get_slice_var_info(
splited_var
)
self.vars_overview.add_distributed_var(
origin_var=origin_var,
slice_var=splited_var,
block_id=block_id,
offset=offset,
is_slice=is_slice,
vtype="Param",
)
self.split_to_origin_mapping[splited_var.name] = origin_name
if origin_name in self.sparse_var_list:
self.sparse_var_splited_list.append(splited_var.name)
self.vars_info[origin_name]["var_names"].append(
splited_var.name
)
if len(splited_vars) != 1:
self.origin_program.global_block().create_var(
name=".".join([splited_var.name, "delta"]),
persistable=False,
dtype=splited_var.dtype,
type=delta_type,
shape=splited_var.shape,
)
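# --- Usage sketch (illustrative only; a real run needs trainer and pserver
# processes, so the snippet is kept in comments). Endpoints are placeholders.
#
# transpiler = GeoSgdTranspiler()
# transpiler.transpile(
#     trainer_id=0,
#     pservers="127.0.0.1:6174,127.0.0.1:6175",
#     trainers=2,
#     current_endpoint="127.0.0.1:6174",
# )
# trainer_prog = transpiler.get_trainer_program(wait_port=False)
# pserver_prog, pserver_startup = transpiler.get_pserver_programs("127.0.0.1:6174")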
|
PypiClean
|
/jarray-0.0.0.tar.gz/jarray-0.0.0/jina/flow/asyncio.py
|
from .base import Flow
from ..clients.mixin import AsyncPostMixin
class AsyncFlow(AsyncPostMixin, Flow):
"""
:class:`AsyncFlow` is the asynchronous version of the :class:`Flow`. They share the same interface, except
in :class:`AsyncFlow` :meth:`train`, :meth:`index`, :meth:`search` methods are coroutines
(i.e. declared with the async/await syntax), simply calling them will not schedule them to be executed.
To actually run a coroutine, user need to put them in an eventloop, e.g. via ``asyncio.run()``,
``asyncio.create_task()``.
:class:`AsyncFlow` can be very useful in
the integration settings, where Jina/Jina Flow is NOT the main logic, but rather served as a part of other program.
In this case, users often do not want to let Jina control the ``asyncio.eventloop``. On contrary, :class:`Flow`
is controlling and wrapping the eventloop internally, making the Flow looks synchronous from outside.
In particular, :class:`AsyncFlow` makes Jina usage in Jupyter Notebook more natural and reliable.
For example, the following code
will use the eventloop that already spawned in Jupyter/ipython to run Jina Flow (instead of creating a new one).
.. highlight:: python
.. code-block:: python
from jina import AsyncFlow
from jina.types.document.generators import from_ndarray
import numpy as np
with AsyncFlow().add() as f:
await f.index(from_ndarray(np.random.random([5, 4])), on_done=print)
Notice that the above code will NOT work in the standard Python REPL, as only Jupyter/ipython implements "autoawait".
.. seealso::
Asynchronous in REPL: Autoawait
https://ipython.readthedocs.io/en/stable/interactive/autoawait.html
Another example is when using Jina as an integration. Say you have another IO-bound job ``heavylifting()``; you
can use this feature to schedule Jina ``index()`` and ``heavylifting()`` concurrently.
One can think of :class:`Flow` as Jina-managed eventloop, whereas :class:`AsyncFlow` is self-managed eventloop.
"""
|
PypiClean
|
/empire-platform-api-public-client-1.0.0.tar.gz/empire-platform-api-public-client-1.0.0/empire_platform_api_public_client/models/transmission_rights_overview_mtus_inner_values_inner_timescales_inner.py
|
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic import BaseModel, Field, StrictInt
from empire_platform_api_public_client.models.auction_timescale import AuctionTimescale
class TransmissionRightsOverviewMtusInnerValuesInnerTimescalesInner(BaseModel):
"""
TransmissionRightsOverviewMtusInnerValuesInnerTimescalesInner
"""
timescale: AuctionTimescale = Field(...)
value: Optional[StrictInt] = Field(None, description="Capacity value in kilowatts (kW) - the required system precision allows for handling capacity values as integers")
__properties = ["timescale", "value"]
class Config:
"""Pydantic configuration"""
allow_population_by_field_name = True
validate_assignment = True
def to_str(self) -> str:
"""Returns the string representation of the model using alias"""
return pprint.pformat(self.dict(by_alias=True))
def to_json(self) -> str:
"""Returns the JSON representation of the model using alias"""
return json.dumps(self.to_dict())
@classmethod
def from_json(cls, json_str: str) -> TransmissionRightsOverviewMtusInnerValuesInnerTimescalesInner:
"""Create an instance of TransmissionRightsOverviewMtusInnerValuesInnerTimescalesInner from a JSON string"""
return cls.from_dict(json.loads(json_str))
def to_dict(self):
"""Returns the dictionary representation of the model using alias"""
_dict = self.dict(by_alias=True,
exclude={
},
exclude_none=True)
return _dict
@classmethod
def from_dict(cls, obj: dict) -> TransmissionRightsOverviewMtusInnerValuesInnerTimescalesInner:
"""Create an instance of TransmissionRightsOverviewMtusInnerValuesInnerTimescalesInner from a dict"""
if obj is None:
return None
if not isinstance(obj, dict):
return TransmissionRightsOverviewMtusInnerValuesInnerTimescalesInner.parse_obj(obj)
_obj = TransmissionRightsOverviewMtusInnerValuesInnerTimescalesInner.parse_obj({
"timescale": obj.get("timescale"),
"value": obj.get("value")
})
return _obj
|
PypiClean
|
/webos-emulator-0.8.9.tar.gz/webos-emulator-0.8.9/docs/installation.rst
|
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install webos-emulator, run this command in your terminal:
.. code-block:: console
$ pip install webos-emulator
This is the preferred method to install webos-emulator, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for webos-emulator can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/webosose/webos-emulator
Or download the `tarball`_:
.. code-block:: console
$ curl -OJL https://github.com/webosose/webos-emulator/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
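To verify the installation, you can try importing the package (the module name
``webos_emulator`` is assumed from the package layout):
.. code-block:: console
$ python -c "import webos_emulator"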
.. _Github repo: https://github.com/webosose/webos-emulator
.. _tarball: https://github.com/webosose/webos-emulator/tarball/master
|
PypiClean
|
/FF-Flask-Security-3.1.0.tar.gz/FF-Flask-Security-3.1.0/ff_flask_security/confirmable.py
|
from flask import current_app as app
from werkzeug.local import LocalProxy
from .signals import confirm_instructions_sent, user_confirmed
from .utils import config_value, get_token_status, hash_data, send_mail, \
url_for_security, verify_hash
# Convenient references
_security = LocalProxy(lambda: app.extensions['security'])
_datastore = LocalProxy(lambda: _security.datastore)
def generate_confirmation_link(user):
token = generate_confirmation_token(user)
return (
url_for_security('confirm_email', token=token, _external=True),
token
)
def send_confirmation_instructions(user):
"""Sends the confirmation instructions email for the specified user.
:param user: The user to send the instructions to
"""
confirmation_link, token = generate_confirmation_link(user)
send_mail(config_value('EMAIL_SUBJECT_CONFIRM'), user.email,
'confirmation_instructions', user=user,
confirmation_link=confirmation_link)
confirm_instructions_sent.send(app._get_current_object(), user=user,
token=token)
def generate_confirmation_token(user):
"""Generates a unique confirmation token for the specified user.
:param user: The user to work with
"""
data = [str(user.id), hash_data(user.email)]
return _security.confirm_serializer.dumps(data)
def requires_confirmation(user):
"""Returns `True` if the user requires confirmation."""
return (_security.confirmable and
not _security.login_without_confirmation and
user.confirmed_at is None)
def confirm_email_token_status(token):
"""Returns the expired status, invalid status, and user of a confirmation
token. For example::
expired, invalid, user = confirm_email_token_status('...')
:param token: The confirmation token
"""
expired, invalid, user, token_data = \
get_token_status(token, 'confirm', 'CONFIRM_EMAIL', return_data=True)
if not invalid and user:
user_id, token_email_hash = token_data
invalid = not verify_hash(token_email_hash, user.email)
return expired, invalid, user
def confirm_user(user):
"""Confirms the specified user
:param user: The user to confirm
"""
if user.confirmed_at is not None:
return False
user.confirmed_at = _security.datetime_factory()
_datastore.put(user)
user_confirmed.send(app._get_current_object(), user=user)
return True
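# --- Usage sketch (illustrative): a minimal confirmation view built from the
# helpers above. The route decorator and redirect targets are assumptions for
# illustration and are not part of this module.
#
# from flask import flash, redirect
#
# @app.route('/confirm/<token>')
# def confirm_email(token):
#     expired, invalid, user = confirm_email_token_status(token)
#     if expired or invalid or user is None:
#         flash('The confirmation link is invalid or has expired.')
#         return redirect('/')
#     if confirm_user(user):
#         flash('Your email has been confirmed.')
#     return redirect('/')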
|
PypiClean
|
/pensando_ent-1.28.1.tar.gz/pensando_ent-1.28.1/pensando_ent/psm/model/cluster_node.py
|
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from pensando_ent.psm.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from pensando_ent.psm.model.api_object_meta import ApiObjectMeta
from pensando_ent.psm.model.cluster_node_spec import ClusterNodeSpec
from pensando_ent.psm.model.cluster_node_status import ClusterNodeStatus
globals()['ApiObjectMeta'] = ApiObjectMeta
globals()['ClusterNodeSpec'] = ClusterNodeSpec
globals()['ClusterNodeStatus'] = ClusterNodeStatus
class ClusterNode(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self; it must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'api_version': (str,), # noqa: E501
'kind': (str,), # noqa: E501
'meta': (ApiObjectMeta,), # noqa: E501
'spec': (ClusterNodeSpec,), # noqa: E501
'status': (ClusterNodeStatus,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'api_version': 'api-version', # noqa: E501
'kind': 'kind', # noqa: E501
'meta': 'meta', # noqa: E501
'spec': 'spec', # noqa: E501
'status': 'status', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ClusterNode - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted.
If omitted, no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
api_version (str): [optional] # noqa: E501
kind (str): [optional] # noqa: E501
meta (ApiObjectMeta): [optional] # noqa: E501
spec (ClusterNodeSpec): [optional] # noqa: E501
status (ClusterNodeStatus): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
PypiClean
|
/gsmtasks_client-0.4.0-py3-none-any.whl/gsmtasks/components/schemas/tasks_states_count.py
|
from __future__ import annotations
import typing
import lapidary.runtime
import pydantic
import lapidary.runtime.absent
class TasksStatesCount(pydantic.BaseModel):
unassigned: typing.Annotated[
typing.Union[
int,
lapidary.runtime.absent.Absent,
],
pydantic.Field(
direction=lapidary.runtime.ParamDirection.read,
),
] = lapidary.runtime.absent.ABSENT
assigned: typing.Annotated[
typing.Union[
int,
lapidary.runtime.absent.Absent,
],
pydantic.Field(
direction=lapidary.runtime.ParamDirection.read,
),
] = lapidary.runtime.absent.ABSENT
accepted: typing.Annotated[
typing.Union[
int,
lapidary.runtime.absent.Absent,
],
pydantic.Field(
direction=lapidary.runtime.ParamDirection.read,
),
] = lapidary.runtime.absent.ABSENT
transit: typing.Annotated[
typing.Union[
int,
lapidary.runtime.absent.Absent,
],
pydantic.Field(
direction=lapidary.runtime.ParamDirection.read,
),
] = lapidary.runtime.absent.ABSENT
active: typing.Annotated[
typing.Union[
int,
lapidary.runtime.absent.Absent,
],
pydantic.Field(
direction=lapidary.runtime.ParamDirection.read,
),
] = lapidary.runtime.absent.ABSENT
completed: typing.Annotated[
typing.Union[
int,
lapidary.runtime.absent.Absent,
],
pydantic.Field(
direction=lapidary.runtime.ParamDirection.read,
),
] = lapidary.runtime.absent.ABSENT
failed: typing.Annotated[
typing.Union[
int,
lapidary.runtime.absent.Absent,
],
pydantic.Field(
direction=lapidary.runtime.ParamDirection.read,
),
] = lapidary.runtime.absent.ABSENT
cancelled: typing.Annotated[
typing.Union[
int,
lapidary.runtime.absent.Absent,
],
pydantic.Field(
direction=lapidary.runtime.ParamDirection.read,
),
] = lapidary.runtime.absent.ABSENT
class Config(pydantic.BaseConfig):
use_enum_values = True
extra = pydantic.Extra.allow
TasksStatesCount.update_forward_refs()
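# Minimal usage sketch (an assumption about pydantic v1 semantics, not part of
# the generated module): fields missing from the payload keep the ABSENT
# sentinel after parsing.
#   counts = TasksStatesCount.parse_obj({"unassigned": 3, "assigned": 5})
#   counts.completed is lapidary.runtime.absent.ABSENT  # -> True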
|
PypiClean
|
/d2r-image-0.2.6.tar.gz/d2r-image-0.2.6/d2r_image/nip_helpers.py
|
from parse import compile as compile_pattern
from d2r_image.d2data_lookup import find_base_item_from_magic_item_text, find_pattern_match, find_set_item_by_name, find_unique_item_by_name, get_base, get_rune, is_base, is_rune
from d2r_image.data_models import HoveredItem, ItemQuality
from d2r_image.nip_data import NIP_ALIAS_STAT_PATTERNS, NIP_PATTERNS, NIP_RE_PATTERNS
from d2r_image.processing_data import Runeword
def parse_item(quality, item):
item_is_identified = True
item_is_ethereal = False
item_modifiers = {}
lines = item.split('\n')
cleaned_lines = []
for line in lines:
if line and 'SELL VALUE' not in line and 'COST' not in line:
cleaned_lines.append(line)
lines = cleaned_lines
for line in lines:
if line == 'UNIDENTIFIED':
item_is_identified = False
if 'ETHEREAL' in line:
item_is_ethereal = True
if item_is_identified:
for line in lines:
match = find_pattern_match(line)
if match:
# Store the property values
# if match["property_id"] not in parsed_item:
# parsed_item[match["property_id"]] = []
# parsed_item[match["property_id"]].append(match["property_values"])
if match["property_id"] not in item_modifiers:
item_modifiers[match["property_id"]] = []
item_modifiers[match["property_id"]].append(match["property_values"])
# The first line is usually the item name
# parsed_item["display_name"] = item[0]
# The second line is usually the type. Map it to be sure, (for now just setting to base_type)
# parsed_item["base_item"] = item[1]
base_name = lines[1] if item_is_identified and quality not in [ItemQuality.Gray.value, ItemQuality.Normal.value, ItemQuality.Magic.value, ItemQuality.Crafted.value] else lines[0]
base_name = base_name.upper().replace(' ', '')
base_item = None
if quality == ItemQuality.Magic.value:
base_item = find_base_item_from_magic_item_text(cleaned_lines[0], item_is_identified)
else:
if quality == ItemQuality.Crafted.value and is_rune(base_name):
base_item = get_rune(base_name)
quality = ItemQuality.Rune.value
else:
if not is_base(base_name):
raise Exception('Unable to find item base')
base_item = get_base(base_name)
# Add matches from item data
found_item = None
ntip_alias_stat = None
if item_is_identified:
if quality == ItemQuality.Unique.value:
found_item = find_unique_item_by_name(lines[0].replace(' ', ''))
elif quality == ItemQuality.Set.value:
found_item = find_set_item_by_name(lines[0].replace(' ', ''))
elif quality in [ItemQuality.Gray.value, ItemQuality.Normal.value, ItemQuality.Rune.value]:
found_item = base_item
if not found_item and quality not in [ItemQuality.Magic.value, ItemQuality.Rare.value]:
if quality == ItemQuality.Unique.value:
if not Runeword(lines[0]):
raise Exception('Unable to find item')
quality = ItemQuality.Runeword.value
found_item = {
'DisplayName': lines[0]
}
else:
raise Exception('Unable to find item')
# parsed_item["item_data_matches"] = find_unique_item_by_name(parsed_item["display_name"]) | find_set_item_by_name(parsed_item["display_name"]) | get_base(parsed_item["base_item"])
# The next few lines help us determine
ntip_alias_stat = find_nip_pattern_match(lines)
else:
if quality == ItemQuality.Set.value and len(base_item['sets']) == 1:
found_item = find_set_item_by_name(base_item['sets'][0].replace('_', ' ').upper(), True)
elif quality == ItemQuality.Unique.value and len(base_item['uniques']) == 1:
found_item = find_unique_item_by_name(base_item['uniques'][0].replace('_', ' ').upper(), True)
ntip_alias_quality_map = {
ItemQuality.Rune.value: 10,
ItemQuality.Runeword.value: 9,
ItemQuality.Crafted.value: 8,
ItemQuality.Unique.value: 7,
ItemQuality.Rare.value: 6,
ItemQuality.Set.value: 5,
ItemQuality.Magic.value: 4,
ItemQuality.Normal.value: 2
}
# nip_item = Nip(
# NTIPAliasType=base_item['NTIPAliasType'],
# NTIPAliasClassID = base_item['NTIPAliasClassID'],
# NTIPAliasClass = None if 'item_class' not in base_item else 2 if base_item['item_class'] == 'elite' else 1 if base_item['item_class'] == 'exceptional' else 0,
# NTIPAliasQuality=ntip_alias_quality_map[quality],
# NTIPAliasStat=ntip_alias_stat,
# NTIPAliasFlag={
# '0x10': item_is_identified,
# '0x4000000': item_is_ethereal
# }
# )
# d2_data = D2Data(
# BaseItem=base_item,
# Item=found_item,
# ItemModifiers=item_modifiers if item_modifiers else None
# )
# return {
# 'name': lines[0],
# 'quality': quality,
# 'text': '|'.join(lines),
# 'baseItem': base_item,
# 'item': found_item,
# 'itemModifiers': ntip_alias_stat
# }
name = found_item['DisplayName'] if found_item else base_item['DisplayName']
if quality in [ItemQuality.Magic.value, ItemQuality.Rare.value]:
if item_is_identified:
name = lines[0]
else:
name = base_item['DisplayName']
return HoveredItem(
Name=name,
Quality=quality,
Text='|'.join(lines),
BaseItem=base_item,
Item=found_item,
NTIPAliasType=base_item['NTIPAliasType'],
NTIPAliasClassID=base_item['NTIPAliasClassID'],
NTIPAliasClass = None if 'item_class' not in base_item else 2 if base_item['item_class'] == 'elite' else 1 if base_item['item_class'] == 'exceptional' else 0,
NTIPAliasQuality=ntip_alias_quality_map[quality],
NTIPAliasStat=ntip_alias_stat,
NTIPAliasFlag={
'0x10': item_is_identified,
'0x400000': item_is_ethereal,
'0x4000000': quality == ItemQuality.Runeword.value
}
)
def find_nip_pattern_match(item_lines):
nip_alias_stat = {}
for pattern, keys in NIP_ALIAS_STAT_PATTERNS.items():
        if pattern not in NIP_PATTERNS:
            # compile and cache the parse pattern on first use
            NIP_PATTERNS[pattern] = compile_pattern(pattern)
for line in item_lines:
result = NIP_PATTERNS[pattern].parse(line.replace('%', ''))
if result:
# if len(keys) != len(match.groups(1)):
# raise Exception('Mismatch between regex groups and configured NIP keys')
if len(result.fixed) > 1 and len(keys) == 1:
response = None
if 'CHARGES' in line:
response = {
'level': result.fixed[0],
'skill': result.fixed[1],
'current': result.fixed[2],
'max': result.fixed[3]
}
elif 'CHANCE' in line:
response = {
'chance': result.fixed[0],
'level': result.fixed[1],
'skill': result.fixed[2]
}
if response:
nip_alias_stat[keys[0]] = response
continue
for i in range(len(keys)):
key = keys[i]
if isinstance(key, list):
for split_key in key:
nip_alias_stat[split_key] = result.fixed[i]
else:
if result.fixed:
nip_alias_stat[key] = result.fixed[i]
else:
nip_alias_stat[key] = True
return nip_alias_stat
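# Illustrative sketch (the pattern entry below is hypothetical, not the real
# NIP data): with NIP_ALIAS_STAT_PATTERNS = {'{:d} FASTER RUN/WALK': ['frw']},
# find_nip_pattern_match(['30% FASTER RUN/WALK']) would return {'frw': 30},
# since the '%' is stripped from each line before parsing.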
|
PypiClean
|
/metricflow_lite-0.130.2-py3-none-any.whl/metricflow/sql_clients/postgres.py
|
import logging
from typing import ClassVar, Mapping, Optional, Sequence, Union
import sqlalchemy
from metricflow.protocols.sql_client import SqlEngineAttributes, SqlEngine
from metricflow.sql.render.postgres import PostgresSQLSqlQueryPlanRenderer
from metricflow.sql.render.sql_plan_renderer import SqlQueryPlanRenderer
from metricflow.sql_clients.common_client import SqlDialect, not_empty
from metricflow.sql_clients.sqlalchemy_dialect import SqlAlchemySqlClient
logger = logging.getLogger(__name__)
class PostgresEngineAttributes(SqlEngineAttributes):
"""Engine-specific attributes for the Postgres query engine
This is an implementation of the SqlEngineAttributes protocol for Postgres
"""
sql_engine_type: ClassVar[SqlEngine] = SqlEngine.POSTGRES
# SQL Engine capabilities
date_trunc_supported: ClassVar[bool] = True
full_outer_joins_supported: ClassVar[bool] = True
indexes_supported: ClassVar[bool] = True
multi_threading_supported: ClassVar[bool] = True
timestamp_type_supported: ClassVar[bool] = True
timestamp_to_string_comparison_supported: ClassVar[bool] = True
# Cancelling should be possible, but not yet implemented.
cancel_submitted_queries_supported: ClassVar[bool] = False
# SQL Dialect replacement strings
double_data_type_name: ClassVar[str] = "DOUBLE PRECISION"
timestamp_type_name: ClassVar[Optional[str]] = "TIMESTAMP"
# MetricFlow attributes
sql_query_plan_renderer: ClassVar[SqlQueryPlanRenderer] = PostgresSQLSqlQueryPlanRenderer()
class PostgresSqlClient(SqlAlchemySqlClient):
"""Implements Postgres."""
@staticmethod
def from_connection_details(url: str, password: Optional[str]) -> SqlAlchemySqlClient: # noqa: D
parsed_url = sqlalchemy.engine.url.make_url(url)
dialect = SqlDialect.POSTGRESQL.value
if parsed_url.drivername != dialect:
raise ValueError(f"Expected dialect '{dialect}' in {url}")
if password is None:
raise ValueError(f"Password not supplied for {url}")
return PostgresSqlClient(
host=not_empty(parsed_url.host, "host", url),
port=not_empty(parsed_url.port, "port", url),
username=not_empty(parsed_url.username, "username", url),
password=password,
database=not_empty(parsed_url.database, "database", url),
query=parsed_url.query,
)
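    # Usage sketch (hypothetical DSN and password, for illustration only):
    #   client = PostgresSqlClient.from_connection_details(
    #       "postgresql://mf_user@localhost:5432/metrics", password="s3cret")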
def __init__( # noqa: D
self,
port: int,
database: str,
username: str,
password: str,
host: str,
query: Optional[Mapping[str, Union[str, Sequence[str]]]] = None,
) -> None:
super().__init__(
engine=self.create_engine(
dialect=SqlDialect.POSTGRESQL.value,
driver="psycopg2",
port=port,
database=database,
username=username,
password=password,
host=host,
query=query,
)
)
@property
def sql_engine_attributes(self) -> SqlEngineAttributes:
"""Collection of attributes and features specific to the Postgres SQL engine"""
return PostgresEngineAttributes()
def cancel_submitted_queries(self) -> None: # noqa: D
raise NotImplementedError
|
PypiClean
|
/moog-games-1.4.tar.gz/moog-games-1.4/moog_demos/example_configs/parallelogram_catch.py
|
import collections
import numpy as np
from moog import action_spaces
from moog import game_rules
from moog import observers
from moog import physics as physics_lib
from moog import shapes
from moog import sprite
from moog import tasks
# Width and height of each cell in the background grid
_GRID_SIZE = 0.3
def get_parallelogram(min_axis_ratio=0.4):
"""Get parallelogram vertices centered around 0 with maximum radius 1."""
angles = np.pi * (np.array([0., 0.5, 1., 1.5]) + np.random.uniform(0, 2))
vertices = np.stack((np.sin(angles), np.cos(angles)), axis=1)
axis_ratio = np.random.uniform(min_axis_ratio, 1.)
vertices *= np.array([[1.], [axis_ratio], [1.], [axis_ratio]])
return vertices
def get_prey(centered_vertices, scale=1., max_vel=0., sprite_scale=0.1):
"""Get prey sprites.
Args:
centered_vertices: Numpy array of shape [num_vertices, 2] containing
vertex positions.
scale: Re-scaling factor of centered_vertices for the global space.
max_vel: Maximum velocity of the sprites.
sprite_scale: Re-scaling factor of centered_vertices for the individual
sprite shapes.
"""
sprite_shape = sprite_scale * centered_vertices
sprite_positions = scale * centered_vertices
sprite_positions += np.array([0.5, 0.5]) - sprite_positions[0]
# We sample each sprite's velocity independently so that the entire tethered
# configuration may rotate
prey = [
sprite.Sprite(
x=pos[0], y=pos[1], shape=sprite_shape, scale=1.,
x_vel=np.random.uniform(-1 * max_vel, max_vel),
y_vel=np.random.uniform(-1 * max_vel, max_vel),
c0=0.2, c1=1., c2=1.)
for pos in sprite_positions
]
return prey
def _get_config(max_vel):
"""Get environment config."""
############################################################################
# Sprite initialization
############################################################################
# Grid
grid = shapes.grid_lines(
grid_x=_GRID_SIZE, grid_y=_GRID_SIZE, buffer_border=1., c0=0., c1=0.,
c2=0.5)
def state_initializer():
agent = sprite.Sprite(
x=0.5, y=0.5, shape='circle', scale=0.04, c0=0.33, c1=1., c2=0.66)
annulus_shape = shapes.annulus_vertices(0.15, 2.)
agent_annulus = sprite.Sprite(
x=0.5, y=0.5, shape=annulus_shape, scale=1., c0=0.6, c1=1., c2=1.)
prey = get_prey(
get_parallelogram(min_axis_ratio=0.5),
scale=0.4,
max_vel=max_vel,
sprite_scale=0.075,
)
state = collections.OrderedDict([
('grid', grid),
('prey', prey),
('agent', [agent]),
('agent_annulus', [agent_annulus]),
])
return state
############################################################################
# Physics
############################################################################
force = (physics_lib.Drag(coeff_friction=0.25), ['agent', 'agent_annulus'])
corrective_physics = physics_lib.Tether(('prey',), update_angle_vel=True)
physics = physics_lib.Physics(
force,
updates_per_env_step=10,
corrective_physics=corrective_physics,
)
############################################################################
# Task
############################################################################
prey_task = tasks.ContactReward(
1, layers_0='agent', layers_1='prey',
condition=lambda s_agent, s_prey: s_prey.c1 > 0.5,
)
reset_trial_task = tasks.Reset(
condition=lambda state: all([s.c1 < 0.5 for s in state['prey']]),
steps_after_condition=10,
)
task = tasks.CompositeTask(prey_task, reset_trial_task, timeout_steps=500)
############################################################################
# Action space
############################################################################
action_space = action_spaces.Joystick(
scaling_factor=0.01, action_layers=('agent', 'agent_annulus'))
############################################################################
# Observer
############################################################################
_polygon_modifier = observers.polygon_modifiers.FirstPersonAgent(
agent_layer='agent')
observer = observers.PILRenderer(
image_size=(64, 64),
anti_aliasing=1,
color_to_rgb='hsv_to_rgb',
polygon_modifier=_polygon_modifier,
)
############################################################################
# Game rules
############################################################################
# Make prey gray upon contact
def _make_prey_gray(prey):
prey.c1 = 0.
prey.c2 = 0.6
make_prey_gray = game_rules.ModifyOnContact(
layers_0='agent',
layers_1='prey',
modifier_1=_make_prey_gray,
)
# Keep agent near center
keep_near_center = game_rules.KeepNearCenter(
agent_layer='agent',
layers_to_center=['agent_annulus', 'prey'],
grid_x=_GRID_SIZE,
)
rules = (make_prey_gray, keep_near_center)
############################################################################
# Final config
############################################################################
config = {
'state_initializer': state_initializer,
'physics': physics,
'task': task,
'action_space': action_space,
'observers': {'image': observer},
'game_rules': rules,
}
return config
def get_config(level):
"""Get config dictionary of kwargs for environment constructor.
Args:
level: Int. Different values yield different velocities of the prey.
"""
if level == 0:
return _get_config(max_vel=0.)
elif level == 1:
return _get_config(max_vel=0.01)
elif level == 2:
return _get_config(max_vel=0.02)
else:
raise ValueError('Invalid level {}'.format(level))
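# Usage sketch -- the Environment import below is an assumption about the moog
# API, for illustration only:
#   from moog import environment
#   config = get_config(level=1)  # prey drift with max velocity 0.01
#   env = environment.Environment(**config)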
|
PypiClean
|
/plone.recipe.dzhandle-1.0-beta2.tar.gz/plone.recipe.dzhandle-1.0-beta2/src/plone/recipe/dzhandle/recipe.py
|
__author__ = """Jens Klein <[email protected]>"""
__docformat__ = 'plaintext'
import logging
import os
import re
import shutil
import copy
import zc.buildout
import zc.recipe.egg
ZODBLOCALTEMPLATE = """\
<filestorage>
path $INSTANCE/var/%(file)s
</filestorage>
mount-point %(mountpoint)s
"""
ZODBCLIENTTEMPLATE = """\
mount-point %(mountpoint)s
cache-size %(cachesize)s
<zeoclient>
server %(server)s
storage %(storage)s
name zeostorage
var $INSTANCE/var
cache-size 20MB
</zeoclient>
"""
clientpatterns = [
{'type': 'line',
'mode': 'replace',
'pattern' : 'define HTTPPORT',
'replace' : '%%define HTTPPORT %s\n',
'option' : 'httpport'
},
{'type': 'line',
'mode': 'replace',
'pattern' : 'define ZOPE_USER',
'replace' : '%%define ZOPE_USER %s\n',
'option' : 'systemuser'
},
{'type': 'line',
'mode': 'replace_or_add',
'pattern' : 'zserver-threads ',
'replace' : 'zserver-threads %s\n',
'option' : 'threads',
},
{'type': 'line',
'mode': 'replace_or_add',
'pattern' : 'debug-mode ',
'replace' : 'debug-mode %s\n',
'option' : 'debugmode',
},
{'type': 'line',
'mode': 'replace_or_add',
'pattern' : 'verbose-security ',
'replace' : 'verbose-security %s\n',
'option' : 'verbosesecurity',
},
{'type': 'line',
'mode': 'replace_or_add',
'pattern' : 'products ',
'replace' : 'products %s',
'optiontype': 'list',
'option' : 'products',
},
{'type': 'sections',
'mode': 'replace_or_add',
'sectionname' : 'zodb_db',
'optionprefix' : 'zodb_',
'optiontype': 'dict',
'templates': {'local': ZODBLOCALTEMPLATE,
'zeo': ZODBCLIENTTEMPLATE,},
},
]
serverpatterns = []
class Recipe(object):
"""recipe for dzhandle usage."""
dzhandle = '/usr/bin/dzhandle'
def __init__(self, buildout, name, options):
self.logger = logging.getLogger(name)
self.buildout = buildout
self.name = name
self.egg = zc.recipe.egg.Egg(buildout, options['recipe'], options)
options['zopeuser'] = options.get('zopeuser', 'admin:admin')
self.options = options
# validation
if self.options.get('zopetype', '') not in ('standalone', 'client',
'server'):
            raise zc.buildout.UserError("wrong 'zopetype' given.")
def install(self):
"""Install creates a new instance and configuration."""
if self.options['zopetype'] == 'server':
self.createZEOServer()
#self.createSymlinks()
else:
self.createInstance()
self.update()
return self.instancehome
def update(self):
"""Update creates new configuration files in an existing instance"""
if self.options['zopetype'] == 'server':
self.createZEOServerConfiguration()
else:
self.createSymlinks()
self.patchBinaries()
self.createZEOClientConfiguration()
self.createPackageIncludes()
def createInstance(self):
"""creates a blank instance using dzhandle."""
task = 'make-instance'
args = []
ltype = self.options.get('locationtype', 'buildout')
if ltype != 'buildout':
args.append(self.options.get('instancename', self.name))
else:
args.append(self.instancehome)
sudo = ltype=='system'
args.append('-m manual')
args.append('-u %s' % self.options['zopeuser'])
self.execute(task, sudo=sudo, *args)
def createZEOServer(self):
"""creates a blank zeoserver using dzhandle."""
# XXX TODO
task = 'make-zeoinstance'
args = []
args.append(self.instancehome)
self.execute(task, *args)
def createZEOClientConfiguration(self):
"""creates a fresh zope.conf according to the settigs given."""
confdir = os.path.join(self.instancehome, 'etc')
confdeb = os.path.join(confdir, 'zope.conf.debian')
conftarget = os.path.join(confdir, 'zope.conf')
# if zope.conf.debian does not exist copy zope.conf to zope.conf.debian
if not os.path.exists(confdeb):
assert(os.path.exists(conftarget))
assert(os.path.isfile(conftarget))
shutil.copyfile(conftarget, confdeb)
            self.logger.info('copied zope.conf to zope.conf.debian. The latter will'
                             ' be used as a template for zope.conf')
assert(os.path.isfile(confdeb))
# use the .debian as template
deblines = open(confdeb, 'r').readlines()
targetlines = self.handleConf(deblines, clientpatterns)
targetfile = open(conftarget, 'w')
targetfile.writelines(targetlines)
targetfile.close()
def handleConf(self, lines, patterns):
"""takes a conf file and apply patterns to it."""
###############################
# first pass: simple line replacement
newlines = []
handled = []
# 1.1 replace
for line in lines:
if line.strip().startswith('#') or line.strip() == '':
continue
newline = line
for lpat in patterns:
if lpat['type'] != 'line':
continue
if 'replace' in lpat['mode'] and \
lpat['pattern'] in line and \
self.options.get(lpat['option'], None):
newline = self.createLineFromOption(lpat)
handled.append(lpat['option'])
break
newlines.append(newline)
# 1.2 handle options left to add
for lpat in patterns:
if lpat['type'] != 'line':
continue
if lpat['option'] in handled or \
'add' not in lpat['mode'] or \
self.options.get(lpat['option'], None) is None:
continue
newline = self.createLineFromOption(lpat)
newlines.append(newline)
##############################
# second pass: handle sections
lines = copy.copy(newlines)
newlines = []
handled = []
currentsection = None
replaced = False
# 1.1 replace existing
for line in lines:
if currentsection is not None:
# handle inside section
name, lpat, sectiondef = currentsection
sectiondef['name'] = name
if not replaced:
tpl = lpat['templates'][sectiondef['type'].strip()]
result = tpl % sectiondef
newlines.append(result)
handled.append( '%s_%s' % (lpat['optionprefix'], name) )
replaced = True
if line.find('</%s>' % lpat['sectionname']) >= 0:
newlines.append(line)
currentsection = None
replaced = False
continue
else:
newlines.append(line)
# find sections
for lpat in patterns:
if lpat['type'] != 'sections':
continue
sections = self.sections(lpat['optionprefix'], 'dict')
if line.find('<') >= line.find(lpat['sectionname']):
continue
namestart = line.find(lpat['sectionname']) + \
len(lpat['sectionname'])
name = line[namestart: line.find('>')].strip()
if name in sections.keys():
# found the section
currentsection = name, lpat, sections[name]
# 1.2 add sections not replaced
for lpat in patterns:
if lpat['type'] != 'sections':
continue
sections = self.sections(lpat['optionprefix'], 'dict')
for name in sections:
if '%s_%s' % (lpat['optionprefix'], name) in handled:
continue
sectiondef = sections[name]
sectiondef['name'] = name
newlines.append('<%s %s>\n' % (lpat['sectionname'], name))
tpl = lpat['templates'][sectiondef['type'].strip()]
result = tpl % sectiondef
newlines.append(result)
newlines.append('</%s>\n' % lpat['sectionname'])
return newlines
def createLineFromOption(self, lpat):
otype = lpat.get('optiontype', None)
if otype is not None:
option = self.complexOption(lpat['option'], otype)
else:
option = self.options[lpat['option']]
if otype == 'list':
newline = [(lpat['replace'] % l) for l in option]
newline = '\n'.join(newline) + '\n'
else:
newline = lpat['replace'] % option
return newline
def sections(self, prefix, type):
"""list all sections from options"""
result = {}
for optionkey in self.options.keys():
if optionkey.startswith(prefix):
key = optionkey[len(prefix):]
value = self.complexOption(optionkey, type)
result[key] = value
return result
def complexOption(self, name, type='list'):
"""a way to make a list or dict from a dumb string.
for a list just put one value per line
foobar =
foo
bar
baz
results in ['foo', 'bar', 'baz']
        for a dict put one definition per line:
foobar =
foo: bar
baz: baaz
results in {'foo': 'bar', 'baz': 'baaz'}
*sigh*, would like to have this in zc.buildout itself.
@param type: one out of ('list', 'dict').
"""
value = self.options.get(name, None)
if value is None:
return None
value = [l.strip() for l in value.split('\n') if l.strip()]
if type == 'dict':
result = {}
for item in value:
key, item = item.split(':', 1)
result[key.strip()] = item.strip()
value = result
return value
def createPackageIncludes(self):
"""ZCML to include packages. Taken from plone.recipe.zope2install.
This method is Copyright (c) 2006-2007 Zope Corporation and Contributors.
"""
location = self.instancehome
zcml = self.options.get('zcml')
if zcml:
sitezcml_path = os.path.join(location, 'etc', 'site.zcml')
if not os.path.exists(sitezcml_path):
# Zope 2.9 does not have a site.zcml so we copy the
# one out from Five.
zope2_location = self.options['zope2-location']
skel_path = os.path.join(zope2_location, 'lib', 'python',
'Products', 'Five', 'skel',
'site.zcml')
shutil.copyfile(skel_path, sitezcml_path)
includes_path = os.path.join(location, 'etc', 'package-includes')
if not os.path.exists(includes_path):
# Zope 2.9 does not have a package-includes so we
# create one.
os.mkdir(includes_path)
zcml = zcml.split()
if '*' in zcml:
zcml.remove('*')
else:
shutil.rmtree(includes_path)
os.mkdir(includes_path)
n = 0
            package_match = re.compile(r'\w+([.]\w+)*$').match
for package in zcml:
n += 1
orig = package
if ':' in package:
package, filename = package.split(':')
else:
filename = None
if '-' in package:
package, suff = package.split('-')
if suff not in ('configure', 'meta', 'overrides'):
raise ValueError('Invalid zcml', orig)
else:
suff = 'configure'
if filename is None:
filename = suff + '.zcml'
if not package_match(package):
raise ValueError('Invalid zcml', orig)
path = os.path.join(
includes_path,
"%3.3d-%s-%s.zcml" % (n, package, suff),
)
open(path, 'w').write(
'<include package="%s" file="%s" />\n'
% (package, filename)
)
def createZEOServerConfiguration(self):
"""creates a fresh zeo.conf according to the settings given."""
# XXX TODO
confdir = os.path.join(self.instancehome, 'etc')
confdeb = os.path.join(confdir, 'zeo.conf.debian')
conftarget = os.path.join(confdir, 'zeo.conf')
# if zope.conf.debian does not exist copy zeo.conf to zeo.conf.debian
if not os.path.exists(confdeb):
assert(os.path.exists(conftarget))
assert(os.path.isfile(conftarget))
shutil.copyfile(conftarget, confdeb)
assert(os.path.isfile(confdeb))
def patchBinaries(self):
requirements, ws = self.egg.working_set()
egg_locations = [e.location for e in ws]
location = self.instancehome
path =":".join(egg_locations)
for script_name in ('runzope', 'zopectl'):
script_path = os.path.join(location, 'bin', script_name)
script = open(script_path).readlines()
newscript = []
for line in script:
line = line.rstrip()
if line.strip().startswith("PYTHONPATH="):
newscript.append('PYTHONPATH=$SOFTWARE_HOME:'+path+':$PYTHONPATH')
else:
newscript.append(line)
newscript = '\n'.join(newscript)
f = open(script_path, 'w')
f.write(newscript)
f.close()
# some helpers below here
def execute(self, task, *args, **kw):
"""executes dzhandle."""
args = ' '.join(args)
serviceuser = self.options.get('serviceuser', '')
if serviceuser != '':
serviceuser = "-u %s " % serviceuser
command = '%s -z%s %s%s %s' % (self.dzhandle,
self.options.get('version'),
serviceuser,
task,
args)
if kw.get('sudo', False):
command = 'sudo %s' % command
self.logger.info('command: %s' % command)
os.system(command)
def createSymlinks(self):
"""creates symlinks in bin and if needed in parts"""
ltype = self.options.get('locationtype', 'buildout').strip()
assert(ltype in ('system', 'user', 'buildout'))
if ltype != 'buildout':
# symlink instance to parts directory
target = os.path.join(
self.buildout['buildout']['parts-directory'],
self.name
)
ln = "ln -s %s %s" % (self.instancehome, target)
os.system(ln)
# symlink zopectl
source = os.path.join(self.instancehome, 'bin', 'zopectl')
target = os.path.join(
self.buildout['buildout']['bin-directory'],
('%sctl' % self.options.get('instancename', self.name)),
)
ln = "ln -s %s %s" % (source, target)
os.system(ln)
@property
def instancehome(self):
if self.options.get('location', None) is not None:
return self.options['location']
ltype = self.options.get('locationtype', 'buildout').strip()
assert(ltype in ('system', 'user', 'buildout'))
if ltype == 'buildout':
self.options['location'] = os.path.join(
self.buildout['buildout']['parts-directory'],
self.options.get('instancename', self.name),
)
return self.options['location']
elif ltype == 'system':
base = '/var/lib/zope%s/instance' % self.options.get('version').strip()
elif ltype == 'user':
base = os.path.expanduser('~/zope/instance')
base += '/zope%s' % self.options.get('version').strip()
self.options['location'] = '%s/%s' % (base,
self.options.get('instancename',
self.name))
return self.options.get('location', None)
|
PypiClean
|
/robotframework-corerpahive-1.2.0.tar.gz/robotframework-corerpahive-1.2.0/README.rst
|
CoreRPAHive
===============
.. contents::
Introduction
------------
CoreRPAHive_ is a Robotic Process Automation (RPA) library for `RobotFramework <http://code.google.com/p/robotframework/>`_ that allows developers to create RPA scripts more easily and reduces complexity in the robot script layer.
The project is hosted on `GitHub <https://github.com/qahive/robotframework-CoreRPAHive>`_ and downloads can be found from `PyPI <https://pypi.org/project/CoreRPAHive/>`_.
Inspired by: https://github.com/Snooz82/robotframework-datadriver
Keyword documentation
---------------------
See `keyword documentation <https://qahive.github.io/robotframework-CoreRPAHive/CoreRPAHive.html>`_ for available keywords and more information about the library in general.
Installation
------------
The recommended installation method is using pip::
pip install --upgrade robotframework-corerpahive
Manual download source code to your local computer and running following command to install using python::
python setup.py install --force -v
Directory Layout
----------------
Examples/
A simple demonstration, with a web application and RF test suite
docs/
Keyword documentation
CoreRPAHive/
Python source code
tests/
Python nose test scripts
Usage
-----
To write tests with Robot Framework and CoreRPAHive,
CoreRPAHive must be imported into your RF test suite.
1. Create Excel file by copy from template (`download <https://github.com/qahive/robotframework-CoreRPAHive/raw/master/Examples/test_data/DefaultDemoData.xlsx>`_).
Mandatory Columns:
- [Status] For report test result Pass/Fail
- [Log Message] Error message or any message after test done
- [Screenshot] Screenshot (Support only 1 screenshot)
- [Tags] Robot Tag
Test data Columns:
User can add their own test data columns without limit
Example:
- Username
- Password
2. Create RF test suite
.. code:: robotframework
*** Setting ***
Library CoreRPAHive ./test_data/BasicDemoData.xlsx capture_screenshot=Skip
Test Template Validate user data template
*** Test Cases ***
Verify valid user '${username}' ${None} ${None} ${None}
*** Keywords ***
Validate user data template
[Arguments] ${username} ${password} ${email}
Log ${username}
Log ${password}
Log ${email}
Should Be True '${password}' != '${None}'
Should Match Regexp ${email} [A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}
Enhancement and release
------------
- Create update keyword documents
.. code:: python
python -m robot.libdoc -f html CoreRPAHive docs/CoreRPAHive.html
- Extended (In-progress)
Limitation
------------
``Eclipse plug-in RED``
There are known issues if the Eclipse plug-in RED is used, because the debugging listener of this tool pre-calculates the number of test cases before the Data Driver creates them. This leads to a situation where the RED listener throws exceptions: it is called for each test step, but the RED GUI has already stopped debugging, so the listener cannot send information to the GUI.
This does not influence the execution in any way but produces a lot of unwanted exceptions in the Log.
|
PypiClean
|
/lightning-2022.10.25-py3-none-any.whl/lightning_app/frontend/panel/app_state_comm.py
|
# Todo: Refactor with Streamlit
# Note: It would be nice one day to just watch changes within the Flow scope instead of whole app
from __future__ import annotations
import asyncio
import os
from threading import Thread
from typing import Callable, Optional
import websockets
from lightning_app.core.constants import APP_SERVER_PORT
from lightning_app.utilities.app_helpers import Logger
_logger = Logger(__name__)
_CALLBACKS = []
_THREAD: Optional[Thread] = None
def _get_ws_port():
if "LIGHTNING_APP_STATE_URL" in os.environ:
return 8080
return APP_SERVER_PORT
def _get_ws_url():
port = _get_ws_port()
return f"ws://localhost:{port}/api/v1/ws"
def _run_callbacks():
for callback in _CALLBACKS:
callback()
def _target_fn():
async def update_fn():
ws_url = _get_ws_url()
_logger.debug("connecting to web socket %s", ws_url)
async with websockets.connect(ws_url) as websocket: # pylint: disable=no-member
while True:
await websocket.recv()
# Note: I have not seen use cases where the two lines below are needed
# Changing '< 0.2' to '< 1' makes the App very sluggish to the end user
# Also the implementation can cause the App state to lag behind because only 1 update
# is received per 0.2 second (or 1 second).
# while (time.time() - last_updated) < 0.2:
# time.sleep(0.05)
# Todo: Add some kind of throttling. If 10 messages are received within 100ms then
# there is no need to trigger the app state changed, request state and update
# 10 times.
_logger.debug("App State Changed. Running callbacks")
_run_callbacks()
asyncio.run(update_fn())
def _start_websocket():
global _THREAD # pylint: disable=global-statement
if not _THREAD:
_logger.debug("Starting the watch_app_state thread.")
_THREAD = Thread(target=_target_fn)
        _THREAD.daemon = True
_THREAD.start()
_logger.debug("thread started")
def watch_app_state(callback: Callable):
"""Start the process that serves the UI at the given hostname and port number.
Arguments:
callback: A function to run when the App state changes. Must be thread safe.
Example:
.. code-block:: python
def handle_state_change():
print("The App State changed.")
watch_app_state(handle_state_change)
"""
_CALLBACKS.append(callback)
_start_websocket()
|
PypiClean
|
/uniohomeassistant-0.1.3.tar.gz/uniohomeassistant-0.1.3/homeassistant/components/smarty/binary_sensor.py
|
import logging
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_PROBLEM,
BinarySensorEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DOMAIN, SIGNAL_UPDATE_SMARTY
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Smarty Binary Sensor Platform."""
smarty = hass.data[DOMAIN]["api"]
name = hass.data[DOMAIN]["name"]
sensors = [
AlarmSensor(name, smarty),
WarningSensor(name, smarty),
BoostSensor(name, smarty),
]
async_add_entities(sensors, True)
class SmartyBinarySensor(BinarySensorEntity):
"""Representation of a Smarty Binary Sensor."""
def __init__(self, name, device_class, smarty):
"""Initialize the entity."""
self._name = name
self._state = None
self._sensor_type = device_class
self._smarty = smarty
@property
def device_class(self):
"""Return the class of the sensor."""
return self._sensor_type
@property
def should_poll(self) -> bool:
"""Do not poll."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
async def async_added_to_hass(self):
"""Call to update."""
async_dispatcher_connect(self.hass, SIGNAL_UPDATE_SMARTY, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
class BoostSensor(SmartyBinarySensor):
"""Boost State Binary Sensor."""
def __init__(self, name, smarty):
"""Alarm Sensor Init."""
super().__init__(name=f"{name} Boost State", device_class=None, smarty=smarty)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.boost
class AlarmSensor(SmartyBinarySensor):
"""Alarm Binary Sensor."""
def __init__(self, name, smarty):
"""Alarm Sensor Init."""
super().__init__(
name=f"{name} Alarm", device_class=DEVICE_CLASS_PROBLEM, smarty=smarty
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.alarm
class WarningSensor(SmartyBinarySensor):
"""Warning Sensor."""
def __init__(self, name, smarty):
"""Warning Sensor Init."""
super().__init__(
name=f"{name} Warning", device_class=DEVICE_CLASS_PROBLEM, smarty=smarty
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.warning
|
PypiClean
|
/gears-less-0.3.3.tar.gz/gears-less-0.3.3/gears_less/node_modules/less/node_modules/source-map/lib/source-map/base64-vlq.js
|
/*
 * Copyright 2011 Mozilla Foundation and contributors
* Licensed under the New BSD license. See LICENSE or:
* http://opensource.org/licenses/BSD-3-Clause
*
* Based on the Base 64 VLQ implementation in Closure Compiler:
* https://code.google.com/p/closure-compiler/source/browse/trunk/src/com/google/debugging/sourcemap/Base64VLQ.java
*
* Copyright 2011 The Closure Compiler Authors. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
if (typeof define !== 'function') {
var define = require('amdefine')(module, require);
}
define(function (require, exports, module) {
var base64 = require('./base64');
// A single base 64 digit can contain 6 bits of data. For the base 64 variable
// length quantities we use in the source map spec, the first bit is the sign,
// the next four bits are the actual value, and the 6th bit is the
// continuation bit. The continuation bit tells us whether there are more
// digits in this value following this digit.
//
// Continuation
// | Sign
// | |
// V V
// 101011
var VLQ_BASE_SHIFT = 5;
// binary: 100000
var VLQ_BASE = 1 << VLQ_BASE_SHIFT;
// binary: 011111
var VLQ_BASE_MASK = VLQ_BASE - 1;
// binary: 100000
var VLQ_CONTINUATION_BIT = VLQ_BASE;
/**
 * Converts from a two's-complement value to a value where the sign bit
 * is placed in the least significant bit. For example, as decimals:
* 1 becomes 2 (10 binary), -1 becomes 3 (11 binary)
* 2 becomes 4 (100 binary), -2 becomes 5 (101 binary)
*/
function toVLQSigned(aValue) {
return aValue < 0
? ((-aValue) << 1) + 1
: (aValue << 1) + 0;
}
/**
 * Converts to a two's-complement value from a value where the sign bit
 * is placed in the least significant bit. For example, as decimals:
* 2 (10 binary) becomes 1, 3 (11 binary) becomes -1
* 4 (100 binary) becomes 2, 5 (101 binary) becomes -2
*/
function fromVLQSigned(aValue) {
var isNegative = (aValue & 1) === 1;
var shifted = aValue >> 1;
return isNegative
? -shifted
: shifted;
}
/**
* Returns the base 64 VLQ encoded value.
*/
exports.encode = function base64VLQ_encode(aValue) {
var encoded = "";
var digit;
var vlq = toVLQSigned(aValue);
do {
digit = vlq & VLQ_BASE_MASK;
vlq >>>= VLQ_BASE_SHIFT;
if (vlq > 0) {
// There are still more digits in this value, so we must make sure the
// continuation bit is marked.
digit |= VLQ_CONTINUATION_BIT;
}
encoded += base64.encode(digit);
} while (vlq > 0);
return encoded;
};
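// Worked example of the loop above (an illustration, not part of the library):
// encode(16) maps 16 to the VLQ-signed value 32 (binary 100000). The low five
// bits are 0 with the continuation bit set, giving base64 digit 32 ('g'); the
// remaining value 1 has no continuation, giving digit 1 ('B'). So
// encode(16) === "gB".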
/**
* Decodes the next base 64 VLQ value from the given string and returns the
* value and the rest of the string.
*/
exports.decode = function base64VLQ_decode(aStr) {
var i = 0;
var strLen = aStr.length;
var result = 0;
var shift = 0;
var continuation, digit;
do {
if (i >= strLen) {
throw new Error("Expected more digits in base 64 VLQ value.");
}
digit = base64.decode(aStr.charAt(i++));
continuation = !!(digit & VLQ_CONTINUATION_BIT);
digit &= VLQ_BASE_MASK;
result = result + (digit << shift);
shift += VLQ_BASE_SHIFT;
} while (continuation);
return {
value: fromVLQSigned(result),
rest: aStr.slice(i)
};
};
});
|
PypiClean
|
/py_lambda-1.1.5.tar.gz/py_lambda-1.1.5/py_lambda/request.py
|
import sys, os, time, json
import base64
REQ_PATH_STR = "path"
REQ_HTTP_METH = "httpMethod"
REQ_HTTP_HEADER = "headers"
REQ_HTTP_HEADER_COOKIE = "cookie"
REQ_QUERY_STR_PARAM = "queryStringParameters"
REQ_BODY = "body"
REQ_IS_BASE64 = "isBase64Encoded"
class Request(object):
"""
    The request class initialized by an ALB event.
"""
def __init__(self, eventDict):
self.eventDict = None
self.pathList = None
self.pathListLen = 0
if not isinstance(eventDict, dict):
try:
# print("eventDict:", eventDict)
self.eventDict = json.loads(eventDict)
except Exception as e:
                print(e)
else:
self.eventDict = eventDict
self._getPathList()
def _getPathList(self):
# print("self.eventDict[path]", self.eventDict["path"], type(self.eventDict))
if REQ_PATH_STR in self.eventDict:
# if isinstance(self.eventDict["path"], str):
self.eventDict[REQ_PATH_STR] = self.eventDict[REQ_PATH_STR].encode("utf-8")
self.eventDict[REQ_PATH_STR] = self.eventDict[REQ_PATH_STR].lstrip('/').rstrip('/')
self.pathList = self.eventDict[REQ_PATH_STR].split("/")
self.pathListLen = len(self.pathList)
def getPathList(self):
return self.pathList
def getPathStr(self):
if REQ_PATH_STR in self.eventDict:
return self.eventDict[REQ_PATH_STR]
def httpMeth(self):
if REQ_HTTP_METH in self.eventDict:
return self.eventDict[REQ_HTTP_METH].encode("utf-8").upper()
def isAlb(self):
if "requestContext" in self.eventDict:
if "elb" in self.eventDict["requestContext"]:
return True
return False
def getCookies(self):
if REQ_HTTP_HEADER in self.eventDict:
if REQ_HTTP_HEADER_COOKIE in self.eventDict[REQ_HTTP_HEADER]:
return self.eventDict[REQ_HTTP_HEADER][REQ_HTTP_HEADER_COOKIE]
def getQueryParams(self, inpList):
retDict = {}
if REQ_QUERY_STR_PARAM in self.eventDict:
for eachParam in inpList:
if eachParam in self.eventDict[REQ_QUERY_STR_PARAM]:
retDict[eachParam] = self.eventDict[REQ_QUERY_STR_PARAM][eachParam]
return retDict
def getAllQueryParam(self):
retDict = {}
        for eachParam in self.eventDict[REQ_QUERY_STR_PARAM]:
            retDict[eachParam] = self.eventDict[REQ_QUERY_STR_PARAM][eachParam]
return retDict
def isBase64(self):
if REQ_IS_BASE64 in self.eventDict:
return self.eventDict[REQ_IS_BASE64]
def getBody(self):
if REQ_BODY in self.eventDict:
return self.eventDict[REQ_BODY]
def getHeaderParm(self, parmList):
retDict = {}
# print("header:", self.eventDict[REQ_HTTP_HEADER])
if REQ_HTTP_HEADER in self.eventDict:
for eachInp in parmList:
if eachInp in self.eventDict[REQ_HTTP_HEADER]:
retDict[eachInp] = self.eventDict[REQ_HTTP_HEADER][eachInp]
return retDict
def main():
    pass
if __name__ == "__main__":
main()
|
PypiClean
|
/plbm_liumou_stable-1.4.0.tar.gz/plbm_liumou_stable-1.4.0/src/plbm_liumou_Stable/NewNetStatus.py
|
import socket
from os import path, getcwd
from sys import platform
from . import NewFileManagement, NewCommand, get
from .logger import ColorLogger
class NewNetworkCardInfo:
def __init__(self, eth=None, debug=False):
"""
        Get local network card information
        :param eth: NIC name; if not set, it is detected automatically
        """
        # DNS address list
self.dns = None
self.debug = debug
self.eth = eth
self.os = platform.lower()
self.linux = False
if self.os.lower() == 'linux'.lower():
self.linux = True
        # Gateway address
        self.gw = None
        # IP address
        self.ip = None
        # Subnet info
        self.sub = None
        # MAC address
        self.mac = None
        # Subnet mask
        self.mask = 24
        # Connection name
        self.connect = None
        # Connection rate
        self.rate = None
self.cmd = NewCommand(password="pd")
self.logger = ColorLogger()
def show(self):
"""
        Display the network card information
:return:
"""
self.logger.info("Eth_self.eth :", self.eth)
self.logger.info("Gateway_self.gw : ", self.gw)
self.logger.info("IP_self.ip: ", self.ip)
self.logger.info("Subnet Mask_self.mask : ", self.mask)
self.logger.info("Dns List_self.dns: ", str(self.dns))
self.logger.info("Mac _self.mac : ", self.mac)
self.logger.info("Connect Rate_self.rate: ", self.rate)
self.logger.info("Connect Name_self.connect: ", self.connect)
def get_dev_list(self):
"""
        Get the NIC list and check the configured NIC against it
:return:
"""
c = "nmcli device | awk '{print $1}'"
g = self.cmd.getout(cmd=c).split("\n")
if self.eth is not None:
if str(self.eth) in g:
self.logger.info("The set device name is in the existing list")
return True
else:
self.logger.warning("Device not found: ", self.eth)
self.logger.debug("Automatic detection will be used")
self.getip_request()
return False
def get_all(self):
"""
        Get all network card information
        :return: query result (bool)
"""
existence = self.get_dev_list()
if self.linux:
if not existence:
                # Detect NIC information automatically
                self.logger.debug("Use automatic network card detection")
                try:
                    # If the configured NIC does not exist, detect it automatically
                    dev_ = str("""ip r | grep default | grep %s | awk '{print $5}'""" % self.sub)
                    # Get the NIC name
                    self.eth = self.cmd.getout(cmd=dev_)
                except Exception as e:
                    print(e)
                    return False
            else:
                # Check the specified NIC information
                self.logger.debug("Detect the specified network card information")
                if not self.getip_dev():
                    self.logger.error("Query failed")
                    return False
        else:
            self.logger.error("The current device is not Linux")
return False
        # Get connection parameters
        connect_arg_ = "nmcli device show %s | grep IP4" % self.eth
        connect_arg = self.cmd.getout(connect_arg_).split("\n")
        # Subnet mask
        self.mask = int(str(connect_arg[0]).split("/")[1])
        # Gateway
        self.gw = connect_arg[1]
        # Device information
        search_ = """nmcli device show %s | grep GENERAL | awk '{print $2}'""" % self.eth
        search_info = self.cmd.getout(cmd=search_).split("\n")
        # MAC address
        self.mac = search_info[2]
        # Connection rate
        self.rate = search_info[4]
        # Connection name
        self.connect = search_info[5]
        # DNS list
d_ = """nmcli device show %s | grep IP4 | grep DNS | awk '{print $2}'""" % self.eth
self.dns = str(self.cmd.shell(cmd=d_)).split('\n')
self.show()
def getip_dev(self):
"""
        Get the IP via the specified device
:return:
"""
c = "nmcli device show %s | grep IP4 | sed -n 1p | awk '{print $2}'" % self.eth
info = self.cmd.getout(c)
        # info yields data like: 10.16.17.103/24
if self.cmd.code == 0:
self.logger.debug("query was successful")
ni = str(info).split("/")
self.ip = ni[0]
self.mask = ni[1]
return True
else:
self.logger.error("Query failed. Please check the connection status of the network card")
return False
def getip_request(self):
"""
        Get the IP via a network request
:return:
"""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('119.29.29.29', 53))
(addr, port) = csock.getsockname()
csock.close()
self.ip = addr
tu = str(self.ip).split('.')
self.sub = str("%s.%s.%s." % (tu[0], tu[1], tu[2]))
return True
except Exception as e:
self.logger.error(str(e))
return False
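# Usage sketch (assumes a Linux host with nmcli available; illustration only):
#   info = NewNetworkCardInfo(eth="eth0", debug=True)
#   info.get_all()  # fills ip/mask/gw/dns/mac and prints them via show()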
class NewNetStatus:
def __init__(self, ip=None, port=80, log_file=None, txt_log=False, debug=True):
"""
        Network tool for checking whether the network is reachable
        :param ip: the IP address to check
        :param port: the port to check. Defaults to 80.
        :param log_file: log file path
        :param txt_log: whether to enable text logging
"""
self.debug = debug
self.ip = ip
self.port = port
self.status = False
#
self.headers = {}
self.headers = get.headers
self.cmd = NewCommand(password='Gxxc@123')
self.fm = NewFileManagement()
self.logger = ColorLogger(file=log_file, txt=txt_log)
def ping_status(self, server=None):
"""
        Use ping to check the network connection
        :param server: server address. Defaults to self.ip.
:return:
"""
self.status = False
if server is None:
server = self.ip
        self.logger.info('Checking: %s' % server)
cmd = 'ping %s -c 5' % server
if platform.lower() == 'win32':
cmd = 'ping %s ' % server
if self.cmd.shell(cmd=cmd):
self.logger.info("Ping 连接成功: %s" % server)
self.status = True
else:
self.logger.error("Ping 连接失败: %s" % server)
return self.status
def downfile(self, url, filename=None, cover=False, md5=None):
"""
        Download a file
        :param url: download link
        :param filename: save name; defaults to the last URL segment in the current directory
        :param cover: whether to overwrite an existing file. Defaults to False.
        :param md5: expected MD5 checksum of the downloaded file
        :return: download result (bool)
"""
if filename is None:
filename = str(url).split("/")[-1]
filename = path.join(getcwd(), filename)
filename = path.abspath(filename)
if path.exists(filename):
if not cover:
self.logger.info("检测到已存在路径: %s" % filename)
self.logger.info("放弃下载: %s" % url)
return True
self.logger.debug("检测到已存在路径,正在删除...")
c = 'rm -rf ' + filename
if self.cmd.shell(cmd=c):
self.logger.info("删除成功: %s" % filename)
else:
self.logger.warning("删除失败,跳过下载")
return False
c = str("wget -c -O %s %s" % (filename, url))
self.cmd.shell(cmd=c, terminal=False)
if int(self.cmd.code) == 0:
self.logger.info("下载成功: %s" % filename)
if md5:
get_ = self.fm.get_md5(filename=filename)
if get_:
if str(md5).lower() == str(self.fm.md5).lower():
return True
else:
return False
return True
self.logger.error("下载失败: %s" % filename)
self.logger.error("下载链接: ", url)
self.logger.error("保存路径: ", filename)
return False
def tcp_port_status(self, port, timeout=5, ip=None):
"""
        Check whether a TCP port is open
:param ip:
:param port:
:param timeout:
:return:
"""
if ip is None:
if self.ip is None:
self.logger.error("未设置对端IP地址")
else:
self.logger.debug("使用实例配置IP进行检测")
ip = self.ip
else:
self.logger.debug("使用函数传入IP")
self.logger.info("正在检测地址: [", ip, "] - 端口: [ ", port, " ]")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
        try:
            # connect using the resolved ip and always release the socket
            sock.connect((ip, port))
            return True
        except socket.error:
            return False
        finally:
            sock.close()
if __name__ == "__main__":
up = NewNetStatus()
up.ping_status(server='baidu.com')
|
PypiClean
|
/teste_gces-0.2.7.tar.gz/teste_gces-0.2.7/README.md
|
# GCES 2022-2 individual assignment
Software Configuration Management knowledge is fundamental throughout the life cycle of a software product. Management techniques range from version control, build and environment-configuration automation, automated testing, and environment isolation all the way to system deployment. Nowadays this whole cycle is integrated into a DevOps pipeline, with the Continuous Integration (CI) and Continuous Deployment (CD) stages implemented and automated.
To exercise this knowledge, in this assignment you must apply the concepts studied throughout the course to the software product contained in this repository.
The system is a Python library for running customizable data pipelines on databases.
To run the application on your machine, just follow the step-by-step instructions below.
# Application summary
The library helps developers explore data, providing essential functions for identifying outliers and anomalies and an interface that helps visualize the information according to the configuration file.
The library receives a YAML file with the settings for each stage of the data pipeline and the database address.
After the pipeline runs, the database is updated with the analysis results, and the results can be visualized through dashboards in Metabase.
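A minimal sketch of how such a configuration could be loaded (the file name and keys below are assumptions for illustration, not the library's actual schema):
```python
import yaml

# Load the pipeline configuration described above (hypothetical file and keys).
with open("pipeline_config.yaml") as f:
    config = yaml.safe_load(f)

print(config["database"])  # e.g. the database address used by the pipeline
```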
# Assignment stages
The assignment must be carried out in stages. Each stage must be delivered in a separate commit containing that stage's working result.
Stages 1 to 3 concern environment isolation using Docker and Docker Compose. The tutorial below covers the fundamental concepts for using these technologies.
[Docker tutorial](https://github.com/FGA-GCES/Workshop-Docker-Entrega-01/tree/main/tutorial_docker)
Stages 4 and 5 concern the configuration of the CI and CD pipeline.
[CI tutorial - GitLab](https://github.com/FGA-GCES/Workshop-CI-Entrega-02/tree/main/gitlab-ci_tutorial)
## Containerizing the database
The initial version of the system includes Metabase in the backend, which requires a Mongo database installation to work. The first stage of the assignment is to configure a container just for the database, with the credentials specified in the application description, and to test that it works.
## Containerizing the application + Metabase
In this stage, both the Python application and Metabase/the database must be running in individual containers.
An orchestrator (Docker Compose) must be used to manage communication between the containers, along with credentials, networks, volumes, and any other configuration needed for the application to run correctly.
## Python dependency and package management
Configure Poetry, the Python dependency and package manager, to generate a pip package of the solution. Publish the library.
https://python-poetry.org
## Automated documentation
Generate the library documentation automatically, using Doxygen to extract library information and Sphinx to build the documentation: https://www.sphinx-doc.org
## Continuous Integration (CI)
For this stage, the application environment must already be fully containerized.
A Continuous Integration tool must be used to handle the build, the tests, and the deploy to https://pypi.org .
This stage may be carried out using the GitLab-CI or GitHub Actions CI environments.
Requirements for the Continuous Integration configuration (GitLab or GitHub) include:
Build (Poetry)
Test - unit tests
Lint
Documentation (Sphinx)
## Grading
The assignment is graded on the correct implementation of each stage. Grading is both **quantitative** (whether the implementation + documentation was completed) and **qualitative** (how it was implemented, understanding of the concepts in practice, complexity of the solution). Therefore, make **atomic, well-documented, complete commits** to make your work easier to understand and evaluate. Remember that the assignment is individual.
**Notes**:
1. The final delivery date for the assignment is 28/01/2023;
2. The assignment must be developed in a **PERSONAL, PRIVATE repository** that must be made public only after the delivery date (on 28/01/2023);
3. Each stage of the assignment must be delivered in progressive commits (there may be more than one commit per stage);
4. **Commits must be spaced out over days throughout the development of the assignment**. Commits all made together on the delivery date will be discounted from the final grade.
| Item | Weight |
|---|---|
| 1. Database containerization | 1.0 |
| 2. Library + database containerization | 1.5 |
| 3. Library publication | 1.5 |
| 4. Automated documentation | 1.5 |
| 5. Continuous Integration (build, test, lint, documentation) | 3.0 |
| 6. Continuous Deployment | 1.5 |
## Examples of previous assignments
Some assignments from previous semesters:
- [2020/2](https://github.com/FGA-GCES/Trabalho-Individual-2020-2)
- [2021/1](https://github.com/FGA-GCES/Workshop-Docker-Entrega-01)
- [2021/2](https://github.com/FGA-GCES/Trabalho-Individual-2021-2)
### Installation requirements
```
python -m venv env
source env/bin/activate
pip install -r requirements.txt
```
### Running the application
```
python src/main.py
```
### Testing
```
pytest --cov
```
### Metabase
Metabase helps visualize and model data processing, feature engineering, and model monitoring.
| Keywords | Description |
|-----------|-------------|
| CSV | A CSV file is a plain-text file that stores table and spreadsheet information. CSV files can easily be imported and exported using programs that store data in tables.|
| Collection | A collection is a grouping of MongoDB documents. Documents within a collection can have different fields. A collection is the equivalent of a table in a relational database system.|
| Database | A database stores one or more collections of documents.|
| Mongo | A NoSQL database developed by MongoDB Inc. MongoDB is designed to store large amounts of data and to run fast.|
**Connect the database to the metabase**
- step 1: Open localhost:3000
- step 2: Click Admin setting
- step 3: Click Database
- step 4: Add the database authentication details
**Example of the Mongo-Metabase connection**
| metabase | credential |
|------------|-------------|
| host | mongo |
|database_name | use the name you define in make migrate|
| user | lappis |
| password | lappis |
|
PypiClean
|
/pulumi_fortios-0.0.9.tar.gz/pulumi_fortios-0.0.9/pulumi_fortios/wireless_controller_hotspot20_h2_qp_wan_metric.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['WirelessControllerHotspot20H2QpWanMetricArgs', 'WirelessControllerHotspot20H2QpWanMetric']
@pulumi.input_type
class WirelessControllerHotspot20H2QpWanMetricArgs:
def __init__(__self__, *,
downlink_load: Optional[pulumi.Input[int]] = None,
downlink_speed: Optional[pulumi.Input[int]] = None,
link_at_capacity: Optional[pulumi.Input[str]] = None,
link_status: Optional[pulumi.Input[str]] = None,
load_measurement_duration: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
symmetric_wan_link: Optional[pulumi.Input[str]] = None,
uplink_load: Optional[pulumi.Input[int]] = None,
uplink_speed: Optional[pulumi.Input[int]] = None,
vdomparam: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a WirelessControllerHotspot20H2QpWanMetric resource.
"""
if downlink_load is not None:
pulumi.set(__self__, "downlink_load", downlink_load)
if downlink_speed is not None:
pulumi.set(__self__, "downlink_speed", downlink_speed)
if link_at_capacity is not None:
pulumi.set(__self__, "link_at_capacity", link_at_capacity)
if link_status is not None:
pulumi.set(__self__, "link_status", link_status)
if load_measurement_duration is not None:
pulumi.set(__self__, "load_measurement_duration", load_measurement_duration)
if name is not None:
pulumi.set(__self__, "name", name)
if symmetric_wan_link is not None:
pulumi.set(__self__, "symmetric_wan_link", symmetric_wan_link)
if uplink_load is not None:
pulumi.set(__self__, "uplink_load", uplink_load)
if uplink_speed is not None:
pulumi.set(__self__, "uplink_speed", uplink_speed)
if vdomparam is not None:
pulumi.set(__self__, "vdomparam", vdomparam)
@property
@pulumi.getter(name="downlinkLoad")
def downlink_load(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "downlink_load")
@downlink_load.setter
def downlink_load(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "downlink_load", value)
@property
@pulumi.getter(name="downlinkSpeed")
def downlink_speed(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "downlink_speed")
@downlink_speed.setter
def downlink_speed(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "downlink_speed", value)
@property
@pulumi.getter(name="linkAtCapacity")
def link_at_capacity(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "link_at_capacity")
@link_at_capacity.setter
def link_at_capacity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "link_at_capacity", value)
@property
@pulumi.getter(name="linkStatus")
def link_status(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "link_status")
@link_status.setter
def link_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "link_status", value)
@property
@pulumi.getter(name="loadMeasurementDuration")
def load_measurement_duration(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "load_measurement_duration")
@load_measurement_duration.setter
def load_measurement_duration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "load_measurement_duration", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="symmetricWanLink")
def symmetric_wan_link(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "symmetric_wan_link")
@symmetric_wan_link.setter
def symmetric_wan_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "symmetric_wan_link", value)
@property
@pulumi.getter(name="uplinkLoad")
def uplink_load(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "uplink_load")
@uplink_load.setter
def uplink_load(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "uplink_load", value)
@property
@pulumi.getter(name="uplinkSpeed")
def uplink_speed(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "uplink_speed")
@uplink_speed.setter
def uplink_speed(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "uplink_speed", value)
@property
@pulumi.getter
def vdomparam(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "vdomparam")
@vdomparam.setter
def vdomparam(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vdomparam", value)
@pulumi.input_type
class _WirelessControllerHotspot20H2QpWanMetricState:
def __init__(__self__, *,
downlink_load: Optional[pulumi.Input[int]] = None,
downlink_speed: Optional[pulumi.Input[int]] = None,
link_at_capacity: Optional[pulumi.Input[str]] = None,
link_status: Optional[pulumi.Input[str]] = None,
load_measurement_duration: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
symmetric_wan_link: Optional[pulumi.Input[str]] = None,
uplink_load: Optional[pulumi.Input[int]] = None,
uplink_speed: Optional[pulumi.Input[int]] = None,
vdomparam: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering WirelessControllerHotspot20H2QpWanMetric resources.
"""
if downlink_load is not None:
pulumi.set(__self__, "downlink_load", downlink_load)
if downlink_speed is not None:
pulumi.set(__self__, "downlink_speed", downlink_speed)
if link_at_capacity is not None:
pulumi.set(__self__, "link_at_capacity", link_at_capacity)
if link_status is not None:
pulumi.set(__self__, "link_status", link_status)
if load_measurement_duration is not None:
pulumi.set(__self__, "load_measurement_duration", load_measurement_duration)
if name is not None:
pulumi.set(__self__, "name", name)
if symmetric_wan_link is not None:
pulumi.set(__self__, "symmetric_wan_link", symmetric_wan_link)
if uplink_load is not None:
pulumi.set(__self__, "uplink_load", uplink_load)
if uplink_speed is not None:
pulumi.set(__self__, "uplink_speed", uplink_speed)
if vdomparam is not None:
pulumi.set(__self__, "vdomparam", vdomparam)
@property
@pulumi.getter(name="downlinkLoad")
def downlink_load(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "downlink_load")
@downlink_load.setter
def downlink_load(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "downlink_load", value)
@property
@pulumi.getter(name="downlinkSpeed")
def downlink_speed(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "downlink_speed")
@downlink_speed.setter
def downlink_speed(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "downlink_speed", value)
@property
@pulumi.getter(name="linkAtCapacity")
def link_at_capacity(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "link_at_capacity")
@link_at_capacity.setter
def link_at_capacity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "link_at_capacity", value)
@property
@pulumi.getter(name="linkStatus")
def link_status(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "link_status")
@link_status.setter
def link_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "link_status", value)
@property
@pulumi.getter(name="loadMeasurementDuration")
def load_measurement_duration(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "load_measurement_duration")
@load_measurement_duration.setter
def load_measurement_duration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "load_measurement_duration", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="symmetricWanLink")
def symmetric_wan_link(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "symmetric_wan_link")
@symmetric_wan_link.setter
def symmetric_wan_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "symmetric_wan_link", value)
@property
@pulumi.getter(name="uplinkLoad")
def uplink_load(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "uplink_load")
@uplink_load.setter
def uplink_load(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "uplink_load", value)
@property
@pulumi.getter(name="uplinkSpeed")
def uplink_speed(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "uplink_speed")
@uplink_speed.setter
def uplink_speed(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "uplink_speed", value)
@property
@pulumi.getter
def vdomparam(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "vdomparam")
@vdomparam.setter
def vdomparam(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vdomparam", value)
class WirelessControllerHotspot20H2QpWanMetric(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
downlink_load: Optional[pulumi.Input[int]] = None,
downlink_speed: Optional[pulumi.Input[int]] = None,
link_at_capacity: Optional[pulumi.Input[str]] = None,
link_status: Optional[pulumi.Input[str]] = None,
load_measurement_duration: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
symmetric_wan_link: Optional[pulumi.Input[str]] = None,
uplink_load: Optional[pulumi.Input[int]] = None,
uplink_speed: Optional[pulumi.Input[int]] = None,
vdomparam: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a WirelessControllerHotspot20H2QpWanMetric resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[WirelessControllerHotspot20H2QpWanMetricArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a WirelessControllerHotspot20H2QpWanMetric resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param WirelessControllerHotspot20H2QpWanMetricArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WirelessControllerHotspot20H2QpWanMetricArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
downlink_load: Optional[pulumi.Input[int]] = None,
downlink_speed: Optional[pulumi.Input[int]] = None,
link_at_capacity: Optional[pulumi.Input[str]] = None,
link_status: Optional[pulumi.Input[str]] = None,
load_measurement_duration: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
symmetric_wan_link: Optional[pulumi.Input[str]] = None,
uplink_load: Optional[pulumi.Input[int]] = None,
uplink_speed: Optional[pulumi.Input[int]] = None,
vdomparam: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WirelessControllerHotspot20H2QpWanMetricArgs.__new__(WirelessControllerHotspot20H2QpWanMetricArgs)
__props__.__dict__["downlink_load"] = downlink_load
__props__.__dict__["downlink_speed"] = downlink_speed
__props__.__dict__["link_at_capacity"] = link_at_capacity
__props__.__dict__["link_status"] = link_status
__props__.__dict__["load_measurement_duration"] = load_measurement_duration
__props__.__dict__["name"] = name
__props__.__dict__["symmetric_wan_link"] = symmetric_wan_link
__props__.__dict__["uplink_load"] = uplink_load
__props__.__dict__["uplink_speed"] = uplink_speed
__props__.__dict__["vdomparam"] = vdomparam
super(WirelessControllerHotspot20H2QpWanMetric, __self__).__init__(
'fortios:index/wirelessControllerHotspot20H2QpWanMetric:WirelessControllerHotspot20H2QpWanMetric',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
downlink_load: Optional[pulumi.Input[int]] = None,
downlink_speed: Optional[pulumi.Input[int]] = None,
link_at_capacity: Optional[pulumi.Input[str]] = None,
link_status: Optional[pulumi.Input[str]] = None,
load_measurement_duration: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
symmetric_wan_link: Optional[pulumi.Input[str]] = None,
uplink_load: Optional[pulumi.Input[int]] = None,
uplink_speed: Optional[pulumi.Input[int]] = None,
vdomparam: Optional[pulumi.Input[str]] = None) -> 'WirelessControllerHotspot20H2QpWanMetric':
"""
Get an existing WirelessControllerHotspot20H2QpWanMetric resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _WirelessControllerHotspot20H2QpWanMetricState.__new__(_WirelessControllerHotspot20H2QpWanMetricState)
__props__.__dict__["downlink_load"] = downlink_load
__props__.__dict__["downlink_speed"] = downlink_speed
__props__.__dict__["link_at_capacity"] = link_at_capacity
__props__.__dict__["link_status"] = link_status
__props__.__dict__["load_measurement_duration"] = load_measurement_duration
__props__.__dict__["name"] = name
__props__.__dict__["symmetric_wan_link"] = symmetric_wan_link
__props__.__dict__["uplink_load"] = uplink_load
__props__.__dict__["uplink_speed"] = uplink_speed
__props__.__dict__["vdomparam"] = vdomparam
return WirelessControllerHotspot20H2QpWanMetric(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="downlinkLoad")
def downlink_load(self) -> pulumi.Output[int]:
return pulumi.get(self, "downlink_load")
@property
@pulumi.getter(name="downlinkSpeed")
def downlink_speed(self) -> pulumi.Output[int]:
return pulumi.get(self, "downlink_speed")
@property
@pulumi.getter(name="linkAtCapacity")
def link_at_capacity(self) -> pulumi.Output[str]:
return pulumi.get(self, "link_at_capacity")
@property
@pulumi.getter(name="linkStatus")
def link_status(self) -> pulumi.Output[str]:
return pulumi.get(self, "link_status")
@property
@pulumi.getter(name="loadMeasurementDuration")
def load_measurement_duration(self) -> pulumi.Output[int]:
return pulumi.get(self, "load_measurement_duration")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="symmetricWanLink")
def symmetric_wan_link(self) -> pulumi.Output[str]:
return pulumi.get(self, "symmetric_wan_link")
@property
@pulumi.getter(name="uplinkLoad")
def uplink_load(self) -> pulumi.Output[int]:
return pulumi.get(self, "uplink_load")
@property
@pulumi.getter(name="uplinkSpeed")
def uplink_speed(self) -> pulumi.Output[int]:
return pulumi.get(self, "uplink_speed")
@property
@pulumi.getter
def vdomparam(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "vdomparam")
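# --- Usage sketch (not part of the generated module) ---
# The resource name and property values below are illustrative assumptions;
# this only runs inside a Pulumi program (e.g. via `pulumi up`):
#
#     import pulumi
#     import pulumi_fortios as fortios
#
#     metric = fortios.WirelessControllerHotspot20H2QpWanMetric(
#         "example-wan-metric",
#         downlink_speed=2400,
#         uplink_speed=40,
#     )
#     pulumi.export("wanMetricName", metric.name)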
|
PypiClean
|
/django-whatever-0.3.1.tar.gz/django-whatever-0.3.1/README.rst
|
Unobtrusive test model creation for Django.
Ivelum's repo is a friendly fork of the ``django-any`` package written by
Mikhail Podgurskiy (kmmbvnr) and of the original ``django-whatever`` repository
created and maintained by Ilya Baryshev (coagulant).
The purpose of the fork is to fix the most annoying bugs and add `some features <http://django-whatever.readthedocs.org/en/latest/changelog.html>`_.
To remain compatible with the original package, ``django-whatever`` retains the same namespace: ``django_any``.
``django-whatever`` is an explicit replacement for old-style, big, and error-prone
implicit fixture files.
``django-whatever`` allows you to specify only the fields important for your tests
and fills the rest randomly with acceptable values.
It makes tests clean and easy to understand, without reading fixture files::

    from django_any import any_model

    class TestMyShop(TestCase):
        def test_order_updates_user_account(self):
            account = any_model(Account, amount=25, user__is_active=True)
            order = any_model(Order, user=account.user, amount=10)
            order.proceed()
            account = Account.objects.get(pk=account.pk)
            self.assertEquals(15, account.amount)
Read more at the docs: http://django-whatever.readthedocs.org/
|
PypiClean
|
/berval_distributions-1.2.tar.gz/berval_distributions-1.2/distributions/Gaussiandistribution.py
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
PypiClean
|
/aws-orbit-1.4.0.tar.gz/aws-orbit-1.4.0/aws_orbit/plugins/helpers.py
|
import base64
import logging
import os
import pickle
import shutil
import sys
from typing import TYPE_CHECKING, Any, Dict, List, Type, cast
from aws_orbit import cdk, sh
from aws_orbit.models.context import Context, ContextSerDe, TeamContext
from aws_orbit.services import cfn
if TYPE_CHECKING:
from aws_cdk.core import Stack
_logger: logging.Logger = logging.getLogger(__name__)
def _serialize_parameters(parameters: Dict[str, Any]) -> str:
pickled: bytes = pickle.dumps(obj=parameters)
return base64.b64encode(pickled).decode("utf-8")
def _deserialize_parameters(parameters: str) -> Dict[str, Any]:
data: bytes = base64.b64decode(parameters.encode("utf-8"))
return cast(Dict[str, Any], pickle.loads(data))
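# Round-trip sketch for the two helpers above (illustrative values):
#
#     params = {"team": "data-science", "replicas": 3}
#     encoded = _serialize_parameters(parameters=params)  # base64-encoded pickle, safe to pass in argv
#     assert _deserialize_parameters(parameters=encoded) == params
#
# Note: pickle should only be used with trusted payloads; here both ends of the
# round trip are controlled by this plugin (see cdk_deploy/cdk_handler below).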
def cdk_handler(stack_class: Type["Stack"]) -> None:
_logger.debug("sys.argv: %s", sys.argv)
if len(sys.argv) != 5:
raise ValueError(f"Unexpected number of values in sys.argv ({len(sys.argv)}) - {sys.argv}.")
stack_name: str = sys.argv[1]
team_name: str = sys.argv[3]
parameters: Dict[str, Any] = _deserialize_parameters(parameters=sys.argv[4])
context: "Context" = ContextSerDe.load_context_from_ssm(env_name=sys.argv[2], type=Context)
team_context = context.get_team_by_name(name=team_name)
if team_context is None:
raise ValueError(f"Team {team_name} not found in the context.")
outdir = os.path.join(
".orbit.out",
context.name,
"cdk",
stack_name,
)
shutil.rmtree(outdir, ignore_errors=True)
os.makedirs(outdir, exist_ok=True)
# Can't be imported globally because we only have CDK installed on CodeBuild
from aws_cdk.core import App
app = App(outdir=outdir)
stack_class(app, stack_name, context, team_context, parameters) # type: ignore
app.synth(force=True)
def cdk_prep_team_handler(stack_class: Type["Stack"]) -> None:
_logger.debug("sys.argv: %s", sys.argv)
if len(sys.argv) != 5:
raise ValueError(f"Unexpected number of values in sys.argv ({len(sys.argv)}) - {sys.argv}.")
stack_name: str = sys.argv[1]
# team_name: str = sys.argv[3]
parameters: Dict[str, Any] = _deserialize_parameters(parameters=sys.argv[4])
context: "Context" = ContextSerDe.load_context_from_ssm(env_name=sys.argv[2], type=Context)
# Can not find /orbit/env_name/teams ssm param.
# team_context = context.get_team_by_name(name=team_name)
# if team_context is None:
# raise ValueError(f"Team {team_name} not found in the context.")
outdir = os.path.join(
".orbit.out",
context.name,
"cdk",
stack_name,
)
shutil.rmtree(outdir, ignore_errors=True)
os.makedirs(outdir, exist_ok=True)
# Can't be imported globally because we only have CDK installed on CodeBuild
from aws_cdk.core import App
app = App(outdir=outdir)
stack_class(app, stack_name, context, parameters) # type: ignore
app.synth(force=True)
def cdk_deploy(
stack_name: str,
app_filename: str,
context: "Context",
team_context: "TeamContext",
parameters: Dict[str, Any],
) -> None:
if context.cdk_toolkit.stack_name is None:
raise ValueError(f"context.cdk_toolkit_stack_name: {context.cdk_toolkit.stack_name}")
args: List[str] = [stack_name, context.name, team_context.name, _serialize_parameters(parameters=parameters)]
cmd: str = (
"cdk deploy --require-approval never --progress events "
f"--toolkit-stack-name {context.cdk_toolkit.stack_name} "
f"{cdk.get_app_argument(app_filename, args)} "
f"{cdk.get_output_argument(context, stack_name)}"
)
sh.run(cmd=cmd)
def cdk_destroy(
stack_name: str,
app_filename: str,
context: "Context",
team_context: "TeamContext",
parameters: Dict[str, Any],
) -> None:
if cfn.does_stack_exist(stack_name=stack_name) is False:
_logger.debug("Skipping CDK destroy for %s, because the stack was not found.", stack_name)
return
if context.cdk_toolkit.stack_name is None:
raise ValueError(f"context.cdk_toolkit_stack_name: {context.cdk_toolkit.stack_name}")
args: List[str] = [stack_name, context.name, team_context.name, _serialize_parameters(parameters=parameters)]
cmd: str = (
"cdk destroy --force "
f"--toolkit-stack-name {context.cdk_toolkit.stack_name} "
f"{cdk.get_app_argument(app_filename, args)} "
f"{cdk.get_output_argument(context, stack_name)}"
)
sh.run(cmd=cmd)
|
PypiClean
|
/ais_dom_frontend-20230831.0-py3-none-any.whl/hass_frontend/frontend_es5/37351-269NJCsakwI.js
|
"use strict";(self.webpackChunkhome_assistant_frontend=self.webpackChunkhome_assistant_frontend||[]).push([[37351,4600,47243,71454,19612,56700,19044,46857],{58014:function(e,r,t){function i(e,r){if(e.closest)return e.closest(r);for(var t=e;t;){if(o(t,r))return t;t=t.parentElement}return null}function o(e,r){return(e.matches||e.webkitMatchesSelector||e.msMatchesSelector).call(e,r)}t.d(r,{oq:function(){return i},wB:function(){return o}})},53918:function(e,r,t){t.r(r),t.d(r,{Button:function(){return p}});var i=t(33368),o=t(71650),c=t(69205),n=t(70906),a=t(87480),s=t(79932),d=t(3071),l=t(3712),p=function(e){(0,c.Z)(t,e);var r=(0,n.Z)(t);function t(){return(0,o.Z)(this,t),r.apply(this,arguments)}return(0,i.Z)(t)}(d.X);p.styles=[l.W],p=(0,a.__decorate)([(0,s.Mo)("mwc-button")],p)},30879:function(e,r,t){t.d(r,{D:function(){return z}});var i,o,c,n,a,s=t(33368),d=t(71650),l=t(69205),p=t(70906),u=t(87480),m=t(79932),h=t(88962),g=t(34541),b=t(47838),f=t(38103),_=t(68144),y=t(83448),v=t(30153),k=t(47501),x=function(e){(0,l.Z)(t,e);var r=(0,p.Z)(t);function t(){var e;return(0,d.Z)(this,t),(e=r.apply(this,arguments)).indeterminate=!1,e.progress=0,e.density=0,e.closed=!1,e}return(0,s.Z)(t,[{key:"open",value:function(){this.closed=!1}},{key:"close",value:function(){this.closed=!0}},{key:"render",value:function(){var e={"mdc-circular-progress--closed":this.closed,"mdc-circular-progress--indeterminate":this.indeterminate},r=48+4*this.density,t={width:"".concat(r,"px"),height:"".concat(r,"px")};return(0,_.dy)(i||(i=(0,h.Z)([' <div class="mdc-circular-progress ','" style="','" role="progressbar" aria-label="','" aria-valuemin="0" aria-valuemax="1" aria-valuenow="','"> '," "," </div>"])),(0,y.$)(e),(0,k.V)(t),(0,v.o)(this.ariaLabel),(0,v.o)(this.indeterminate?void 0:this.progress),this.renderDeterminateContainer(),this.renderIndeterminateContainer())}},{key:"renderDeterminateContainer",value:function(){var e=48+4*this.density,r=e/2,t=this.density>=-3?18+11*this.density/6:12.5+5*(this.density+3)/4,i=6.2831852*t,c=(1-this.progress)*i,n=this.density>=-3?4+this.density*(1/3):3+(this.density+3)*(1/6);return(0,_.dy)(o||(o=(0,h.Z)([' <div class="mdc-circular-progress__determinate-container"> <svg class="mdc-circular-progress__determinate-circle-graphic" viewBox="0 0 '," ",'"> <circle class="mdc-circular-progress__determinate-track" cx="','" cy="','" r="','" stroke-width="','"></circle> <circle class="mdc-circular-progress__determinate-circle" cx="','" cy="','" r="','" stroke-dasharray="','" stroke-dashoffset="','" stroke-width="','"></circle> </svg> </div>'])),e,e,r,r,t,n,r,r,t,6.2831852*t,c,n)}},{key:"renderIndeterminateContainer",value:function(){return(0,_.dy)(c||(c=(0,h.Z)([' <div class="mdc-circular-progress__indeterminate-container"> <div class="mdc-circular-progress__spinner-layer"> '," </div> </div>"])),this.renderIndeterminateSpinnerLayer())}},{key:"renderIndeterminateSpinnerLayer",value:function(){var e=48+4*this.density,r=e/2,t=this.density>=-3?18+11*this.density/6:12.5+5*(this.density+3)/4,i=6.2831852*t,o=.5*i,c=this.density>=-3?4+this.density*(1/3):3+(this.density+3)*(1/6);return(0,_.dy)(n||(n=(0,h.Z)([' <div class="mdc-circular-progress__circle-clipper mdc-circular-progress__circle-left"> <svg class="mdc-circular-progress__indeterminate-circle-graphic" viewBox="0 0 '," ",'"> <circle cx="','" cy="','" r="','" stroke-dasharray="','" stroke-dashoffset="','" stroke-width="','"></circle> </svg> </div> <div class="mdc-circular-progress__gap-patch"> <svg 
class="mdc-circular-progress__indeterminate-circle-graphic" viewBox="0 0 '," ",'"> <circle cx="','" cy="','" r="','" stroke-dasharray="','" stroke-dashoffset="','" stroke-width="','"></circle> </svg> </div> <div class="mdc-circular-progress__circle-clipper mdc-circular-progress__circle-right"> <svg class="mdc-circular-progress__indeterminate-circle-graphic" viewBox="0 0 '," ",'"> <circle cx="','" cy="','" r="','" stroke-dasharray="','" stroke-dashoffset="','" stroke-width="','"></circle> </svg> </div>'])),e,e,r,r,t,i,o,c,e,e,r,r,t,i,o,.8*c,e,e,r,r,t,i,o,c)}},{key:"update",value:function(e){(0,g.Z)((0,b.Z)(t.prototype),"update",this).call(this,e),e.has("progress")&&(this.progress>1&&(this.progress=1),this.progress<0&&(this.progress=0))}}]),t}(_.oi);(0,u.__decorate)([(0,m.Cb)({type:Boolean,reflect:!0})],x.prototype,"indeterminate",void 0),(0,u.__decorate)([(0,m.Cb)({type:Number,reflect:!0})],x.prototype,"progress",void 0),(0,u.__decorate)([(0,m.Cb)({type:Number,reflect:!0})],x.prototype,"density",void 0),(0,u.__decorate)([(0,m.Cb)({type:Boolean,reflect:!0})],x.prototype,"closed",void 0),(0,u.__decorate)([f.L,(0,m.Cb)({type:String,attribute:"aria-label"})],x.prototype,"ariaLabel",void 0);var w=(0,_.iv)(a||(a=(0,h.Z)([".mdc-circular-progress__determinate-circle,.mdc-circular-progress__indeterminate-circle-graphic{stroke:#6200ee;stroke:var(--mdc-theme-primary,#6200ee)}.mdc-circular-progress__determinate-track{stroke:transparent}@keyframes mdc-circular-progress-container-rotate{to{transform:rotate(360deg)}}@keyframes mdc-circular-progress-spinner-layer-rotate{12.5%{transform:rotate(135deg)}25%{transform:rotate(270deg)}37.5%{transform:rotate(405deg)}50%{transform:rotate(540deg)}62.5%{transform:rotate(675deg)}75%{transform:rotate(810deg)}87.5%{transform:rotate(945deg)}100%{transform:rotate(1080deg)}}@keyframes mdc-circular-progress-color-1-fade-in-out{from{opacity:.99}25%{opacity:.99}26%{opacity:0}89%{opacity:0}90%{opacity:.99}to{opacity:.99}}@keyframes mdc-circular-progress-color-2-fade-in-out{from{opacity:0}15%{opacity:0}25%{opacity:.99}50%{opacity:.99}51%{opacity:0}to{opacity:0}}@keyframes mdc-circular-progress-color-3-fade-in-out{from{opacity:0}40%{opacity:0}50%{opacity:.99}75%{opacity:.99}76%{opacity:0}to{opacity:0}}@keyframes mdc-circular-progress-color-4-fade-in-out{from{opacity:0}65%{opacity:0}75%{opacity:.99}90%{opacity:.99}to{opacity:0}}@keyframes mdc-circular-progress-left-spin{from{transform:rotate(265deg)}50%{transform:rotate(130deg)}to{transform:rotate(265deg)}}@keyframes mdc-circular-progress-right-spin{from{transform:rotate(-265deg)}50%{transform:rotate(-130deg)}to{transform:rotate(-265deg)}}.mdc-circular-progress{display:inline-flex;position:relative;direction:ltr;line-height:0;transition:opacity 250ms 0s cubic-bezier(.4,0,.6,1)}.mdc-circular-progress__determinate-container,.mdc-circular-progress__indeterminate-circle-graphic,.mdc-circular-progress__indeterminate-container,.mdc-circular-progress__spinner-layer{position:absolute;width:100%;height:100%}.mdc-circular-progress__determinate-container{transform:rotate(-90deg)}.mdc-circular-progress__indeterminate-container{font-size:0;letter-spacing:0;white-space:nowrap;opacity:0}.mdc-circular-progress__determinate-circle-graphic,.mdc-circular-progress__indeterminate-circle-graphic{fill:transparent}.mdc-circular-progress__determinate-circle{transition:stroke-dashoffset .5s 0s 
cubic-bezier(0,0,.2,1)}.mdc-circular-progress__gap-patch{position:absolute;top:0;left:47.5%;box-sizing:border-box;width:5%;height:100%;overflow:hidden}.mdc-circular-progress__gap-patch .mdc-circular-progress__indeterminate-circle-graphic{left:-900%;width:2000%;transform:rotate(180deg)}.mdc-circular-progress__circle-clipper{display:inline-flex;position:relative;width:50%;height:100%;overflow:hidden}.mdc-circular-progress__circle-clipper .mdc-circular-progress__indeterminate-circle-graphic{width:200%}.mdc-circular-progress__circle-right .mdc-circular-progress__indeterminate-circle-graphic{left:-100%}.mdc-circular-progress--indeterminate .mdc-circular-progress__determinate-container{opacity:0}.mdc-circular-progress--indeterminate .mdc-circular-progress__indeterminate-container{opacity:1}.mdc-circular-progress--indeterminate .mdc-circular-progress__indeterminate-container{animation:mdc-circular-progress-container-rotate 1.568s linear infinite}.mdc-circular-progress--indeterminate .mdc-circular-progress__spinner-layer{animation:mdc-circular-progress-spinner-layer-rotate 5332ms cubic-bezier(.4,0,.2,1) infinite both}.mdc-circular-progress--indeterminate .mdc-circular-progress__color-1{animation:mdc-circular-progress-spinner-layer-rotate 5332ms cubic-bezier(.4,0,.2,1) infinite both,mdc-circular-progress-color-1-fade-in-out 5332ms cubic-bezier(.4,0,.2,1) infinite both}.mdc-circular-progress--indeterminate .mdc-circular-progress__color-2{animation:mdc-circular-progress-spinner-layer-rotate 5332ms cubic-bezier(.4,0,.2,1) infinite both,mdc-circular-progress-color-2-fade-in-out 5332ms cubic-bezier(.4,0,.2,1) infinite both}.mdc-circular-progress--indeterminate .mdc-circular-progress__color-3{animation:mdc-circular-progress-spinner-layer-rotate 5332ms cubic-bezier(.4,0,.2,1) infinite both,mdc-circular-progress-color-3-fade-in-out 5332ms cubic-bezier(.4,0,.2,1) infinite both}.mdc-circular-progress--indeterminate .mdc-circular-progress__color-4{animation:mdc-circular-progress-spinner-layer-rotate 5332ms cubic-bezier(.4,0,.2,1) infinite both,mdc-circular-progress-color-4-fade-in-out 5332ms cubic-bezier(.4,0,.2,1) infinite both}.mdc-circular-progress--indeterminate .mdc-circular-progress__circle-left .mdc-circular-progress__indeterminate-circle-graphic{animation:mdc-circular-progress-left-spin 1333ms cubic-bezier(.4,0,.2,1) infinite both}.mdc-circular-progress--indeterminate .mdc-circular-progress__circle-right .mdc-circular-progress__indeterminate-circle-graphic{animation:mdc-circular-progress-right-spin 1333ms cubic-bezier(.4,0,.2,1) infinite both}.mdc-circular-progress--closed{opacity:0}:host{display:inline-flex}.mdc-circular-progress__determinate-track{stroke:transparent;stroke:var(--mdc-circular-progress-track-color,transparent)}"]))),z=function(e){(0,l.Z)(t,e);var r=(0,p.Z)(t);function t(){return(0,d.Z)(this,t),r.apply(this,arguments)}return(0,s.Z)(t)}(x);z.styles=[w],z=(0,u.__decorate)([(0,m.Mo)("mwc-circular-progress")],z)},20210:function(e,r,t){var i,o,c,n,a=t(33368),s=t(71650),d=t(69205),l=t(70906),p=t(87480),u=t(79932),m=t(88962),h=(t(27763),t(38103)),g=t(98734),b=t(68144),f=t(30153),_=function(e){(0,d.Z)(t,e);var r=(0,l.Z)(t);function t(){var e;return(0,s.Z)(this,t),(e=r.apply(this,arguments)).disabled=!1,e.icon="",e.shouldRenderRipple=!1,e.rippleHandlers=new g.A((function(){return e.shouldRenderRipple=!0,e.ripple})),e}return(0,a.Z)(t,[{key:"renderRipple",value:function(){return this.shouldRenderRipple?(0,b.dy)(i||(i=(0,m.Z)([' <mwc-ripple .disabled="','" unbounded> 
</mwc-ripple>'])),this.disabled):""}},{key:"focus",value:function(){var e=this.buttonElement;e&&(this.rippleHandlers.startFocus(),e.focus())}},{key:"blur",value:function(){var e=this.buttonElement;e&&(this.rippleHandlers.endFocus(),e.blur())}},{key:"render",value:function(){return(0,b.dy)(o||(o=(0,m.Z)(['<button class="mdc-icon-button mdc-icon-button--display-flex" aria-label="','" aria-haspopup="','" ?disabled="','" @focus="','" @blur="','" @mousedown="','" @mouseenter="','" @mouseleave="','" @touchstart="','" @touchend="','" @touchcancel="','">'," "," <span><slot></slot></span> </button>"])),this.ariaLabel||this.icon,(0,f.o)(this.ariaHasPopup),this.disabled,this.handleRippleFocus,this.handleRippleBlur,this.handleRippleMouseDown,this.handleRippleMouseEnter,this.handleRippleMouseLeave,this.handleRippleTouchStart,this.handleRippleDeactivate,this.handleRippleDeactivate,this.renderRipple(),this.icon?(0,b.dy)(c||(c=(0,m.Z)(['<i class="material-icons">',"</i>"])),this.icon):"")}},{key:"handleRippleMouseDown",value:function(e){var r=this;window.addEventListener("mouseup",(function e(){window.removeEventListener("mouseup",e),r.handleRippleDeactivate()})),this.rippleHandlers.startPress(e)}},{key:"handleRippleTouchStart",value:function(e){this.rippleHandlers.startPress(e)}},{key:"handleRippleDeactivate",value:function(){this.rippleHandlers.endPress()}},{key:"handleRippleMouseEnter",value:function(){this.rippleHandlers.startHover()}},{key:"handleRippleMouseLeave",value:function(){this.rippleHandlers.endHover()}},{key:"handleRippleFocus",value:function(){this.rippleHandlers.startFocus()}},{key:"handleRippleBlur",value:function(){this.rippleHandlers.endFocus()}}]),t}(b.oi);(0,p.__decorate)([(0,u.Cb)({type:Boolean,reflect:!0})],_.prototype,"disabled",void 0),(0,p.__decorate)([(0,u.Cb)({type:String})],_.prototype,"icon",void 0),(0,p.__decorate)([h.L,(0,u.Cb)({type:String,attribute:"aria-label"})],_.prototype,"ariaLabel",void 0),(0,p.__decorate)([h.L,(0,u.Cb)({type:String,attribute:"aria-haspopup"})],_.prototype,"ariaHasPopup",void 0),(0,p.__decorate)([(0,u.IO)("button")],_.prototype,"buttonElement",void 0),(0,p.__decorate)([(0,u.GC)("mwc-ripple")],_.prototype,"ripple",void 0),(0,p.__decorate)([(0,u.SB)()],_.prototype,"shouldRenderRipple",void 0),(0,p.__decorate)([(0,u.hO)({passive:!0})],_.prototype,"handleRippleMouseDown",null),(0,p.__decorate)([(0,u.hO)({passive:!0})],_.prototype,"handleRippleTouchStart",null);var y=(0,b.iv)(n||(n=(0,m.Z)(['.material-icons{font-family:var(--mdc-icon-font, "Material Icons");font-weight:400;font-style:normal;font-size:var(--mdc-icon-size,24px);line-height:1;letter-spacing:normal;text-transform:none;display:inline-block;white-space:nowrap;word-wrap:normal;direction:ltr;-webkit-font-smoothing:antialiased;text-rendering:optimizeLegibility;-moz-osx-font-smoothing:grayscale;font-feature-settings:"liga"}.mdc-icon-button{font-size:24px;width:48px;height:48px;padding:12px}.mdc-icon-button .mdc-icon-button__focus-ring{display:none}.mdc-icon-button.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring,.mdc-icon-button:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring{display:block;max-height:48px;max-width:48px}@media screen and (forced-colors:active){.mdc-icon-button.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring,.mdc-icon-button:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring{pointer-events:none;border:2px solid 
transparent;border-radius:6px;box-sizing:content-box;position:absolute;top:50%;left:50%;transform:translate(-50%,-50%);height:100%;width:100%}}@media screen and (forced-colors:active)and (forced-colors:active){.mdc-icon-button.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring,.mdc-icon-button:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring{border-color:CanvasText}}@media screen and (forced-colors:active){.mdc-icon-button.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring::after,.mdc-icon-button:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring::after{content:"";border:2px solid transparent;border-radius:8px;display:block;position:absolute;top:50%;left:50%;transform:translate(-50%,-50%);height:calc(100% + 4px);width:calc(100% + 4px)}}@media screen and (forced-colors:active)and (forced-colors:active){.mdc-icon-button.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring::after,.mdc-icon-button:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring::after{border-color:CanvasText}}.mdc-icon-button.mdc-icon-button--reduced-size .mdc-icon-button__ripple{width:40px;height:40px;margin-top:4px;margin-bottom:4px;margin-right:4px;margin-left:4px}.mdc-icon-button.mdc-icon-button--reduced-size.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring,.mdc-icon-button.mdc-icon-button--reduced-size:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring{max-height:40px;max-width:40px}.mdc-icon-button .mdc-icon-button__touch{position:absolute;top:50%;height:48px;left:50%;width:48px;transform:translate(-50%,-50%)}.mdc-icon-button:disabled{color:rgba(0,0,0,.38);color:var(--mdc-theme-text-disabled-on-light,rgba(0,0,0,.38))}.mdc-icon-button img,.mdc-icon-button svg{width:24px;height:24px}.mdc-icon-button{display:inline-block;position:relative;box-sizing:border-box;border:none;outline:0;background-color:transparent;fill:currentColor;color:inherit;text-decoration:none;cursor:pointer;user-select:none;z-index:0;overflow:visible}.mdc-icon-button .mdc-icon-button__touch{position:absolute;top:50%;height:48px;left:50%;width:48px;transform:translate(-50%,-50%)}.mdc-icon-button:disabled{cursor:default;pointer-events:none}.mdc-icon-button--display-flex{align-items:center;display:inline-flex;justify-content:center}.mdc-icon-button__icon{display:inline-block}.mdc-icon-button__icon.mdc-icon-button__icon--on{display:none}.mdc-icon-button--on .mdc-icon-button__icon{display:none}.mdc-icon-button--on .mdc-icon-button__icon.mdc-icon-button__icon--on{display:inline-block}.mdc-icon-button__link{height:100%;left:0;outline:0;position:absolute;top:0;width:100%}.mdc-icon-button{display:inline-block;position:relative;box-sizing:border-box;border:none;outline:0;background-color:transparent;fill:currentColor;color:inherit;text-decoration:none;cursor:pointer;user-select:none;z-index:0;overflow:visible}.mdc-icon-button .mdc-icon-button__touch{position:absolute;top:50%;height:48px;left:50%;width:48px;transform:translate(-50%,-50%)}.mdc-icon-button:disabled{cursor:default;pointer-events:none}.mdc-icon-button--display-flex{align-items:center;display:inline-flex;justify-content:center}.mdc-icon-button__icon{display:inline-block}.mdc-icon-button__icon.mdc-icon-button__icon--on{display:none}.mdc-icon-button--on .mdc-icon-button__icon{display:none}.mdc-icon-button--on 
.mdc-icon-button__icon.mdc-icon-button__icon--on{display:inline-block}.mdc-icon-button__link{height:100%;left:0;outline:0;position:absolute;top:0;width:100%}:host{display:inline-block;outline:0}:host([disabled]){pointer-events:none}.mdc-icon-button ::slotted(*),.mdc-icon-button i,.mdc-icon-button img,.mdc-icon-button svg{display:block}:host{--mdc-ripple-color:currentcolor;-webkit-tap-highlight-color:transparent}.mdc-icon-button,:host{vertical-align:top}.mdc-icon-button{width:var(--mdc-icon-button-size,48px);height:var(--mdc-icon-button-size,48px);padding:calc((var(--mdc-icon-button-size,48px) - var(--mdc-icon-size,24px))/ 2)}.mdc-icon-button ::slotted(*),.mdc-icon-button i,.mdc-icon-button img,.mdc-icon-button svg{display:block;width:var(--mdc-icon-size,24px);height:var(--mdc-icon-size,24px)}']))),v=function(e){(0,d.Z)(t,e);var r=(0,l.Z)(t);function t(){return(0,s.Z)(this,t),r.apply(this,arguments)}return(0,a.Z)(t)}(_);v.styles=[y],v=(0,p.__decorate)([(0,u.Mo)("mwc-icon-button")],v)},47501:function(e,r,t){t.d(r,{V:function(){return i.V}});var i=t(84298)}}]);
//# sourceMappingURL=37351-269NJCsakwI.js.map
|
PypiClean
|
/ploy_ansible-2.0.0b4.tar.gz/ploy_ansible-2.0.0b4/README.rst
|
Overview
========
The ploy_ansible plugin provides integration of `Ansible`_ with `ploy`_.
It automatically builds an `inventory`_ and provides a custom connection plugin.
.. _Ansible: http://docs.ansible.com
.. _ploy: https://github.com/ployground
.. _inventory: http://docs.ansible.com/intro_inventory.html
Installation
============
ploy_ansible is best installed with easy_install, pip or with zc.recipe.egg in a buildout.
Commands
========
The plugin adds the following commands to ploy.
``configure``
Configures an instance.
There are three ways to specify how an instance is configured:
applying the roles given by the instance's ``roles`` option, a playbook set by the ``playbook`` option, or a playbook named after the unique name of the instance found in the ``playbooks-directory``.
The ``roles`` option and a playbook are mutually exclusive.
If you specify a playbook and there is also a playbook in the default location, you will get a warning.
``inventory``
Lists all known groups and their associated hosts, including regular default groups such as ``all``, but also implicit ``ploy_ansible`` groups such as instances of a particular ``master`` (i.e. all ``ez-instances`` of an ``ez-master``).
``ansible``
Runs an Ansible command.
This basically reflects the ``ansible`` script of Ansible.
``playbook``
Applies a playbook.
This basically reflects the ``ansible-playbook`` script of Ansible.
``vault``
Manages file encryption.
This basically reflects the ``ansible-vault`` script of Ansible, but handles the encryption key source via ``ploy.conf``.
``vault-key``
Manages the vault key.
Options
=======
Global
------
playbooks-directory
~~~~~~~~~~~~~~~~~~~
The ``playbooks-directory`` option of the ``ansible`` section allows you to specify the directory where playbooks, roles, host_vars etc are looked up.
If you specify a relative path, then it's always relative to the ``ploy.conf`` directory.
If you have a structure like this::

    project
    |-- deployment
    |   |-- roles
    |   |-- host_vars
    |
    |-- etc
        |-- ploy.conf

Then you would put the following into your ``ploy.conf``::

    [ansible]
    playbooks-directory = ../deployment

By default it is set to the parent of the directory where ``ploy.conf`` is located, like this::

    project
    |-- roles
    |-- host_vars
    |-- etc
        |-- ploy.conf
vault-password-source
~~~~~~~~~~~~~~~~~~~~~
Using the `keyring <https://pypi.python.org/pypi/keyring/4.0/>`_ library, you can store the encryption key for the Ansible vault in your keychain.
The ``vault-password-source`` option is the id used in your keychain.
The id must be unique among all people who use the feature, as it is used as an identifier in their keychains.
If in doubt, use a descriptive prefix and append a GUID generated by running ``python -c "import uuid; print(uuid.uuid4().hex)"``.
If you want to rekey your files, put the old id into the ``vault-password-old-source`` option and set a new id in ``vault-password-source``.
Incrementing a number or appending a new GUID works well.
Example:
.. code-block:: ini

    [ansible]
    vault-password-old-source = my-domain-deployment-0da2c8296f744c90a236721486dbd258
    vault-password-source = my-domain-deployment-042a98b666ec4e4e8e06de7d42688f3b
You can manage your key with the ``vault-key`` command.
For easy exchange with other developers, you can also export and import the key via gpg using the ``vault-key export`` and ``vault-key import`` commands.
Per instance
------------
``groups``
Whitespace separated list of Ansible group names this instance should be added to in addition to the default ones.
``roles``
Used by the ``configure`` command.
This allows you to configure an instance by applying the whitespace separated roles.
This is like creating a playbook which only specifies a host and a list of roles names.
If the ``sudo`` option is set, it's also set for the generated playbook.
``playbook``
Allows you to explicitly specify a playbook to use for this instance.
If you need ``sudo``, then you have to add it yourself in that playbook.
Any option starting with ``ansible_`` is passed through to Ansible as is. This can be used for settings like ``ansible_python_interpreter``.
Any option starting with ``ansible-`` is stripped of the ``ansible-`` prefix and then passed through to Ansible.
This is the main way to set Ansible variables for use in playbooks and roles.
All other options are prefixed with ``ploy_`` and made available to Ansible.
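For illustration, a hypothetical instance section combining these options might look like this (the section header, instance name, and all values are invented for the sketch):

.. code-block:: ini

    [instance:web1]
    groups = webservers production
    roles = common webserver
    sudo = true
    ansible_python_interpreter = /usr/bin/python3
    ansible-app_port = 8080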
Ansible inventory
=================
All instances in ``ploy.conf`` are available to Ansible via their **unique id**.
The variables for each instance are gathered from ``group_vars``, ``host_vars`` and the ``ploy.conf``.
Ansible lookup plugins
======================
The ``ploy_crypted`` lookup plugin can be used in playbooks to read the content of encrypted files.
This is another way to access encrypted data where you don't have to move that data into yml files.
An added benefit is that the file is only decrypted when it is actually accessed.
If you run tasks filtered by tags and those tasks don't access the encrypted data, then it's not decrypted at all.
.. warning::

    This lookup plugin only works with files that are plain ASCII or UTF-8.
    This is a limitation caused by the way Ansible handles variable substitution.
API usage
=========
On the Python side, each ploy instance gains the following methods:
``apply_playbook(self, playbook, *args, **kwargs)``
Applies the ``playbook`` to the instance.
``has_playbook``
Returns ``True`` if the instance has either the ``roles`` or the ``playbook`` option set.
``get_playbook(*args, **kwargs)``
Returns an instance of the Ansible internal ``PlayBook`` class.
This is either from a file (from the ``playbook`` option or the playbook kwarg), or dynamically generated from the ``roles`` option.
``configure(*args, **kwargs)``
Configures the instance with the same semantics as the ``configure`` command.
``get_ansible_variables``
Returns the Ansible variables from the inventory.
This does not include *facts*, as it doesn't connect to the instance.
This is particularly useful in Fabric scripts.
``get_vault_lib``
Returns a readily usable Ansible VaultLib class.
Use the ``encrypt`` and ``decrypt`` methods to encrypt/decrypt strings.
|
PypiClean
|
/advisory-parser-1.12.tar.gz/advisory-parser-1.12/advisory_parser/parsers/chrome.py
|
import re
from datetime import datetime
from advisory_parser.exceptions import AdvisoryParserTextException
from advisory_parser.flaw import Flaw
from .utils import get_text_from_url, CVE_REGEX
# Chromium does not publish CVSS scores with their CVEs so these values are
# best-effort guesses based on impact.
CVSS3_MAP = {
"critical": "9.6/CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:C/C:H/I:H/A:H",
"important": "8.8/CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:H",
"moderate": "6.5/CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:N/A:N",
"low": "4.3/CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:U/C:N/I:N/A:L",
}
def parse_chrome_advisory(url):
advisory_text = get_text_from_url(url)
# Workaround for advisories that do not use <div>s for each CVE entry. E.g.:
# https://chromereleases.googleblog.com/2018/04/stable-channel-update-for-desktop.html
advisory_text = re.sub(r"(.)\[\$", r"\1\n[$", advisory_text)
if "Security Fixes" not in advisory_text:
raise AdvisoryParserTextException("No security fixes found in {}".format(url))
# Throw away parts of the text after the blog post
flaws_text = advisory_text.split("Labels:\nStable updates")[0].strip()
# Parse out public date
match = re.search("^Stable Channel Update for Desktop\n(.+)", flaws_text, re.MULTILINE)
if not match:
raise AdvisoryParserTextException("Could not find public date in {}".format(url))
try:
public_date = datetime.strptime(match.group(1), "%A, %B %d, %Y")
except ValueError:
raise AdvisoryParserTextException(
"Could not parse public date ({}) from {}".format(match.group(1), url)
)
    # Find Chrome version, e.g. 46.0.2490.71
    match = re.search(r"\d{2,3}\.\d\.\d{4}\.\d{2,3}", flaws_text)
    if not match:
        # A failed re.search returns None rather than raising, so check explicitly.
        raise AdvisoryParserTextException("Could not find fixed-in version in {}".format(url))
    fixed_in = match.group(0)
# Filter out lines that contain CVEs
cve_lines = [line.strip() for line in flaws_text.split("\n") if CVE_REGEX.search(line)]
if not cve_lines:
raise AdvisoryParserTextException("Could not find any CVEs in {}".format(url))
flaws, warnings = [], []
for line in cve_lines:
# Parse each line containing information about a CVE, e.g.:
# [$7500][590275] High CVE-2016-1652: XSS in X. Credit to anonymous.
# First, split into two groups by first encountered colon.
metadata, text = line.split(":", maxsplit=1)
if not metadata or not text:
warnings.append("Could not parse line: {}".format(line))
continue
# If a line contains Various, it describes internal fixes, e.g.:
# [563930] CVE-2015-6787: Various fixes from internal audits...
if "Various" in text:
impact = "important"
else:
match = re.search(r"(Critical|High|Medium|Low)", metadata)
if not match:
print("Could not find impact; skipping: {}".format(line))
continue
else:
impact = match.group(1)
impact = impact.lower()
impact = impact.replace("high", "important")
impact = impact.replace("medium", "moderate")
bug_ids = re.findall(r"\d{6,}", metadata)
cves = CVE_REGEX.findall(metadata)
if not bug_ids and not cves:
warnings.append("Could not find CVEs or bugs; skipping: {}".format(line))
continue
summary = text.split(".")[0].strip()
if " in " in summary:
issue, component = summary.split(" in ", 1)
article = "An" if issue.lower()[0] in "aeiou" else "A"
description = (
"{} {} flaw was found in the {} component of the Chromium browser.".format(
article, issue.lower(), component
)
)
elif "various fixes" in summary.lower():
description = summary + "."
summary = "various fixes from internal audits"
else:
description = "The following flaw was identified in the Chromium browser: {}.".format(
summary
)
summary = "chromium-browser: " + summary
description += "\n\nUpstream bug(s):\n"
for bug in bug_ids:
description += "\nhttps://code.google.com/p/chromium/issues/detail?id=" + bug
com_url = (
url if "blogspot.com" in url else re.sub(r"blogspot\.[^/]*/", "blogspot.com/", url)
)
cvss3 = CVSS3_MAP[impact]
flaws.append(
Flaw(
from_url=com_url,
cves=cves,
summary=summary,
public_date=public_date,
cvss3=cvss3,
impact=impact,
fixed_in={"chromium-browser": [fixed_in]},
description=description,
)
)
return flaws, warnings
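# Usage sketch (the URL is the one cited in the workaround comment above;
# network access is required, and this assumes Flaw exposes its constructor
# kwargs as attributes):
#
#     flaws, warnings = parse_chrome_advisory(
#         "https://chromereleases.googleblog.com/2018/04/stable-channel-update-for-desktop.html"
#     )
#     for flaw in flaws:
#         print(flaw.cves, flaw.impact, flaw.summary)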
|
PypiClean
|
/pymorphy-0.5.6.tar.gz/pymorphy-0.5.6/docs/ref/gram_info_ru.rst
|
.. _parameter-format:
Grammatical information notation in pymorphy
--------------------------------------------
Two result output formats are implemented: the default format and a simplified
standardized format agreed upon at the DIALOG-2010 conference.
Full format
^^^^^^^^^^^
This is the default format.
The notation matches the one described here:
http://www.aot.ru/docs/rusmorph.html
When passed as parameters to methods, grammemes should be listed comma-separated
without spaces; the order is arbitrary and case matters::

    "им,мр"

The part of speech usually goes as a separate parameter and is not passed in the
grammatical-information strings.
.. _Russian-cases:
Quick reference on Russian cases
################################

:им: Nominative (Who? What?)
:рд: Genitive (Of whom? Of what?)
:дт: Dative (To whom? To what?)
:вн: Accusative (Whom? What?)
:тв: Instrumental (By whom? With what?)
:пр: Prepositional (About whom? About what? etc.)
:зв: Vocative (used when addressing a person; nominative: Аня, vocative: Ань!)
All grammemes in use
####################

:мр, жр, ср: masculine, feminine, neuter gender
:од, но: animate, inanimate
:ед, мн: singular, plural
:им, рд, дт, вн, тв, пр, зв: cases (see :ref:`the case reference<Russian-cases>`)
:2: denotes the second genitive or second prepositional case
:св, нс: perfective, imperfective aspect
:пе, нп: transitive, intransitive verb
:дст, стр: active, passive voice
:нст, прш, буд: present, past, future tense
:пвл: imperative verb form
:1л, 2л, 3л: first, second, third person
:0: indeclinable
:кр: short form (for adjectives and participles)
:сравн: comparative form (for adjectives)
:имя, фам, отч: first name, surname, patronymic
:лок, орг: locative, organization
:кач: qualitative adjective
:вопр,относ: interrogative and relative (for adverbs)
:дфст: the word usually has no plural form
:опч: common typo or error
:жарг, арх, проф: slang, archaism, professional term
:аббр: abbreviation
:безл: impersonal verb
Parts of speech
###############

============== ================= ======================
Part of speech Example           Meaning
============== ================= ======================
C              мама              noun
П              красный           adjective
МС             он                pronoun-noun
Г              идет              verb (finite form)
ПРИЧАСТИЕ      идущий            participle
ДЕЕПРИЧАСТИЕ   идя               adverbial participle
ИНФИНИТИВ      идти              infinitive
МС-ПРЕДК       нечего            pronoun-predicative
МС-П           всякий            pronominal adjective
ЧИСЛ           восемь            cardinal numeral
ЧИСЛ-П         восьмой           ordinal numeral
Н              круто             adverb
ПРЕДК          интересно         predicative
ПРЕДЛ          под               preposition
СОЮЗ           и                 conjunction
МЕЖД           ой                interjection
ЧАСТ           же, бы            particle
ВВОДН          конечно           parenthetical word
КР_ПРИЛ        красива           short adjective
КР_ПРИЧАСТИЕ   построена         short participle
ПОСЛ                             proverb
ФРАЗ
============== ================= ======================
Упрощенный формат
^^^^^^^^^^^^^^^^^
Данные в этом формате возвращает функция get_graminfo, вызванная с параметром
standard=True. Формат был согласован на конференции Диалог-2010.
.. note::
Получение результатов в этом формате НЕ быстрее, чем в полном.
Разбор "внутри" все равно идет в "полном" формате,
и лишь перед выводом данные преобразуются в упрощенный.
Части речи
##########
Для разметки используется упрощенная система частей речи:
:S: существительное (яблоня, лошадь, корпус, вечность)
:A: прилагательное (коричневый, таинственный, морской)
:V: глагол (пользоваться, обрабатывать)
:PR: предлог (под, напротив)
:CONJ: союз (и, чтобы)
:ADV: — прочие не няемые слова (частицы, междометия, вводные слова)
Могут быть размечены любым образом:
:Местоимения: (включая наречные и предикативные)
:Числительные:

Morphology (grammatical features)
#################################

For the ADV, PR and CONJ categories this field is left empty. Morphology is
specified only for S, A and V.

A reduced feature set is used here as well:

:gender: m, f, n
:case: nom, gen, dat, acc, ins, loc
:number: sg, pl
:tense/mood/participle/gerund: pres, past, imper, inf, partcp, ger
:voice: act, pass
:person: 1p, 2p, 3p
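
For example, a minimal sketch of querying the analyzer in both formats (the
``get_morph`` helper and the dictionary path are assumptions; only
``get_graminfo`` and its ``standard=True`` parameter come from this
documentation)::

    from pymorphy import get_morph

    morph = get_morph('dicts/ru')  # hypothetical dictionary path

    full = morph.get_graminfo(u'ЛОШАДЬ')                   # full (native) format
    simple = morph.get_graminfo(u'ЛОШАДЬ', standard=True)  # simplified Dialog-2010 format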
|
PypiClean
|
/djpykafka-0.0.39.tar.gz/djpykafka-0.0.39/README.md
|
# djpykafka
## 🚧 This project is WIP and is subject to change at any time
This project is currently in an alpha state, though it can be used in production with some caution. Make sure to pin the version in your requirements.txt and review changes frequently.
## Installation
`pip install djpykafka`
## Examples
Add the `KafkaPublishMixin` to your model class.
```python
from django.db import models
from djpykafka.models import KafkaPublishMixin
class MyModel(KafkaPublishMixin, models.Model):
...
```
Create a publisher class, e.g. at `events/publish/{modelname_plural}.py`
```python
from djpykafka.events.publish import EventPublisher, DataChangePublisher
from ... import models
from ...schemas import response
from . import connection
class MyModelPublisher(
DataChangePublisher,
EventPublisher,
orm_model=models.MyModel,
event_schema=response.MyModel,
connection=connection,
topic='bizberry.access.users',
data_type='access.user',
):
pass
```
## AsyncAPI Docs
This feature is WIP.
Install djpykafka with the extra `docs` (`pip install djpykafka[docs]`) and add the following snippets to your `asgi.py`:
```python
from starlette.responses import HTMLResponse, JSONResponse
from starlette.routing import Route
from fastapi import FastAPI
from fastapi.encoders import jsonable_encoder
from asyncapi_docgen.docs import get_asyncapi_ui_html
# ! Import after django's get_asgi_application()
from djpykafka.docs.asyncapi import get_asyncapi
def asyncapi_html(request):
return HTMLResponse(get_asyncapi_ui_html(asyncapi_url='/asyncapi.json'))
def asyncapi_json(request):
return JSONResponse(jsonable_encoder(get_asyncapi(), by_alias=True, exclude_none=True))
app = FastAPI(
...,
routes=[
Route('/asyncapi', asyncapi_html),
Route('/asyncapi.json', asyncapi_json),
],
)
```
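With these routes in place, the rendered documentation should be served at `/asyncapi` and the raw schema at `/asyncapi.json` once the ASGI application is running (for example via `uvicorn`).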
|
PypiClean
|
/sifflet_sdk-0.3.2-py3-none-any.whl/client/model/big_query_partitioning_properties.py
|
import re # noqa: F401
import sys # noqa: F401
from client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from client.exceptions import ApiAttributeError
class BigQueryPartitioningProperties(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('partitioning_type',): {
'INTEGER_RANGE': "INTEGER_RANGE",
'TIME_UNIT_COLUMN': "TIME_UNIT_COLUMN",
'INGESTION_TIME': "INGESTION_TIME",
'NO_PARTITIONING': "NO_PARTITIONING",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'partitioning_type': (str,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'partitioning_type': val}
attribute_map = {
'partitioning_type': 'partitioningType', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, partitioning_type, *args, **kwargs): # noqa: E501
"""BigQueryPartitioningProperties - a model defined in OpenAPI
Args:
partitioning_type (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.partitioning_type = partitioning_type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, partitioning_type, *args, **kwargs): # noqa: E501
"""BigQueryPartitioningProperties - a model defined in OpenAPI
Args:
partitioning_type (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.partitioning_type = partitioning_type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/BlueWhale3-Educational-0.4.1.tar.gz/BlueWhale3-Educational-0.4.1/orangecontrib/educational/widgets/owcreatetable.py
|
import copy
from typing import Optional
import numpy as np
from AnyQt.QtCore import Qt, QAbstractTableModel, QModelIndex, QSize
from AnyQt.QtWidgets import QTableView, QItemDelegate, QLineEdit, QCompleter
from Orange.data import (
Table,
Domain,
ContinuousVariable,
DiscreteVariable,
TimeVariable,
)
from Orange.widgets import gui
from Orange.widgets.settings import (
Setting,
ContextSetting,
PerfectDomainContextHandler,
ContextHandler,
)
from Orange.widgets.utils import vartype
from Orange.widgets.utils.widgetpreview import WidgetPreview
from Orange.widgets.widget import OWWidget, Output, Input, Msg
from orangecontrib.educational.i18n_config import *
def __(key):
return i18n.t('educational.owcreatetable.' + key)
DEFAULT_DATA = [[None] * 3 for _ in range(3)]
class EditableTableItemDelegate(QItemDelegate):
def createEditor(self, parent, options, index: QModelIndex):
model = index.model() # type: EditableTableModel
if not model.is_discrete(index.column()):
return super().createEditor(parent, options, index)
vals = model.discrete_vals(index.column())
edit = QLineEdit(parent)
edit.setCompleter(QCompleter(vals, edit, filterMode=Qt.MatchContains))
def save():
if edit.text():
model.setData(index, edit.text())
edit.editingFinished.connect(save)
return edit
def setEditorData(self, editor, index):
current_val = index.model().data(index, Qt.DisplayRole)
editor.setText(current_val)
class EditableTableModel(QAbstractTableModel):
def __init__(self, parent=None):
super().__init__(parent=parent)
self._table = [[None]]
self._domain = None
def _var(self, index):
# todo: remove if/when domain[idx] is fixed
return (self._domain.variables + self._domain.metas)[index]
def set_domain(self, domain: Optional[Domain]):
self._domain = domain
if domain is not None:
# Todo: change to len(domain) when Domain len's behaviour is fixed
n_columns = len(domain.variables) + len(domain.metas)
else:
n_columns = 3
self.setColumnCount(n_columns)
self.clear()
def is_discrete(self, column):
column_data = set(row[column] for row in self._table) - {None}
def is_number(x):
"""
Check if x is number
x.is_digit only works for usigned ints this on works for ints and
floats
"""
try:
float(x)
return True
except ValueError:
return False
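# A column counts as discrete if the domain explicitly marks it as such,
# or, in the absence of a domain, if it holds values that are neither
# numeric nor parseable as timestamps.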
return (
self._domain is not None
and self._var(column).is_discrete
or (
column_data
and not self.is_time_variable(column)
and not all(map(lambda s: is_number(s), column_data))
)
)
def is_time_variable(self, column):
values = self.time_vals(column)
return values and not all(e is None for e in values)
def time_vals(self, column):
column_data = [row[column] for row in self._table]
try:
tvar = TimeVariable("_")
values = [
tvar.parse_exact_iso(d) if d is not None else None
for d in column_data
]
return values
except ValueError:
return None
def discrete_vals(self, column):
if self._domain is not None and self._var(column).is_discrete:
return self._var(column).values
else:
return list(set(row[column] for row in self._table) - {None})
def rowCount(self, parent=None):
return len(self._table)
def columnCount(self, parent=None):
return len(self._table[0])
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsEditable
def data(self, index: QModelIndex, role=None):
value = self._table[index.row()][index.column()]
if role == Qt.DisplayRole and value is not None:
return str(value)
elif role == Qt.TextAlignmentRole:
return Qt.AlignVCenter | Qt.AlignLeft
def setData(self, index: QModelIndex, value: str, role=None):
value = None if not value else value
self._table[index.row()][index.column()] = value
self.dataChanged.emit(index, index)
return True
def set_table(self, table):
self.beginResetModel()
# must be a copy since model changes it inplace
self._table = copy.deepcopy(table)
self.endResetModel()
def get_raw_table(self):
# the model updates the table inplace
return copy.deepcopy(self._table)
def setRowCount(self, n_rows):
diff = n_rows - self.rowCount()
if diff > 0:
self.beginInsertRows(QModelIndex(), self.rowCount(), n_rows - 1)
self._table += [[None] * self.columnCount() for _ in range(diff)]
self.endInsertRows()
elif diff < 0:
self.beginRemoveRows(QModelIndex(), n_rows, self.rowCount() - 1)
del self._table[n_rows:]
self.endRemoveRows()
def setColumnCount(self, n_columns):
diff = n_columns - self.columnCount()
if diff > 0:
self.beginInsertColumns(
QModelIndex(), self.columnCount(), n_columns - 1)
for row in self._table:
row += [None] * diff
self.endInsertColumns()
elif diff < 0:
self.beginRemoveColumns(
QModelIndex(), n_columns, self.columnCount() - 1)
for row in self._table:
del row[n_columns:]
self.endRemoveColumns()
def headerData(self, section, orientation, role=None):
if orientation == Qt.Vertical:
return super().headerData(section, orientation, role)
if role == Qt.DisplayRole:
if self._domain is None:
return str(section + 1)
else:
return self._var(section).name
def clear(self):
self.set_table(
[[None] * self.columnCount()
for _ in range(self.rowCount())]
)
def get_table(self):
domain = self.get_domain()
data = np.array(self._table)  # type: np.ndarray
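# Re-parse time columns so the constructed Table receives parsed
# timestamps rather than the raw ISO strings typed by the user.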
for ci in range(data.shape[1]):
if isinstance((domain.variables + domain.metas)[ci], TimeVariable):
data[:, ci] = self.time_vals(ci)
return Table.from_list(domain, data)
def get_domain(self):
if self._domain is not None:
return self._domain
vars = []
for ci in range(self.columnCount()):
if self.is_discrete(ci):
values = set(
row[ci] for row in self._table if row[ci] is not None
)
var = DiscreteVariable(name=str(ci + 1), values=values)
elif self.is_time_variable(ci):
var = TimeVariable(name=str(ci + 1))
else:
var = ContinuousVariable(name=str(ci + 1))
vars.append(var)
return Domain(vars)
class CreateTableContextHandler(PerfectDomainContextHandler):
def open_context(self, widget, domain):
ContextHandler.open_context(
self, widget, domain, *self.encode_domain(domain)
)
def encode_domain(self, domain):
"""
Encode domain into tuples (name, type)
A tuple is returned for each of attributes, class_vars and metas.
"""
if self.match_values == self.MATCH_VALUES_ALL:
def _encode(attrs):
return tuple(
(v.name, list(v.values) if v.is_discrete else vartype(v))
for v in attrs
)
else:
def _encode(attrs):
return tuple((v.name, vartype(v)) for v in attrs)
if domain is None:
return (None, None, None)
return (
_encode(domain.attributes),
_encode(domain.class_vars),
_encode(domain.metas),
)
class OWCreateTable(OWWidget):
name = __("name")
icon = "icons/CreateTable.png"
priority = 50
keywords = []
class Inputs:
data = Input("Data", Table, label=i18n.t("educational.common.data"))
class Outputs:
data = Output("Data", Table, label=i18n.t("educational.common.data"))
class Error(OWWidget.Error):
transform_err = Msg(__("msg.transform_err"))
settingsHandler = CreateTableContextHandler()
n_rows = Setting(len(DEFAULT_DATA))
n_columns = Setting(len(DEFAULT_DATA[0]))
auto_commit = Setting(True)
# since data is a small (at most 20x20) table we can afford to store it
# as a context
context_data = ContextSetting(copy.deepcopy(DEFAULT_DATA), schema_only=True)
def __init__(self):
super().__init__()
options = {"labelWidth": 100, "controlWidth": 50}
box = gui.vBox(self.controlArea, __("box.control"))
self.r_spin = gui.spin(
box,
self,
"n_rows",
1,
20,
1,
**options,
label=__("label.rows"),
callback=self.nrows_changed
)
self.c_spin = gui.spin(
box,
self,
"n_columns",
1,
20,
1,
**options,
label=__("label.columns"),
callback=self.ncolumns_changed
)
box.setMinimumWidth(200)
gui.rubber(self.controlArea)
gui.auto_send(self.buttonsArea, self, "auto_commit")
box = gui.vBox(self.mainArea, True, margin=0)
self.table = QTableView(box)
self.table.setItemDelegate(EditableTableItemDelegate())
self.table.setEditTriggers(self.table.CurrentChanged)
box.layout().addWidget(self.table)
self.table_model = EditableTableModel()
self.table.setModel(self.table_model)
self.table_model.dataChanged.connect(self.data_changed)
self.table_model.set_table(self.context_data)
self.set_dataset(None) # to init the context
def nrows_changed(self):
self.table_model.setRowCount(self.n_rows)
self.commit()
def ncolumns_changed(self):
self.table_model.setColumnCount(self.n_columns)
self.commit()
def data_changed(self):
self.context_data = self.table_model.get_raw_table()
self.commit()
def commit(self):
data = None
try:
data = self.table_model.get_table()
self.Error.transform_err.clear()
except Exception:
self.Error.transform_err()
self.Outputs.data.send(data)
@Inputs.data
def set_dataset(self, data):
self.closeContext()
if data is not None:
self.table_model.set_domain(data.domain)
self.context_data = [
[None] * (len(data.domain.variables) + len(data.domain.metas))
for _ in range(self.table_model.rowCount())
]
else:
self.table_model.set_domain(None)
self.context_data = copy.deepcopy(DEFAULT_DATA)
self.c_spin.setEnabled(data is None)
self.c_spin.setValue(self.table_model.columnCount())
self.openContext(data)
self.unconditional_commit()
@staticmethod
def sizeHint():
return QSize(800, 500)
def openContext(self, data):
super(OWCreateTable, self).openContext(data.domain if data else None)
self.table_model.set_table(self.context_data)
if __name__ == "__main__": # pragma: no cover
WidgetPreview(OWCreateTable).run(Table("iris"))
|
PypiClean
|
/ydk-models-cisco-ios-xe-16.9.3.post1.tar.gz/ydk-models-cisco-ios-xe-16.9.3.post1/ydk/models/cisco_ios_xe/CISCO_VLAN_MEMBERSHIP_MIB.py
|
import sys
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class CISCOVLANMEMBERSHIPMIB(Entity):
"""
.. attribute:: vmvmps
**type**\: :py:class:`VmVmps <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmVmps>`
**config**\: False
.. attribute:: vmmembership
**type**\: :py:class:`VmMembership <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmMembership>`
**config**\: False
.. attribute:: vmstatistics
**type**\: :py:class:`VmStatistics <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmStatistics>`
**config**\: False
.. attribute:: vmstatus
**type**\: :py:class:`VmStatus <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmStatus>`
**config**\: False
.. attribute:: vmvmpstable
A table of VMPS to use. The device will use the primary VMPS by default. If the device is unable to reach the primary server after vmVmpsRetries retries, it uses the first secondary server in the table until it runs out of secondary servers, in which case it will return to using the primary server. Entries in this table may be created and deleted via this MIB or the management console on a device
**type**\: :py:class:`VmVmpsTable <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmVmpsTable>`
**config**\: False
.. attribute:: vmmembershipsummarytable
A summary of VLAN membership of non\-trunk bridge ports. This is a convenience table for retrieving VLAN membership information. A row is created for a VLAN if\: a) the VLAN exists, or b) a port is assigned to a non\-existent VLAN. VLAN membership can only be modified via the vmMembershipTable
**type**\: :py:class:`VmMembershipSummaryTable <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryTable>`
**config**\: False
.. attribute:: vmmembershiptable
A table for configuring VLAN port membership. There is one row for each bridge port that is assigned to a static or dynamic access port. Trunk ports are not represented in this table. An entry may be created and deleted when ports are created or deleted via SNMP or the management console on a device
**type**\: :py:class:`VmMembershipTable <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmMembershipTable>`
**config**\: False
.. attribute:: vmmembershipsummaryexttable
A summary of VLAN membership of non\-trunk bridge ports. This table is used for retrieving VLAN membership information for the device which supports dot1dBasePort with value greater than 2048. A row is created for a VLAN and a particular bridge port range, where at least one port in the range is assigned to this VLAN. VLAN membership can only be modified via the vmMembershipTable
**type**\: :py:class:`VmMembershipSummaryExtTable <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryExtTable>`
**config**\: False
.. attribute:: vmvoicevlantable
A table for configuring the Voice VLAN\-ID for the ports. An entry will exist for each interface which supports Voice Vlan feature
**type**\: :py:class:`VmVoiceVlanTable <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmVoiceVlanTable>`
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB, self).__init__()
self._top_entity = None
self.yang_name = "CISCO-VLAN-MEMBERSHIP-MIB"
self.yang_parent_name = "CISCO-VLAN-MEMBERSHIP-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("vmVmps", ("vmvmps", CISCOVLANMEMBERSHIPMIB.VmVmps)), ("vmMembership", ("vmmembership", CISCOVLANMEMBERSHIPMIB.VmMembership)), ("vmStatistics", ("vmstatistics", CISCOVLANMEMBERSHIPMIB.VmStatistics)), ("vmStatus", ("vmstatus", CISCOVLANMEMBERSHIPMIB.VmStatus)), ("vmVmpsTable", ("vmvmpstable", CISCOVLANMEMBERSHIPMIB.VmVmpsTable)), ("vmMembershipSummaryTable", ("vmmembershipsummarytable", CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryTable)), ("vmMembershipTable", ("vmmembershiptable", CISCOVLANMEMBERSHIPMIB.VmMembershipTable)), ("vmMembershipSummaryExtTable", ("vmmembershipsummaryexttable", CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryExtTable)), ("vmVoiceVlanTable", ("vmvoicevlantable", CISCOVLANMEMBERSHIPMIB.VmVoiceVlanTable))])
self._leafs = OrderedDict()
self.vmvmps = CISCOVLANMEMBERSHIPMIB.VmVmps()
self.vmvmps.parent = self
self._children_name_map["vmvmps"] = "vmVmps"
self.vmmembership = CISCOVLANMEMBERSHIPMIB.VmMembership()
self.vmmembership.parent = self
self._children_name_map["vmmembership"] = "vmMembership"
self.vmstatistics = CISCOVLANMEMBERSHIPMIB.VmStatistics()
self.vmstatistics.parent = self
self._children_name_map["vmstatistics"] = "vmStatistics"
self.vmstatus = CISCOVLANMEMBERSHIPMIB.VmStatus()
self.vmstatus.parent = self
self._children_name_map["vmstatus"] = "vmStatus"
self.vmvmpstable = CISCOVLANMEMBERSHIPMIB.VmVmpsTable()
self.vmvmpstable.parent = self
self._children_name_map["vmvmpstable"] = "vmVmpsTable"
self.vmmembershipsummarytable = CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryTable()
self.vmmembershipsummarytable.parent = self
self._children_name_map["vmmembershipsummarytable"] = "vmMembershipSummaryTable"
self.vmmembershiptable = CISCOVLANMEMBERSHIPMIB.VmMembershipTable()
self.vmmembershiptable.parent = self
self._children_name_map["vmmembershiptable"] = "vmMembershipTable"
self.vmmembershipsummaryexttable = CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryExtTable()
self.vmmembershipsummaryexttable.parent = self
self._children_name_map["vmmembershipsummaryexttable"] = "vmMembershipSummaryExtTable"
self.vmvoicevlantable = CISCOVLANMEMBERSHIPMIB.VmVoiceVlanTable()
self.vmvoicevlantable.parent = self
self._children_name_map["vmvoicevlantable"] = "vmVoiceVlanTable"
self._segment_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB, [], name, value)
class VmVmps(Entity):
"""
.. attribute:: vmvmpsvqpversion
The VLAN Query Protocol (VQP) version supported on the device. VQP is the protocol used to query VLAN Membership Policy Server (VMPS) for VLAN membership assignments of dynamic VLAN ports. A VMPS provides VLAN membership policy assignments based on the content of the packets received on a port
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: vmvmpsretries
The number of retries for VQP requests to a VMPS before using the next available VMPS
**type**\: int
**range:** 1..10
**config**\: False
.. attribute:: vmvmpsreconfirminterval
The switch will reconfirm membership of addresses on each port with VMPS periodically. This object specifies the interval to perform reconfirmation. If the value is set to 0, the switch does not reconfirm membership with VMPS
**type**\: int
**range:** 0..120
**config**\: False
**units**\: Minutes
.. attribute:: vmvmpsreconfirm
Setting this object to execute(2) causes the switch to reconfirm membership of every dynamic port. Reading this object always returns ready(1)
**type**\: :py:class:`VmVmpsReconfirm <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmVmps.VmVmpsReconfirm>`
**config**\: False
.. attribute:: vmvmpsreconfirmresult
This object returns the result of the last request that sets vmVmpsReconfirm to execute(2). The semantics of the possible results are as follows\: other(1) \- none of following inProgress(2) \- reconfirm in progress success(3) \- reconfirm completed successfully noResponse(4) \- reconfirm failed because no VMPS responded noVmps(5) \- No VMPS configured noDynamicPort(6) \- No dynamic ports configured noHostConnected(7) \- No hosts on dynamic ports
**type**\: :py:class:`VmVmpsReconfirmResult <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmVmps.VmVmpsReconfirmResult>`
**config**\: False
.. attribute:: vmvmpscurrent
This is the IpAddress of the current VMPS used
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmVmps, self).__init__()
self.yang_name = "vmVmps"
self.yang_parent_name = "CISCO-VLAN-MEMBERSHIP-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('vmvmpsvqpversion', (YLeaf(YType.int32, 'vmVmpsVQPVersion'), ['int'])),
('vmvmpsretries', (YLeaf(YType.int32, 'vmVmpsRetries'), ['int'])),
('vmvmpsreconfirminterval', (YLeaf(YType.int32, 'vmVmpsReconfirmInterval'), ['int'])),
('vmvmpsreconfirm', (YLeaf(YType.enumeration, 'vmVmpsReconfirm'), [('ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB', 'CISCOVLANMEMBERSHIPMIB', 'VmVmps.VmVmpsReconfirm')])),
('vmvmpsreconfirmresult', (YLeaf(YType.enumeration, 'vmVmpsReconfirmResult'), [('ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB', 'CISCOVLANMEMBERSHIPMIB', 'VmVmps.VmVmpsReconfirmResult')])),
('vmvmpscurrent', (YLeaf(YType.str, 'vmVmpsCurrent'), ['str'])),
])
self.vmvmpsvqpversion = None
self.vmvmpsretries = None
self.vmvmpsreconfirminterval = None
self.vmvmpsreconfirm = None
self.vmvmpsreconfirmresult = None
self.vmvmpscurrent = None
self._segment_path = lambda: "vmVmps"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmVmps, ['vmvmpsvqpversion', 'vmvmpsretries', 'vmvmpsreconfirminterval', 'vmvmpsreconfirm', 'vmvmpsreconfirmresult', 'vmvmpscurrent'], name, value)
class VmVmpsReconfirm(Enum):
"""
VmVmpsReconfirm (Enum Class)
Setting this object to execute(2) causes the switch
to reconfirm membership of every dynamic port.
Reading this object always returns ready(1).
.. data:: ready = 1
.. data:: execute = 2
"""
ready = Enum.YLeaf(1, "ready")
execute = Enum.YLeaf(2, "execute")
class VmVmpsReconfirmResult(Enum):
"""
VmVmpsReconfirmResult (Enum Class)
This object returns the result of the last request
that sets vmVmpsReconfirm to execute(2). The
semantics of the possible results are as follows\:
other(1) \- none of following
inProgress(2) \- reconfirm in progress
success(3) \- reconfirm completed successfully
noResponse(4) \- reconfirm failed because no
VMPS responded
noVmps(5) \- No VMPS configured
noDynamicPort(6) \- No dynamic ports configured
noHostConnected(7) \- No hosts on dynamic ports
.. data:: other = 1
.. data:: inProgress = 2
.. data:: success = 3
.. data:: noResponse = 4
.. data:: noVmps = 5
.. data:: noDynamicPort = 6
.. data:: noHostConnected = 7
"""
other = Enum.YLeaf(1, "other")
inProgress = Enum.YLeaf(2, "inProgress")
success = Enum.YLeaf(3, "success")
noResponse = Enum.YLeaf(4, "noResponse")
noVmps = Enum.YLeaf(5, "noVmps")
noDynamicPort = Enum.YLeaf(6, "noDynamicPort")
noHostConnected = Enum.YLeaf(7, "noHostConnected")
class VmMembership(Entity):
"""
.. attribute:: vmvlancreationmode
This object is used to determine whether or not a non\-existing VLAN will be created automatically by the system after assigned to a port. automatic(1)\: a non\-existing VLAN will be created automatically by the system after assigned to a port. manual(2)\: a non\-existing VLAN will not be created automatically by the system and need to be manually created by the users after assigned to a port
**type**\: :py:class:`VmVlanCreationMode <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmMembership.VmVlanCreationMode>`
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmMembership, self).__init__()
self.yang_name = "vmMembership"
self.yang_parent_name = "CISCO-VLAN-MEMBERSHIP-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('vmvlancreationmode', (YLeaf(YType.enumeration, 'vmVlanCreationMode'), [('ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB', 'CISCOVLANMEMBERSHIPMIB', 'VmMembership.VmVlanCreationMode')])),
])
self.vmvlancreationmode = None
self._segment_path = lambda: "vmMembership"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmMembership, ['vmvlancreationmode'], name, value)
class VmVlanCreationMode(Enum):
"""
VmVlanCreationMode (Enum Class)
This object is used to determine whether or not
a non\-existing VLAN will be created automatically
by the system after assigned to a port.
automatic(1)\: a non\-existing VLAN will be created
automatically by the system after
assigned to a port.
manual(2)\: a non\-existing VLAN will not be created
automatically by the system and need to be
manually created by the users after assigned
to a port.
.. data:: automatic = 1
.. data:: manual = 2
"""
automatic = Enum.YLeaf(1, "automatic")
manual = Enum.YLeaf(2, "manual")
class VmStatistics(Entity):
"""
.. attribute:: vmvqpqueries
The total number of VQP requests sent by this device to all VMPS since last system re\-initialization
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vmvqpresponses
The number of VQP responses received by this device from all VMPS since last system re\-initialization
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vmvmpschanges
The number of times, since last system re\-initialization, the current VMPS was changed. The current VMPS is changed whenever the VMPS fails to response after vmVmpsRetries of a VQP request
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vmvqpshutdown
The number of times, since last system re\-initialization, a VQP response indicates 'shutdown'. A 'shutdown' response is a result of the membership policy configured at a VMPS by the administrator
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vmvqpdenied
The number of times, since last system re\-initialization, a VQP response indicates 'denied'. A 'denied' response is a result of the membership policy configured at a VMPS by the administrator
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vmvqpwrongdomain
The number of times, since last system re\-initialization, a VQP response indicates wrong management domain. A wrong management domain response indicates that the VMPS used serves a management domain that is different from the device's management domain
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vmvqpwrongversion
The number of times, since last system re\-initialization, a VQP response indicates wrong VQP version. A wrong VQP version response indicates that the VMPS used supports a VQP version that is different from the device's VQP version
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vminsufficientresources
The number of times, since last system re\-initialization, a VQP response indicates insufficient resources. An insufficient resources response indicates that the VMPS used does not have the required resources to verify the membership assignment requested
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmStatistics, self).__init__()
self.yang_name = "vmStatistics"
self.yang_parent_name = "CISCO-VLAN-MEMBERSHIP-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('vmvqpqueries', (YLeaf(YType.uint32, 'vmVQPQueries'), ['int'])),
('vmvqpresponses', (YLeaf(YType.uint32, 'vmVQPResponses'), ['int'])),
('vmvmpschanges', (YLeaf(YType.uint32, 'vmVmpsChanges'), ['int'])),
('vmvqpshutdown', (YLeaf(YType.uint32, 'vmVQPShutdown'), ['int'])),
('vmvqpdenied', (YLeaf(YType.uint32, 'vmVQPDenied'), ['int'])),
('vmvqpwrongdomain', (YLeaf(YType.uint32, 'vmVQPWrongDomain'), ['int'])),
('vmvqpwrongversion', (YLeaf(YType.uint32, 'vmVQPWrongVersion'), ['int'])),
('vminsufficientresources', (YLeaf(YType.uint32, 'vmInsufficientResources'), ['int'])),
])
self.vmvqpqueries = None
self.vmvqpresponses = None
self.vmvmpschanges = None
self.vmvqpshutdown = None
self.vmvqpdenied = None
self.vmvqpwrongdomain = None
self.vmvqpwrongversion = None
self.vminsufficientresources = None
self._segment_path = lambda: "vmStatistics"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmStatistics, ['vmvqpqueries', 'vmvqpresponses', 'vmvmpschanges', 'vmvqpshutdown', 'vmvqpdenied', 'vmvqpwrongdomain', 'vmvqpwrongversion', 'vminsufficientresources'], name, value)
class VmStatus(Entity):
"""
.. attribute:: vmnotificationsenabled
An indication of whether the notifications/traps defined in this MIB are enabled
**type**\: bool
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmStatus, self).__init__()
self.yang_name = "vmStatus"
self.yang_parent_name = "CISCO-VLAN-MEMBERSHIP-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('vmnotificationsenabled', (YLeaf(YType.boolean, 'vmNotificationsEnabled'), ['bool'])),
])
self.vmnotificationsenabled = None
self._segment_path = lambda: "vmStatus"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmStatus, ['vmnotificationsenabled'], name, value)
class VmVmpsTable(Entity):
"""
A table of VMPS to use. The device will use
the primary VMPS by default. If the
device is unable to reach the primary server
after vmVmpsRetries retries, it uses the first
secondary server in the table until it runs out
of secondary servers, in which case it will return
to using the primary server. Entries in this table
may be created and deleted via this MIB or
the management console on a device.
.. attribute:: vmvmpsentry
An entry (conceptual row) in the vmVmpsTable
**type**\: list of :py:class:`VmVmpsEntry <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmVmpsTable.VmVmpsEntry>`
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmVmpsTable, self).__init__()
self.yang_name = "vmVmpsTable"
self.yang_parent_name = "CISCO-VLAN-MEMBERSHIP-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("vmVmpsEntry", ("vmvmpsentry", CISCOVLANMEMBERSHIPMIB.VmVmpsTable.VmVmpsEntry))])
self._leafs = OrderedDict()
self.vmvmpsentry = YList(self)
self._segment_path = lambda: "vmVmpsTable"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmVmpsTable, [], name, value)
class VmVmpsEntry(Entity):
"""
An entry (conceptual row) in the vmVmpsTable.
.. attribute:: vmvmpsipaddress (key)
The Ip Address of the VMPS
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: vmvmpsprimary
The status of the VMPS. Setting this value to true will make this VMPS the primary server and make the switch use this as the current server. Setting this entry to true causes other rows to transition to false. Attempting to write a value of false after creation will result in a return of bad value. Deleting an entry whose value is true will result in the first entry in the table being set to true
**type**\: bool
**config**\: False
.. attribute:: vmvmpsrowstatus
The status of this conceptual row
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmVmpsTable.VmVmpsEntry, self).__init__()
self.yang_name = "vmVmpsEntry"
self.yang_parent_name = "vmVmpsTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['vmvmpsipaddress']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('vmvmpsipaddress', (YLeaf(YType.str, 'vmVmpsIpAddress'), ['str'])),
('vmvmpsprimary', (YLeaf(YType.boolean, 'vmVmpsPrimary'), ['bool'])),
('vmvmpsrowstatus', (YLeaf(YType.enumeration, 'vmVmpsRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.vmvmpsipaddress = None
self.vmvmpsprimary = None
self.vmvmpsrowstatus = None
self._segment_path = lambda: "vmVmpsEntry" + "[vmVmpsIpAddress='" + str(self.vmvmpsipaddress) + "']"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/vmVmpsTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmVmpsTable.VmVmpsEntry, ['vmvmpsipaddress', 'vmvmpsprimary', 'vmvmpsrowstatus'], name, value)
class VmMembershipSummaryTable(Entity):
"""
A summary of VLAN membership of non\-trunk
bridge ports. This is a convenience table
for retrieving VLAN membership information.
A row is created for a VLAN if\:
a) the VLAN exists, or
b) a port is assigned to a non\-existent VLAN.
VLAN membership can only be modified via the
vmMembershipTable.
.. attribute:: vmmembershipsummaryentry
An entry (conceptual row) in the vmMembershipSummaryTable
**type**\: list of :py:class:`VmMembershipSummaryEntry <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryTable.VmMembershipSummaryEntry>`
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryTable, self).__init__()
self.yang_name = "vmMembershipSummaryTable"
self.yang_parent_name = "CISCO-VLAN-MEMBERSHIP-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("vmMembershipSummaryEntry", ("vmmembershipsummaryentry", CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryTable.VmMembershipSummaryEntry))])
self._leafs = OrderedDict()
self.vmmembershipsummaryentry = YList(self)
self._segment_path = lambda: "vmMembershipSummaryTable"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryTable, [], name, value)
class VmMembershipSummaryEntry(Entity):
"""
An entry (conceptual row) in the
vmMembershipSummaryTable.
.. attribute:: vmmembershipsummaryvlanindex (key)
The VLAN id of the VLAN
**type**\: int
**range:** 0..4095
**config**\: False
.. attribute:: vmmembershipsummarymemberports
The set of the device's member ports that belong to the VLAN. Each octet within the value of this object specifies a set of eight ports, with the first octet specifying ports 1 through 8, the second octet specifying ports 9 through 16, etc. Within each octet, the most significant bit represents the lowest numbered port, and the least significant bit represents the highest numbered port. Thus, each port of the VLAN is represented by a single bit within the value of this object. If that bit has a value of '1' then that port is included in the set of ports; the port is not included if its bit has a value of '0'. A port number is the value of dot1dBasePort for the port in the BRIDGE\-MIB (RFC 1493)
**type**\: str
**length:** 0..128
**config**\: False
**status**\: deprecated
.. attribute:: vmmembershipsummarymember2kports
The set of the device's member ports that belong to the VLAN. It has the VLAN membership information of up to 2048 ports with the port number from 1 to 2048. Each octet within the value of this object specifies a set of eight ports, with the first octet specifying ports 1 through 8, the second octet specifying ports 9 through 16, etc. Within each octet, the most significant bit represents the lowest numbered port, and the least significant bit represents the highest numbered port. Thus, each port of the VLAN is represented by a single bit within the value of this object. If that bit has a value of '1' then that port is included in the set of ports; the port is not included if its bit has a value of '0'. A port number is the value of dot1dBasePort for the port in the BRIDGE\-MIB (RFC 1493)
**type**\: str
**length:** 0..256
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryTable.VmMembershipSummaryEntry, self).__init__()
self.yang_name = "vmMembershipSummaryEntry"
self.yang_parent_name = "vmMembershipSummaryTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['vmmembershipsummaryvlanindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('vmmembershipsummaryvlanindex', (YLeaf(YType.int32, 'vmMembershipSummaryVlanIndex'), ['int'])),
('vmmembershipsummarymemberports', (YLeaf(YType.str, 'vmMembershipSummaryMemberPorts'), ['str'])),
('vmmembershipsummarymember2kports', (YLeaf(YType.str, 'vmMembershipSummaryMember2kPorts'), ['str'])),
])
self.vmmembershipsummaryvlanindex = None
self.vmmembershipsummarymemberports = None
self.vmmembershipsummarymember2kports = None
self._segment_path = lambda: "vmMembershipSummaryEntry" + "[vmMembershipSummaryVlanIndex='" + str(self.vmmembershipsummaryvlanindex) + "']"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/vmMembershipSummaryTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryTable.VmMembershipSummaryEntry, ['vmmembershipsummaryvlanindex', 'vmmembershipsummarymemberports', 'vmmembershipsummarymember2kports'], name, value)
class VmMembershipTable(Entity):
"""
A table for configuring VLAN port membership.
There is one row for each bridge port that is
assigned to a static or dynamic access port. Trunk
ports are not represented in this table. An entry
may be created and deleted when ports are created or
deleted via SNMP or the management console on a
device.
.. attribute:: vmmembershipentry
An entry (conceptual row) in the vmMembershipTable
**type**\: list of :py:class:`VmMembershipEntry <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmMembershipTable.VmMembershipEntry>`
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmMembershipTable, self).__init__()
self.yang_name = "vmMembershipTable"
self.yang_parent_name = "CISCO-VLAN-MEMBERSHIP-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("vmMembershipEntry", ("vmmembershipentry", CISCOVLANMEMBERSHIPMIB.VmMembershipTable.VmMembershipEntry))])
self._leafs = OrderedDict()
self.vmmembershipentry = YList(self)
self._segment_path = lambda: "vmMembershipTable"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmMembershipTable, [], name, value)
class VmMembershipEntry(Entity):
"""
An entry (conceptual row) in the vmMembershipTable.
.. attribute:: ifindex (key)
**type**\: int
**range:** 1..2147483647
**refers to**\: :py:class:`ifindex <ydk.models.cisco_ios_xe.IF_MIB.IFMIB.IfTable.IfEntry>`
**config**\: False
.. attribute:: vmvlantype
The type of VLAN membership assigned to this port. A port with static vlan membership is assigned to a single VLAN directly. A port with dynamic membership is assigned a single VLAN based on content of packets received on the port and via VQP queries to VMPS. A port with multiVlan membership may be assigned to one or more VLANs directly. A static or dynamic port membership is specified by the value of vmVlan. A multiVlan port membership is specified by the value of vmVlans
**type**\: :py:class:`VmVlanType <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmMembershipTable.VmMembershipEntry.VmVlanType>`
**config**\: False
.. attribute:: vmvlan
The VLAN id of the VLAN the port is assigned to when vmVlanType is set to static or dynamic. This object is not instantiated if not applicable. The value may be 0 if the port is not assigned to a VLAN. If vmVlanType is static, the port is always assigned to a VLAN and the object may not be set to 0. If vmVlanType is dynamic the object's value is 0 if the port is currently not assigned to a VLAN. In addition, the object may be set to 0 only
**type**\: int
**range:** 0..4095
**config**\: False
.. attribute:: vmportstatus
An indication of the current VLAN status of the port. A status of inactive(1) indicates that a dynamic port does not yet have a VLAN assigned, or a port is assigned to a VLAN that is currently not active. A status of active(2) indicates that the currently assigned VLAN is active. A status of shutdown(3) indicates that the port has been disabled as a result of VQP shutdown response
**type**\: :py:class:`VmPortStatus <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmMembershipTable.VmMembershipEntry.VmPortStatus>`
**config**\: False
.. attribute:: vmvlans
The VLAN(s) the port is assigned to when the port's vmVlanType is set to multiVlan. This object is not instantiated if not applicable. The port is always assigned to one or more VLANs and the object may not be set so that there are no vlans assigned. Each octet within the value of this object specifies a set of eight VLANs, with the first octet specifying VLAN id 1 through 8, the second octet specifying VLAN ids 9 through 16, etc. Within each octet, the most significant bit represents the lowest numbered VLAN id, and the least significant bit represents the highest numbered VLAN id. Thus, each VLAN of the port is represented by a single bit within the value of this object. If that bit has a value of '1' then that VLAN is included in the set of VLANs; the VLAN is not included if its bit has a value of '0'
**type**\: str
**length:** 0..128
**config**\: False
.. attribute:: vmvlans2k
The VLAN(s) the port is assigned to when the port's vmVlanType is set to multiVlan. This object is not instantiated if not applicable. The port is always assigned to one or more VLANs and the object may not be set so that there are no vlans assigned. Each octet within the value of this object specifies a set of eight VLANs, with the first octet specifying VLAN id 1024 through 1031, the second octet specifying VLAN ids 1032 through 1039, etc. Within each octet, the most significant bit represents the lowest numbered VLAN id, and the least significant bit represents the highest numbered VLAN id. Thus, each VLAN of the port is represented by a single bit within the value of this object. If that bit has a value of '1' then that VLAN is included in the set of VLANs; the VLAN is not included if its bit has a value of '0'
**type**\: str
**length:** 0..128
**config**\: False
.. attribute:: vmvlans3k
The VLAN(s) the port is assigned to when the port's vmVlanType is set to multiVlan. This object is not instantiated if not applicable. The port is always assigned to one or more VLANs and the object may not be set so that there are no vlans assigned. Each octet within the value of this object specifies a set of eight VLANs, with the first octet specifying VLAN id 2048 through 2055, the second octet specifying VLAN ids 2056 through 2063, etc. Within each octet, the most significant bit represents the lowest numbered VLAN id, and the least significant bit represents the highest numbered VLAN id. Thus, each VLAN of the port is represented by a single bit within the value of this object. If that bit has a value of '1' then that VLAN is included in the set of VLANs; the VLAN is not included if its bit has a value of '0'
**type**\: str
**length:** 0..128
**config**\: False
.. attribute:: vmvlans4k
The VLAN(s) the port is assigned to when the port's vmVlanType is set to multiVlan. This object is not instantiated if not applicable. The port is always assigned to one or more VLANs and the object may not be set so that there are no vlans assigned. Each octet within the value of this object specifies a set of eight VLANs, with the first octet specifying VLAN id 3072 through 3079, the second octet specifying VLAN ids 3080 through 3087, etc. Within each octet, the most significant bit represents the lowest numbered VLAN id, and the least significant bit represents the highest numbered VLAN id. Thus, each VLAN of the port is represented by a single bit within the value of this object. If that bit has a value of '1' then that VLAN is included in the set of VLANs; the VLAN is not included if its bit has a value of '0'
**type**\: str
**length:** 0..128
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmMembershipTable.VmMembershipEntry, self).__init__()
self.yang_name = "vmMembershipEntry"
self.yang_parent_name = "vmMembershipTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['ifindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ifindex', (YLeaf(YType.str, 'ifIndex'), ['int'])),
('vmvlantype', (YLeaf(YType.enumeration, 'vmVlanType'), [('ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB', 'CISCOVLANMEMBERSHIPMIB', 'VmMembershipTable.VmMembershipEntry.VmVlanType')])),
('vmvlan', (YLeaf(YType.int32, 'vmVlan'), ['int'])),
('vmportstatus', (YLeaf(YType.enumeration, 'vmPortStatus'), [('ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB', 'CISCOVLANMEMBERSHIPMIB', 'VmMembershipTable.VmMembershipEntry.VmPortStatus')])),
('vmvlans', (YLeaf(YType.str, 'vmVlans'), ['str'])),
('vmvlans2k', (YLeaf(YType.str, 'vmVlans2k'), ['str'])),
('vmvlans3k', (YLeaf(YType.str, 'vmVlans3k'), ['str'])),
('vmvlans4k', (YLeaf(YType.str, 'vmVlans4k'), ['str'])),
])
self.ifindex = None
self.vmvlantype = None
self.vmvlan = None
self.vmportstatus = None
self.vmvlans = None
self.vmvlans2k = None
self.vmvlans3k = None
self.vmvlans4k = None
self._segment_path = lambda: "vmMembershipEntry" + "[ifIndex='" + str(self.ifindex) + "']"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/vmMembershipTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmMembershipTable.VmMembershipEntry, ['ifindex', 'vmvlantype', 'vmvlan', 'vmportstatus', 'vmvlans', 'vmvlans2k', 'vmvlans3k', 'vmvlans4k'], name, value)
class VmPortStatus(Enum):
"""
VmPortStatus (Enum Class)
An indication of the current VLAN status of the port.
A status of inactive(1) indicates that a dynamic port
does not yet have a VLAN assigned, or a port is
assigned to a VLAN that is currently not active. A
status of active(2) indicates that the currently
assigned VLAN is active. A status of shutdown(3)
indicates that the port has been disabled as a result
of VQP shutdown response.
.. data:: inactive = 1
.. data:: active = 2
.. data:: shutdown = 3
"""
inactive = Enum.YLeaf(1, "inactive")
active = Enum.YLeaf(2, "active")
shutdown = Enum.YLeaf(3, "shutdown")
class VmVlanType(Enum):
"""
VmVlanType (Enum Class)
The type of VLAN membership assigned to this port.
A port with static vlan membership is assigned to a
single VLAN directly. A port with dynamic membership
is assigned a single VLAN based on content of packets
received on the port and via VQP queries to VMPS.
A port with multiVlan membership may be assigned to
one or more VLANs directly.
A static or dynamic port membership is specified
by the value of vmVlan. A multiVlan port membership is
specified by the value of vmVlans.
.. data:: static = 1
.. data:: dynamic = 2
.. data:: multiVlan = 3
"""
static = Enum.YLeaf(1, "static")
dynamic = Enum.YLeaf(2, "dynamic")
multiVlan = Enum.YLeaf(3, "multiVlan")
class VmMembershipSummaryExtTable(Entity):
"""
A summary of VLAN membership of non\-trunk
bridge ports. This table is used for
retrieving VLAN membership information
for the device which supports dot1dBasePort
with value greater than 2048.
A row is created for a VLAN and a particular
bridge port range, where at least one port
in the range is assigned to this VLAN.
VLAN membership can only be modified via the
vmMembershipTable.
.. attribute:: vmmembershipsummaryextentry
An entry (conceptual row) in the vmMembershipSummaryExtTable
**type**\: list of :py:class:`VmMembershipSummaryExtEntry <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryExtTable.VmMembershipSummaryExtEntry>`
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryExtTable, self).__init__()
self.yang_name = "vmMembershipSummaryExtTable"
self.yang_parent_name = "CISCO-VLAN-MEMBERSHIP-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("vmMembershipSummaryExtEntry", ("vmmembershipsummaryextentry", CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryExtTable.VmMembershipSummaryExtEntry))])
self._leafs = OrderedDict()
self.vmmembershipsummaryextentry = YList(self)
self._segment_path = lambda: "vmMembershipSummaryExtTable"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryExtTable, [], name, value)
class VmMembershipSummaryExtEntry(Entity):
"""
An entry (conceptual row) in the
vmMembershipSummaryExtTable.
.. attribute:: vmmembershipsummaryvlanindex (key)
**type**\: int
**range:** 0..4095
**refers to**\: :py:class:`vmmembershipsummaryvlanindex <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryTable.VmMembershipSummaryEntry>`
**config**\: False
.. attribute:: vmmembershipportrangeindex (key)
The bridge port range index of this row
**type**\: :py:class:`CiscoPortListRange <ydk.models.cisco_ios_xe.CISCO_TC.CiscoPortListRange>`
**config**\: False
.. attribute:: vmmembershipsummaryextports
The set of the device's member ports that belong to the VLAN. It has the VLAN membership information of up to 2k ports, with the port number starting from the value indicated by the vmMembershipPortRangeIndex object of the same row. For example, if the value of vmMembershipPortRangeIndex is 'twoKto4K', the port numbers indicated in this object start from 2049 and end at 4096. A port number is the value of dot1dBasePort for the port in the BRIDGE\-MIB (RFC 1493)
**type**\: str
**length:** 0..256
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryExtTable.VmMembershipSummaryExtEntry, self).__init__()
self.yang_name = "vmMembershipSummaryExtEntry"
self.yang_parent_name = "vmMembershipSummaryExtTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['vmmembershipsummaryvlanindex','vmmembershipportrangeindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('vmmembershipsummaryvlanindex', (YLeaf(YType.str, 'vmMembershipSummaryVlanIndex'), ['int'])),
('vmmembershipportrangeindex', (YLeaf(YType.enumeration, 'vmMembershipPortRangeIndex'), [('ydk.models.cisco_ios_xe.CISCO_TC', 'CiscoPortListRange', '')])),
('vmmembershipsummaryextports', (YLeaf(YType.str, 'vmMembershipSummaryExtPorts'), ['str'])),
])
self.vmmembershipsummaryvlanindex = None
self.vmmembershipportrangeindex = None
self.vmmembershipsummaryextports = None
self._segment_path = lambda: "vmMembershipSummaryExtEntry" + "[vmMembershipSummaryVlanIndex='" + str(self.vmmembershipsummaryvlanindex) + "']" + "[vmMembershipPortRangeIndex='" + str(self.vmmembershipportrangeindex) + "']"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/vmMembershipSummaryExtTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmMembershipSummaryExtTable.VmMembershipSummaryExtEntry, ['vmmembershipsummaryvlanindex', 'vmmembershipportrangeindex', 'vmmembershipsummaryextports'], name, value)
class VmVoiceVlanTable(Entity):
"""
A table for configuring the Voice VLAN\-ID
for the ports. An entry will exist for each
interface which supports the Voice Vlan feature.
.. attribute:: vmvoicevlanentry
An entry (conceptual row) in the vmVoiceVlanTable. Only interfaces which support the Voice Vlan feature are shown
**type**\: list of :py:class:`VmVoiceVlanEntry <ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB.CISCOVLANMEMBERSHIPMIB.VmVoiceVlanTable.VmVoiceVlanEntry>`
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmVoiceVlanTable, self).__init__()
self.yang_name = "vmVoiceVlanTable"
self.yang_parent_name = "CISCO-VLAN-MEMBERSHIP-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("vmVoiceVlanEntry", ("vmvoicevlanentry", CISCOVLANMEMBERSHIPMIB.VmVoiceVlanTable.VmVoiceVlanEntry))])
self._leafs = OrderedDict()
self.vmvoicevlanentry = YList(self)
self._segment_path = lambda: "vmVoiceVlanTable"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmVoiceVlanTable, [], name, value)
class VmVoiceVlanEntry(Entity):
"""
An entry (conceptual row) in the vmVoiceVlanTable.
Only interfaces which support the Voice Vlan feature
are shown.
.. attribute:: ifindex (key)
**type**\: int
**range:** 1..2147483647
**refers to**\: :py:class:`ifindex <ydk.models.cisco_ios_xe.IF_MIB.IFMIB.IfTable.IfEntry>`
**config**\: False
.. attribute:: vmvoicevlanid
The Voice Vlan ID (VVID) to which this port belongs. 0 \- The CDP packets transmitting through this port would contain Appliance VLAN\-ID TLV with value of 0. VoIP and related packets are expected to be sent and received with VLAN\-id=0 and an 802.1p priority. 1..4094 \- The CDP packets transmitting through this port would contain Appliance VLAN\-ID TLV with value of N. VoIP and related packets are expected to be sent and received with VLAN\-id=N and an 802.1p priority. 4095 \- The CDP packets transmitting through this port would contain Appliance VLAN\-ID TLV with value of 4095. VoIP and related packets are expected to be sent and received untagged without an 802.1p priority. 4096 \- The CDP packets transmitting through this port would not include Appliance VLAN\-ID TLV; or, if the VVID is not supported on the port, this MIB object will not be configurable and will return 4096
**type**\: int
**range:** 0..4096
**config**\: False
.. attribute:: vmvoicevlancdpverifyenable
Enable or Disable the feature of CDP message verification of voice VLANs. true \- The voice VLAN vmVoiceVlan is enabled only after CDP messages are received from the IP phone. false \- The voice VLAN vmVoiceVlan is enabled as soon as the IP phone interface is up. There is no verification needed from CDP messages from the IP phone
**type**\: bool
**config**\: False
"""
_prefix = 'CISCO-VLAN-MEMBERSHIP-MIB'
_revision = '2007-12-14'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(CISCOVLANMEMBERSHIPMIB.VmVoiceVlanTable.VmVoiceVlanEntry, self).__init__()
self.yang_name = "vmVoiceVlanEntry"
self.yang_parent_name = "vmVoiceVlanTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['ifindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ifindex', (YLeaf(YType.str, 'ifIndex'), ['int'])),
('vmvoicevlanid', (YLeaf(YType.int32, 'vmVoiceVlanId'), ['int'])),
('vmvoicevlancdpverifyenable', (YLeaf(YType.boolean, 'vmVoiceVlanCdpVerifyEnable'), ['bool'])),
])
self.ifindex = None
self.vmvoicevlanid = None
self.vmvoicevlancdpverifyenable = None
self._segment_path = lambda: "vmVoiceVlanEntry" + "[ifIndex='" + str(self.ifindex) + "']"
self._absolute_path = lambda: "CISCO-VLAN-MEMBERSHIP-MIB:CISCO-VLAN-MEMBERSHIP-MIB/vmVoiceVlanTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOVLANMEMBERSHIPMIB.VmVoiceVlanTable.VmVoiceVlanEntry, ['ifindex', 'vmvoicevlanid', 'vmvoicevlancdpverifyenable'], name, value)
def clone_ptr(self):
self._top_entity = CISCOVLANMEMBERSHIPMIB()
return self._top_entity
/pyocd_hx-0.0.2.tar.gz/pyocd_hx-0.0.2/pyocd/target/builtin/target_MKL46Z256xxx4.py
from ..family.target_kinetis import Kinetis
from ..family.flash_kinetis import Flash_Kinetis
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
FLASH_ALGO = { 'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x4604b570, 0x4616460d, 0x49302000, 0x48306008, 0xf0004448, 0x2800f8e9, 0x2001d001, 0x2000bd70,
0x4601e7fc, 0x47702000, 0x492ab510, 0x44484828, 0xf8c2f000, 0x2c004604, 0x2100d105, 0x44484824,
0xf9bef000, 0xf0004604, 0x4620f838, 0xb570bd10, 0x481f4604, 0x4b1f4448, 0x68c24621, 0xf862f000,
0x2d004605, 0x481ad107, 0x23004448, 0x68c24621, 0xf956f000, 0xf0004605, 0x4628f820, 0xb5febd70,
0x460c4605, 0x46234616, 0x46294632, 0x44484810, 0xf90af000, 0x2f004607, 0x2201d10b, 0x46339001,
0x90029200, 0x46294622, 0x44484809, 0xf99af000, 0xf0004607, 0x4638f802, 0x4807bdfe, 0x210168c0,
0x43880289, 0x49041840, 0x477060c8, 0x40048100, 0x00000004, 0x6b65666b, 0xf0003000, 0x4a102070,
0x20807010, 0xbf007010, 0x7800480d, 0x280009c0, 0x480bd0fa, 0x20207801, 0x28004008, 0x2067d001,
0x20104770, 0x28004008, 0x2068d001, 0x07c8e7f8, 0x28000fc0, 0x2069d001, 0x2000e7f2, 0x0000e7f0,
0x40020000, 0xb081b5ff, 0x460d4604, 0xf0009804, 0x4606f89f, 0xd0022e00, 0xb0054630, 0x2304bdf0,
0x46204629, 0xf0009a03, 0x4606f876, 0xd0012e00, 0xe7f24630, 0x18289803, 0x46381e47, 0xf00068e1,
0x2900f983, 0x4638d009, 0xf00068e1, 0x1c40f97d, 0x68e19000, 0x43489800, 0xe0131e47, 0x4478480c,
0x60056800, 0x490b2009, 0xf7ff71c8, 0x4606ffa7, 0x280069a0, 0x69a0d001, 0x2e004780, 0xe003d000,
0x194568e0, 0xd9e942bd, 0x4630bf00, 0x0000e7c5, 0x00000462, 0x40020000, 0x4604b570, 0x4628460d,
0xf856f000, 0x2e004606, 0x4630d001, 0x2c00bd70, 0x2004d101, 0x2044e7fa, 0x71c84902, 0xff7ef7ff,
0x0000e7f4, 0x40020000, 0x29004601, 0x2004d101, 0x482a4770, 0x010068c0, 0x00400f00, 0x447b4b28,
0x03025a18, 0xd1012a00, 0xe7f12064, 0x60082000, 0x2001604a, 0x02806088, 0x200060c8, 0x61486108,
0xbf006188, 0x4602e7e4, 0xd1012a00, 0x47702004, 0x20006191, 0xb530e7fb, 0x2c004604, 0x2004d101,
0x1e58bd30, 0x28004008, 0x1e58d103, 0x28004010, 0x2065d001, 0x6820e7f4, 0xd8054288, 0x68206865,
0x188d1940, 0xd20142a8, 0xe7e92066, 0xe7e72000, 0x480c4601, 0xd0014281, 0x4770206b, 0xe7fc2000,
0x2b004603, 0x2004d101, 0x290f4770, 0x2a04d801, 0x2004d001, 0x2000e7f8, 0x0000e7f6, 0x40048040,
0x000003c0, 0x6b65666b, 0xb081b5ff, 0x46144607, 0x2c00461d, 0x2004d102, 0xbdf0b005, 0x462a2304,
0x99024638, 0xffb7f7ff, 0x2e004606, 0x4630d001, 0xe01ce7f2, 0x44794910, 0x68099802, 0xcc016008,
0x4479490d, 0x6809390c, 0x20066048, 0x71c8490b, 0xfef4f7ff, 0x69b84606, 0xd0012800, 0x478069b8,
0xd0002e00, 0x9802e005, 0x90021d00, 0x2d001f2d, 0xbf00d1e0, 0xe7cf4630, 0x0000030a, 0x40020000,
0xb083b5ff, 0x2304460c, 0x9a054621, 0xf7ff9803, 0x9002ff82, 0x28009802, 0x9802d002, 0xbdf0b007,
0x68919a03, 0xf0006850, 0x4605f88f, 0x42684261, 0x424e4001, 0xd10042a6, 0x9f051976, 0x1b30e027,
0x98019001, 0xd90042b8, 0x98019701, 0x90000880, 0x44784811, 0x60046800, 0x49102001, 0x980071c8,
0x0e010400, 0x72c1480d, 0x9800490c, 0x98067288, 0xf7ff7248, 0x9002fea3, 0x28009802, 0x9802d001,
0x9801e7cc, 0x98011a3f, 0x19761824, 0x2f00bf00, 0x2000d1d5, 0x0000e7c2, 0x0000026e, 0x40020000,
0x4604b570, 0x2c00460d, 0x2004d101, 0x2040bd70, 0x71c84903, 0x71854608, 0xfe80f7ff, 0x0000e7f6,
0x40020000, 0xb081b5ff, 0x4617460c, 0x2d00461d, 0x2004d102, 0xbdf0b005, 0x463a2304, 0x98014621,
0xff19f7ff, 0x2e004606, 0x4630d001, 0xe022e7f2, 0x44784813, 0x60046800, 0x49122002, 0x980a71c8,
0x490f72c8, 0x39124479, 0x68096828, 0xf7ff6088, 0x4606fe55, 0xd00b2e00, 0x2800980b, 0x980bd001,
0x980c6004, 0xd0022800, 0x980c2100, 0xe0046001, 0x1d2d1f3f, 0x2f001d24, 0xbf00d1da, 0xe7c94630,
0x000001ce, 0x40020000, 0x09032200, 0xd32c428b, 0x428b0a03, 0x2300d311, 0xe04e469c, 0x430b4603,
0x2200d43c, 0x428b0843, 0x0903d331, 0xd31c428b, 0x428b0a03, 0x4694d301, 0x09c3e03f, 0xd301428b,
0x1ac001cb, 0x09834152, 0xd301428b, 0x1ac0018b, 0x09434152, 0xd301428b, 0x1ac0014b, 0x09034152,
0xd301428b, 0x1ac0010b, 0x08c34152, 0xd301428b, 0x1ac000cb, 0x08834152, 0xd301428b, 0x1ac0008b,
0x08434152, 0xd301428b, 0x1ac0004b, 0x1a414152, 0x4601d200, 0x46104152, 0xe05d4770, 0xd0000fca,
0x10034249, 0x4240d300, 0x22004053, 0x0903469c, 0xd32d428b, 0x428b0a03, 0x22fcd312, 0xba120189,
0x428b0a03, 0x0189d30c, 0x428b1192, 0x0189d308, 0x428b1192, 0x0189d304, 0x1192d03a, 0x0989e000,
0x428b09c3, 0x01cbd301, 0x41521ac0, 0x428b0983, 0x018bd301, 0x41521ac0, 0x428b0943, 0x014bd301,
0x41521ac0, 0x428b0903, 0x010bd301, 0x41521ac0, 0x428b08c3, 0x00cbd301, 0x41521ac0, 0x428b0883,
0x008bd301, 0x41521ac0, 0x0843d2d9, 0xd301428b, 0x1ac0004b, 0x1a414152, 0x4601d200, 0x41524663,
0x4610105b, 0x4240d301, 0xd5002b00, 0x47704249, 0x105b4663, 0x4240d300, 0x2000b501, 0x46c046c0,
0x0002bd02, 0x00000004, 0x00000008, 0x00000010, 0x00000020, 0x00000040, 0x00000000, 0x00000000,
0x00000020, 0x40020004, 0x00000000,
],
'pc_init' : 0x20000021,
'pc_eraseAll' : 0x20000049,
'pc_erase_sector' : 0x2000006F,
'pc_program_page' : 0x2000009F,
'begin_stack' : 0x20000800,
'begin_data' : 0x20000800, # Analyzer uses a max of 1 KB data (256 pages * 4 bytes / page)
# Note: 128 pages on KL25 and KL26, 256 pages on KL46
'static_base' : 0x20000000 + 0x20 + 0x5E8,
'min_program_length' : 4,
'page_buffers' : [0x20000800, 0x20000c00], # Enable double buffering
'analyzer_supported' : True,
'analyzer_address' : 0x1ffff000 # Analyzer 0x1ffff000..0x1ffff600
}
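# Quick illustrative sanity check of the sizing comments above (assumption:
# 0x40000 bytes of flash with 0x400-byte sectors, as declared in MEMORY_MAP
# below): 256 pages * 4 bytes/page = 1 KB of analyzer data, and the double
# buffering uses exactly the two page buffers listed.
assert 0x40000 // 0x400 == 256
assert len(FLASH_ALGO['page_buffers']) == 2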
class KL46Z(Kinetis):
MEMORY_MAP = MemoryMap(
FlashRegion( start=0, length=0x40000, blocksize=0x400, is_boot_memory=True,
algo=FLASH_ALGO, flash_class=Flash_Kinetis),
RamRegion( start=0x1fffe000, length=0x8000)
)
def __init__(self, session):
super(KL46Z, self).__init__(session, self.MEMORY_MAP)
self._svd_location = SVDFile.from_builtin("MKL46Z4.svd")
/azure_percept-0.0.13-cp38-cp38-manylinux_2_24_aarch64.whl/azure/iot/percept/extensions/middle/StridedSliceNormalizer.py
import numpy as np
from extensions.ops.split import VariadicSplit
from mo.front.common.partial_infer.utils import int64_array
from mo.front.tf.graph_utils import create_op_with_const_inputs
from mo.graph.graph import Graph, Node
from mo.graph.perm_inputs import PermuteInputs
from mo.middle.replacement import MiddleReplacementPattern
from mo.ops.concat import Concat
from mo.ops.const import Const
from mo.ops.op import PermuteAttrs
from mo.ops.strided_slice import StridedSlice
from mo.utils.error import Error
class StridedSliceNormalizer(MiddleReplacementPattern):
r"""
StridedSlice is not normal if it cannot be permuted by ApplyPermutations. This normalizer
inserts blank colons ':' in slice expression so that it can be correctly permuted
from NHWC to NCHW layout. It changes masks and inserts blank begin, end and strides values.
In order to successfully handle StridedSlice in ShapeOf subgraphs
changes must be done by inserting nodes not just by overwriting constants.
StridedSlice is not normal in 2 cases:
1. rank of a slice expression is less than rank of input tensor
2. there is an ellipsis
1st case example
BEFORE:
|
begin
value=[0, 0]
|
AFTER:
|
begin Const
value=[0, 0] value=[0, 0]
\ /
\ /
Concat
value=[0, 0, 0, 0]
|
Input of a shape [16, 100, 100, 3] in NHWC layout, output = input[:, 0:50].
StridedSlice will be extended to input[:, 0:50, :, :].
After permutation to NCHW output = input[:, :, 0:50, :].
Example for 'begin' input transformation is shown above on the picture.
'end' and 'strides' inputs will be transformed the same way.
2nd case example
BEFORE:
|
begin
value=[1, 1, 1]
|
AFTER:
|
begin
value=[1, 1, 1]
|
VariadicSplit
/ \
/ \
/ Const \
\ val=[0, 0] /
\ | /
\ | /
Concat
value=[1, 0, 0, 1, 1]
|
Input of a shape [16, 10, 100, 100, 3] in NDHWC layout, output = input[1:4, ..., 1:51, 1:3],
output_shape = [3, 10, 100, 50, 2]. In order to perform correct layout permutation
ellipsis must be replaced with colons: input[1:4, ..., 1:51, 1:3] => input[1:4, :, :, 1:51, 1:3].
After layout permutation input[1:4, 1:3, :, :, 1:51].
In the places of colons blank begin, end and strides values should be inserted.
In order to do that we split input and insert blank zeros to the middle.
Example for 'begin' input transformation is shown above on the picture.
'end' and 'strides' inputs will be transformed the same way.
"""
enabled = True
def run_before(self):
from extensions.middle.LayoutChangeForConstantShapePaths import LayoutChangeForConstantShapePaths
return [LayoutChangeForConstantShapePaths]
def run_after(self):
from extensions.middle.SliceConverter import ConvertSlice
return [ConvertSlice]
def find_and_replace_pattern(self, graph: Graph):
for node in graph.get_op_nodes(type='StridedSlice'):
StridedSliceNormalizer.normalize_strided_slice(graph, node)
PermuteAttrs.create_permute_attrs(node,
attrs=[('begin_mask', 'input:0'), # though it actually depends on slice_rank
('end_mask', 'input:0'),
('new_axis_mask', 'input:0'),
('shrink_axis_mask', 'input:0'),
('ellipsis_mask', 'input:0')])
# StridedSliceNormalizer has inserted nodes that replaced the original begin, end, and strides data nodes,
# so only now can the correct input permutations be set
PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:1', 'slice', 'dim_size')
PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:2', 'slice', 'dim_size')
if node.is_in_port_connected(3):
PermuteInputs().set_input_permutation(node.in_node(3), node, 'input:3', 'slice', 'dim_size')
@staticmethod
def normalize_strided_slice(graph: Graph, node: Node):
input_shape = node.in_port(0).data.get_shape()
input_rank = len(input_shape)
begin, _, _ = StridedSlice.validate_inputs_and_get_args(node)
slice_rank = len(begin)
StridedSlice.align_mask_with_slice_rank(node, slice_rank) # if StridedSlice is created after partial_infer
StridedSliceNormalizer.normalize_slices_attr(node)
num_insertions = input_rank - slice_rank + np.count_nonzero(node.new_axis_mask)
assert num_insertions >= 0, 'slice_rank - num_new_axis must be <= input rank. Got instead: ' \
'input_rank = {}, slice_rank = {}, num_new_axis = {}'. \
format(input_rank, slice_rank, np.count_nonzero(node.new_axis_mask))
if np.any(node.ellipsis_mask):
assert np.count_nonzero(node.ellipsis_mask) == 1, 'only one ellipsis_mask nonzero value is allowed'
ellipsis_start = np.nonzero(node.ellipsis_mask)[0][0]
# since we don't expect values in begin and end: take the whole range along ellipsis_start
node.begin_mask[ellipsis_start] = 0
node.end_mask[ellipsis_start] = 0
node.ellipsis_mask[ellipsis_start] = 0
insertion_start_idx = ellipsis_start + 1
StridedSliceNormalizer.unroll_ellipsis_for_inputs(graph, node, ellipsis_start, num_insertions)
elif num_insertions > 0:
insertion_start_idx = slice_rank # insert blank values at the mask ends
StridedSliceNormalizer.extend_inputs(node, num_insertions)
if num_insertions > 0:
# insert blank values for ellipsis unrolling and extending
for mask_name in StridedSlice.get_mask_names():
node[mask_name] = np.insert(node[mask_name], insertion_start_idx, [0] * num_insertions).astype(int)
@staticmethod
def unroll_ellipsis_for_inputs(graph: Graph, node: Node, ellipsis_start: int, num_insertions: int):
node_name = node.soft_get('name', node.id)
for i, input_name in [(1, 'begin'), (2, 'end'), (3, 'strides')]:
if i == 3 and not node.is_in_port_connected(3):
continue # no need to extend strides if they are not connected
blank_values_arr = np.zeros(num_insertions) if input_name != 'strides' else np.ones(num_insertions)
blank_values_node = Const(graph, {'name': node_name + '/const_to_unroll_{}_ellipsis'.format(input_name),
'value': int64_array(blank_values_arr)}).create_node()
concat_in_ports_count = 3 if ellipsis_start != 0 else 2
concat = Concat(graph, {'axis': 0, 'name': node_name + '/concat_{}'.format(input_name),
'in_ports_count': concat_in_ports_count}).create_node()
if ellipsis_start != 0:
split = create_op_with_const_inputs(graph, VariadicSplit, {1: int64_array(0),
2: int64_array([ellipsis_start, -1])},
{'name': node_name + '/split_for_{}_ellipsis'.format(input_name),
'out_ports_count': 2})
node.in_port(i).get_connection().set_destination(split.in_port(0))
concat.in_port(0).connect(split.out_port(0))
concat.in_port(1).connect(blank_values_node.out_port(0))
concat.in_port(2).connect(split.out_port(1))
else:
concat.in_port(0).connect(blank_values_node.out_port(0))
node.in_port(i).get_connection().set_destination(concat.in_port(1))
concat.out_port(0).get_connection().set_destination(node.in_port(i))
@staticmethod
def extend_inputs(node: Node, num_insertions: int):
graph = node.graph
node_name = node.soft_get('name', node.id)
for i, input_name in [(1, 'begin'), (2, 'end'), (3, 'strides')]:
if i == 3 and not node.is_in_port_connected(3):
continue # no need to extend strides if they are not connected
blank_values_arr = np.zeros(num_insertions) if input_name != 'strides' else np.ones(num_insertions)
blank_values_node = Const(graph, {'name': node_name + '/extend_{}_const'.format(input_name),
'value': int64_array(blank_values_arr)}).create_node()
if node.in_port(i).get_source().node.soft_get('type') == 'Concat':
# concat already exists
concat = node.in_port(i).get_source().node
last_in_port = max(concat.in_ports().keys())
assert not concat.in_port(last_in_port).disconnected(), 'The last in_port of Concat node {} ' \
'should be connected'. \
format(concat.soft_get('name', node.id))
concat.add_input_port(last_in_port + 1)
concat.in_port(last_in_port + 1).connect(blank_values_node.out_port(0))
else:
# have to create concat
concat = Concat(graph, {'axis': 0, 'name': node_name + '/concat_{}'.format(input_name),
'in_ports_count': 2}).create_node()
node.in_port(i).get_connection().set_destination(concat.in_port(0))
concat.in_port(1).connect(blank_values_node.out_port(0))
concat.out_port(0).get_connection().set_destination(node.in_port(i))
@staticmethod
def normalize_slices_attr(node: Node):
# removes negative starts, ends and magic numbers from 'slice' attr which is used by ConvertGroupedStridedSlice
slice_rank = len(node['slices'])
data_shape = node.in_port(0).data.get_shape()
node_name = node.soft_get('name', node.id)
if node.is_in_port_connected(3):
strides = node.in_port(3).data.get_value()
if strides is None:
raise Error('StridedSlice operation for node {} supports only constant strides input'.format(node_name))
else:
strides = np.ones(len(node['slices']), dtype=np.int32)
num_ellipsis_inserts = len(data_shape) - slice_rank + np.count_nonzero(node.new_axis_mask) + 1
res_slices = []
in_idx = 0
for i, s in enumerate(node['slices']):
if node.new_axis_mask[i]:
res_slices.append(slice(0, 1, 1))
elif node.shrink_axis_mask[i]:
res_slices.append(slice(s, s + 1, strides[i])) # need strides if shrink index is negative
elif node.ellipsis_mask[i]:
for idx in range(num_ellipsis_inserts):
res_slices.append(slice(0, data_shape[in_idx], 1))
in_idx += 1
else:
res_slices.append(s)
if not (node.new_axis_mask[i] or node.ellipsis_mask[i]):
res_slices[-1] = slice(*res_slices[-1].indices(data_shape[in_idx])) # convert negative begins/ends
in_idx += 1
node.slices = np.array(res_slices)
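# Illustrative self-check of the docstring's 2nd-case example above (plain
# NumPy slicing semantics only, not part of the Model Optimizer transform):
if __name__ == '__main__':
    x = np.zeros((16, 10, 100, 100, 3))
    # unrolling the ellipsis into colons preserves both shape and values
    assert x[1:4, ..., 1:51, 1:3].shape == (3, 10, 100, 50, 2)
    assert (x[1:4, ..., 1:51, 1:3] == x[1:4, :, :, 1:51, 1:3]).all()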
/whisper_autosrt-0.1.10.tar.gz/whisper_autosrt-0.1.10/whisper_autosrt/__init__.py
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import audioop
import math
import multiprocessing
import os
import subprocess
import sys
import tempfile
import wave
import json
import requests
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from progressbar import ProgressBar, Percentage, Bar, ETA
import pysrt
import six
# ADDITIONAL IMPORT
from glob import glob, escape
import shlex
import time
from datetime import datetime, timedelta
from pathlib import Path
import warnings
warnings.filterwarnings("ignore", message=".*The 'nopython' keyword.*")
from faster_whisper import WhisperModel
import ctypes
import shutil
VERSION = "0.1.10"
#marker='█'
class WhisperLanguage:
def __init__(self):
self.list_codes = []
self.list_codes.append("auto")
self.list_codes.append("af")
self.list_codes.append("sq")
self.list_codes.append("am")
self.list_codes.append("ar")
self.list_codes.append("hy")
self.list_codes.append("as")
self.list_codes.append("az")
self.list_codes.append("ba")
self.list_codes.append("eu")
self.list_codes.append("be")
self.list_codes.append("bn")
self.list_codes.append("bs")
self.list_codes.append("br")
self.list_codes.append("bg")
self.list_codes.append("ca")
self.list_codes.append("zh")
self.list_codes.append("hr")
self.list_codes.append("cs")
self.list_codes.append("da")
self.list_codes.append("nl")
self.list_codes.append("en")
self.list_codes.append("et")
self.list_codes.append("fo")
self.list_codes.append("fi")
self.list_codes.append("fr")
self.list_codes.append("gl")
self.list_codes.append("ka")
self.list_codes.append("de")
self.list_codes.append("el")
self.list_codes.append("gu")
self.list_codes.append("ht")
self.list_codes.append("ha")
self.list_codes.append("haw")
self.list_codes.append("he")
self.list_codes.append("hi")
self.list_codes.append("hu")
self.list_codes.append("is")
self.list_codes.append("id")
self.list_codes.append("it")
self.list_codes.append("ja")
self.list_codes.append("jv")
self.list_codes.append("kn")
self.list_codes.append("kk")
self.list_codes.append("km")
self.list_codes.append("ko")
self.list_codes.append("lo")
self.list_codes.append("la")
self.list_codes.append("lv")
self.list_codes.append("ln")
self.list_codes.append("lt")
self.list_codes.append("lb")
self.list_codes.append("mk")
self.list_codes.append("mg")
self.list_codes.append("ms")
self.list_codes.append("ml")
self.list_codes.append("mt")
self.list_codes.append("mi")
self.list_codes.append("mr")
self.list_codes.append("mn")
self.list_codes.append("my")
self.list_codes.append("ne")
self.list_codes.append("no")
self.list_codes.append("nn")
self.list_codes.append("oc")
self.list_codes.append("ps")
self.list_codes.append("fa")
self.list_codes.append("pl")
self.list_codes.append("pt")
self.list_codes.append("pa")
self.list_codes.append("ro")
self.list_codes.append("ru")
self.list_codes.append("sa")
self.list_codes.append("sr")
self.list_codes.append("sn")
self.list_codes.append("sd")
self.list_codes.append("si")
self.list_codes.append("sk")
self.list_codes.append("sl")
self.list_codes.append("so")
self.list_codes.append("es")
self.list_codes.append("su")
self.list_codes.append("sw")
self.list_codes.append("sv")
self.list_codes.append("tl")
self.list_codes.append("tg")
self.list_codes.append("ta")
self.list_codes.append("tt")
self.list_codes.append("te")
self.list_codes.append("th")
self.list_codes.append("bo")
self.list_codes.append("tr")
self.list_codes.append("tk")
self.list_codes.append("uk")
self.list_codes.append("ur")
self.list_codes.append("uz")
self.list_codes.append("vi")
self.list_codes.append("cy")
self.list_codes.append("yi")
self.list_codes.append("yo")
self.list_names = []
self.list_names.append("Auto Detect")
self.list_names.append("Afrikaans")
self.list_names.append("Albanian")
self.list_names.append("Amharic")
self.list_names.append("Arabic")
self.list_names.append("Armenian")
self.list_names.append("Assamese")
self.list_names.append("Azerbaijani")
self.list_names.append("Bashkir")
self.list_names.append("Basque")
self.list_names.append("Belarusian")
self.list_names.append("Bengali")
self.list_names.append("Bosnian")
self.list_names.append("Breton")
self.list_names.append("Bulgarian")
self.list_names.append("Catalan")
self.list_names.append("Chinese")
self.list_names.append("Croatian")
self.list_names.append("Czech")
self.list_names.append("Danish")
self.list_names.append("Dutch")
self.list_names.append("English")
self.list_names.append("Estonian")
self.list_names.append("Faroese")
self.list_names.append("Finnish")
self.list_names.append("French")
self.list_names.append("Galician")
self.list_names.append("Georgian")
self.list_names.append("German")
self.list_names.append("Greek")
self.list_names.append("Gujarati")
self.list_names.append("Haitian Creole")
self.list_names.append("Hausa")
self.list_names.append("Hawaiian")
self.list_names.append("Hebrew")
self.list_names.append("Hindi")
self.list_names.append("Hungarian")
self.list_names.append("Icelandic")
self.list_names.append("Indonesian")
self.list_names.append("Italian")
self.list_names.append("Japanese")
self.list_names.append("Javanese")
self.list_names.append("Kannada")
self.list_names.append("Kazakh")
self.list_names.append("Khmer")
self.list_names.append("Korean")
self.list_names.append("Lao")
self.list_names.append("Latin")
self.list_names.append("Latvian")
self.list_names.append("Lingala")
self.list_names.append("Lithuanian")
self.list_names.append("Luxembourgish")
self.list_names.append("Macedonian")
self.list_names.append("Malagasy")
self.list_names.append("Malay")
self.list_names.append("Malayalam")
self.list_names.append("Maltese")
self.list_names.append("Maori")
self.list_names.append("Marathi")
self.list_names.append("Mongolian")
self.list_names.append("Myanmar (Burmese)")
self.list_names.append("Nepali")
self.list_names.append("Norwegian")
self.list_names.append("Nynorsk")
self.list_names.append("Occitan")
self.list_names.append("Pashto")
self.list_names.append("Persian")
self.list_names.append("Polish")
self.list_names.append("Portuguese")
self.list_names.append("Punjabi")
self.list_names.append("Romanian")
self.list_names.append("Russian")
self.list_names.append("Sanskrit")
self.list_names.append("Serbian")
self.list_names.append("Shona")
self.list_names.append("Sindhi")
self.list_names.append("Sinhala")
self.list_names.append("Slovak")
self.list_names.append("Slovenian")
self.list_names.append("Somali")
self.list_names.append("Spanish")
self.list_names.append("Sundanese")
self.list_names.append("Swahili")
self.list_names.append("Swedish")
self.list_names.append("Tagalog")
self.list_names.append("Tajik")
self.list_names.append("Tamil")
self.list_names.append("Tatar")
self.list_names.append("Telugu")
self.list_names.append("Thai")
self.list_names.append("Tibetan")
self.list_names.append("Turkish")
self.list_names.append("Turkmen")
self.list_names.append("Ukrainian")
self.list_names.append("Urdu")
self.list_names.append("Uzbek")
self.list_names.append("Vietnamese")
self.list_names.append("Welsh")
self.list_names.append("Yiddish")
self.list_names.append("Yoruba")
self.code_of_name = dict(zip(self.list_names, self.list_codes))
self.name_of_code = dict(zip(self.list_codes, self.list_names))
self.dict = {
'auto': 'Auto Detect',
'af': 'Afrikaans',
'sq': 'Albanian',
'am': 'Amharic',
'ar': 'Arabic',
'hy': 'Armenian',
'as': 'Assamese',
'az': 'Azerbaijani',
'ba': 'Bashkir',
'eu': 'Basque',
'be': 'Belarusian',
'bn': 'Bengali',
'bs': 'Bosnian',
'br': 'Breton',
'bg': 'Bulgarian',
'ca': 'Catalan',
'zh': 'Chinese',
'hr': 'Croatian',
'cs': 'Czech',
'da': 'Danish',
'nl': 'Dutch',
'en': 'English',
'et': 'Estonian',
'fo': 'Faroese',
'fi': 'Finnish',
'fr': 'French',
'gl': 'Galician',
'ka': 'Georgian',
'de': 'German',
'el': 'Greek',
'gu': 'Gujarati',
'ht': 'Haitian Creole',
'ha': 'Hausa',
'haw': 'Hawaiian',
'he': 'Hebrew',
'hi': 'Hindi',
'hu': 'Hungarian',
'is': 'Icelandic',
'id': 'Indonesian',
'it': 'Italian',
'ja': 'Japanese',
'jv': 'Javanese',
'kn': 'Kannada',
'kk': 'Kazakh',
'km': 'Khmer',
'ko': 'Korean',
'lo': 'Lao',
'la': 'Latin',
'lv': 'Latvian',
'ln': 'Lingala',
'lt': 'Lithuanian',
'lb': 'Luxembourgish',
'mk': 'Macedonian',
'mg': 'Malagasy',
'ms': 'Malay',
'ml': 'Malayalam',
'mt': 'Maltese',
'mi': 'Maori',
'mr': 'Marathi',
'mn': 'Mongolian',
'my': 'Myanmar (Burmese)',
'ne': 'Nepali',
'no': 'Norwegian',
'nn': 'Nynorsk',
'oc': 'Occitan',
'ps': 'Pashto',
'fa': 'Persian',
'pl': 'Polish',
'pt': 'Portuguese',
'pa': 'Punjabi',
'ro': 'Romanian',
'ru': 'Russian',
'sa': 'Sanskrit',
'sr': 'Serbian',
'sn': 'Shona',
'sd': 'Sindhi',
'si': 'Sinhala',
'sk': 'Slovak',
'sl': 'Slovenian',
'so': 'Somali',
'es': 'Spanish',
'su': 'Sundanese',
'sw': 'Swahili',
'sv': 'Swedish',
'tl': 'Tagalog',
'tg': 'Tajik',
'ta': 'Tamil',
'tt': 'Tatar',
'te': 'Telugu',
'th': 'Thai',
'bo': 'Tibetan',
'tr': 'Turkish',
'tk': 'Turkmen',
'uk': 'Ukrainian',
'ur': 'Urdu',
'uz': 'Uzbek',
'vi': 'Vietnamese',
'cy': 'Welsh',
'yi': 'Yiddish',
'yo': 'Yoruba',
}
def get_name(self, get_code):
return self.dict.get(get_code.lower(), "")
def get_code(self, language):
for get_code, lang in self.dict.items():
if lang.lower() == language.lower():
return get_code
return ""
class GoogleLanguage:
def __init__(self):
self.list_codes = []
self.list_codes.append("af")
self.list_codes.append("sq")
self.list_codes.append("am")
self.list_codes.append("ar")
self.list_codes.append("hy")
self.list_codes.append("as")
self.list_codes.append("ay")
self.list_codes.append("az")
self.list_codes.append("bm")
self.list_codes.append("eu")
self.list_codes.append("be")
self.list_codes.append("bn")
self.list_codes.append("bho")
self.list_codes.append("bs")
self.list_codes.append("bg")
self.list_codes.append("ca")
self.list_codes.append("ceb")
self.list_codes.append("ny")
self.list_codes.append("zh")
self.list_codes.append("zh-CN")
self.list_codes.append("zh-TW")
self.list_codes.append("co")
self.list_codes.append("hr")
self.list_codes.append("cs")
self.list_codes.append("da")
self.list_codes.append("dv")
self.list_codes.append("doi")
self.list_codes.append("nl")
self.list_codes.append("en")
self.list_codes.append("eo")
self.list_codes.append("et")
self.list_codes.append("ee")
self.list_codes.append("fil")
self.list_codes.append("fi")
self.list_codes.append("fr")
self.list_codes.append("fy")
self.list_codes.append("gl")
self.list_codes.append("ka")
self.list_codes.append("de")
self.list_codes.append("el")
self.list_codes.append("gn")
self.list_codes.append("gu")
self.list_codes.append("ht")
self.list_codes.append("ha")
self.list_codes.append("haw")
self.list_codes.append("he")
self.list_codes.append("hi")
self.list_codes.append("hmn")
self.list_codes.append("hu")
self.list_codes.append("is")
self.list_codes.append("ig")
self.list_codes.append("ilo")
self.list_codes.append("id")
self.list_codes.append("ga")
self.list_codes.append("it")
self.list_codes.append("ja")
self.list_codes.append("jv")
self.list_codes.append("kn")
self.list_codes.append("kk")
self.list_codes.append("km")
self.list_codes.append("rw")
self.list_codes.append("gom")
self.list_codes.append("ko")
self.list_codes.append("kri")
self.list_codes.append("kmr")
self.list_codes.append("ckb")
self.list_codes.append("ky")
self.list_codes.append("lo")
self.list_codes.append("la")
self.list_codes.append("lv")
self.list_codes.append("ln")
self.list_codes.append("lt")
self.list_codes.append("lg")
self.list_codes.append("lb")
self.list_codes.append("mk")
self.list_codes.append("mg")
self.list_codes.append("ms")
self.list_codes.append("ml")
self.list_codes.append("mt")
self.list_codes.append("mi")
self.list_codes.append("mr")
self.list_codes.append("mni-Mtei")
self.list_codes.append("lus")
self.list_codes.append("mn")
self.list_codes.append("my")
self.list_codes.append("ne")
self.list_codes.append("no")
self.list_codes.append("or")
self.list_codes.append("om")
self.list_codes.append("ps")
self.list_codes.append("fa")
self.list_codes.append("pl")
self.list_codes.append("pt")
self.list_codes.append("pa")
self.list_codes.append("qu")
self.list_codes.append("ro")
self.list_codes.append("ru")
self.list_codes.append("sm")
self.list_codes.append("sa")
self.list_codes.append("gd")
self.list_codes.append("nso")
self.list_codes.append("sr")
self.list_codes.append("st")
self.list_codes.append("sn")
self.list_codes.append("sd")
self.list_codes.append("si")
self.list_codes.append("sk")
self.list_codes.append("sl")
self.list_codes.append("so")
self.list_codes.append("es")
self.list_codes.append("su")
self.list_codes.append("sw")
self.list_codes.append("sv")
self.list_codes.append("tg")
self.list_codes.append("ta")
self.list_codes.append("tt")
self.list_codes.append("te")
self.list_codes.append("th")
self.list_codes.append("ti")
self.list_codes.append("ts")
self.list_codes.append("tr")
self.list_codes.append("tk")
self.list_codes.append("tw")
self.list_codes.append("uk")
self.list_codes.append("ur")
self.list_codes.append("ug")
self.list_codes.append("uz")
self.list_codes.append("vi")
self.list_codes.append("cy")
self.list_codes.append("xh")
self.list_codes.append("yi")
self.list_codes.append("yo")
self.list_codes.append("zu")
self.list_names = []
self.list_names.append("Afrikaans")
self.list_names.append("Albanian")
self.list_names.append("Amharic")
self.list_names.append("Arabic")
self.list_names.append("Armenian")
self.list_names.append("Assamese")
self.list_names.append("Aymara")
self.list_names.append("Azerbaijani")
self.list_names.append("Bambara")
self.list_names.append("Basque")
self.list_names.append("Belarusian")
self.list_names.append("Bengali")
self.list_names.append("Bhojpuri")
self.list_names.append("Bosnian")
self.list_names.append("Bulgarian")
self.list_names.append("Catalan")
self.list_names.append("Cebuano")
self.list_names.append("Chichewa")
self.list_names.append("Chinese")
self.list_names.append("Chinese (Simplified)")
self.list_names.append("Chinese (Traditional)")
self.list_names.append("Corsican")
self.list_names.append("Croatian")
self.list_names.append("Czech")
self.list_names.append("Danish")
self.list_names.append("Dhivehi")
self.list_names.append("Dogri")
self.list_names.append("Dutch")
self.list_names.append("English")
self.list_names.append("Esperanto")
self.list_names.append("Estonian")
self.list_names.append("Ewe")
self.list_names.append("Filipino")
self.list_names.append("Finnish")
self.list_names.append("French")
self.list_names.append("Frisian")
self.list_names.append("Galician")
self.list_names.append("Georgian")
self.list_names.append("German")
self.list_names.append("Greek")
self.list_names.append("Guarani")
self.list_names.append("Gujarati")
self.list_names.append("Haitian Creole")
self.list_names.append("Hausa")
self.list_names.append("Hawaiian")
self.list_names.append("Hebrew")
self.list_names.append("Hindi")
self.list_names.append("Hmong")
self.list_names.append("Hungarian")
self.list_names.append("Icelandic")
self.list_names.append("Igbo")
self.list_names.append("Ilocano")
self.list_names.append("Indonesian")
self.list_names.append("Irish")
self.list_names.append("Italian")
self.list_names.append("Japanese")
self.list_names.append("Javanese")
self.list_names.append("Kannada")
self.list_names.append("Kazakh")
self.list_names.append("Khmer")
self.list_names.append("Kinyarwanda")
self.list_names.append("Konkani")
self.list_names.append("Korean")
self.list_names.append("Krio")
self.list_names.append("Kurdish (Kurmanji)")
self.list_names.append("Kurdish (Sorani)")
self.list_names.append("Kyrgyz")
self.list_names.append("Lao")
self.list_names.append("Latin")
self.list_names.append("Latvian")
self.list_names.append("Lingala")
self.list_names.append("Lithuanian")
self.list_names.append("Luganda")
self.list_names.append("Luxembourgish")
self.list_names.append("Macedonian")
self.list_names.append("Malagasy")
self.list_names.append("Malay")
self.list_names.append("Malayalam")
self.list_names.append("Maltese")
self.list_names.append("Maori")
self.list_names.append("Marathi")
self.list_names.append("Meiteilon (Manipuri)")
self.list_names.append("Mizo")
self.list_names.append("Mongolian")
self.list_names.append("Myanmar (Burmese)")
self.list_names.append("Nepali")
self.list_names.append("Norwegian")
self.list_names.append("Odiya (Oriya)")
self.list_names.append("Oromo")
self.list_names.append("Pashto")
self.list_names.append("Persian")
self.list_names.append("Polish")
self.list_names.append("Portuguese")
self.list_names.append("Punjabi")
self.list_names.append("Quechua")
self.list_names.append("Romanian")
self.list_names.append("Russian")
self.list_names.append("Samoan")
self.list_names.append("Sanskrit")
self.list_names.append("Scots Gaelic")
self.list_names.append("Sepedi")
self.list_names.append("Serbian")
self.list_names.append("Sesotho")
self.list_names.append("Shona")
self.list_names.append("Sindhi")
self.list_names.append("Sinhala")
self.list_names.append("Slovak")
self.list_names.append("Slovenian")
self.list_names.append("Somali")
self.list_names.append("Spanish")
self.list_names.append("Sundanese")
self.list_names.append("Swahili")
self.list_names.append("Swedish")
self.list_names.append("Tajik")
self.list_names.append("Tamil")
self.list_names.append("Tatar")
self.list_names.append("Telugu")
self.list_names.append("Thai")
self.list_names.append("Tigrinya")
self.list_names.append("Tsonga")
self.list_names.append("Turkish")
self.list_names.append("Turkmen")
self.list_names.append("Twi (Akan)")
self.list_names.append("Ukrainian")
self.list_names.append("Urdu")
self.list_names.append("Uyghur")
self.list_names.append("Uzbek")
self.list_names.append("Vietnamese")
self.list_names.append("Welsh")
self.list_names.append("Xhosa")
self.list_names.append("Yiddish")
self.list_names.append("Yoruba")
self.list_names.append("Zulu")
# NOTE THAT Google Translate AND Vosk Speech Recognition API USE ISO-639-1 STANDARD CODE ('al', 'af', 'as', ETC)
# WHEN ffmpeg SUBTITLES STREAMS USE ISO 639-2 STANDARD CODE ('afr', 'alb', 'amh', ETC)
self.list_ffmpeg_codes = []
self.list_ffmpeg_codes.append("afr") # Afrikaans
self.list_ffmpeg_codes.append("alb") # Albanian
self.list_ffmpeg_codes.append("amh") # Amharic
self.list_ffmpeg_codes.append("ara") # Arabic
self.list_ffmpeg_codes.append("hye") # Armenian
self.list_ffmpeg_codes.append("asm") # Assamese
self.list_ffmpeg_codes.append("aym") # Aymara
self.list_ffmpeg_codes.append("aze") # Azerbaijani
self.list_ffmpeg_codes.append("bam") # Bambara
self.list_ffmpeg_codes.append("eus") # Basque
self.list_ffmpeg_codes.append("bel") # Belarusian
self.list_ffmpeg_codes.append("ben") # Bengali
self.list_ffmpeg_codes.append("bho") # Bhojpuri
self.list_ffmpeg_codes.append("bos") # Bosnian
self.list_ffmpeg_codes.append("bul") # Bulgarian
self.list_ffmpeg_codes.append("cat") # Catalan
self.list_ffmpeg_codes.append("ceb") # Cebuano
self.list_ffmpeg_codes.append("nya") # Chichewa
self.list_ffmpeg_codes.append("zho") # Chinese
self.list_ffmpeg_codes.append("zho-CN") # Chinese (Simplified)
self.list_ffmpeg_codes.append("zho-TW") # Chinese (Traditional)
self.list_ffmpeg_codes.append("cos") # Corsican
self.list_ffmpeg_codes.append("hrv") # Croatian
self.list_ffmpeg_codes.append("ces") # Czech
self.list_ffmpeg_codes.append("dan") # Danish
self.list_ffmpeg_codes.append("div") # Dhivehi
self.list_ffmpeg_codes.append("doi") # Dogri
self.list_ffmpeg_codes.append("nld") # Dutch
self.list_ffmpeg_codes.append("eng") # English
self.list_ffmpeg_codes.append("epo") # Esperanto
self.list_ffmpeg_codes.append("est") # Estonian
self.list_ffmpeg_codes.append("ewe") # Ewe
self.list_ffmpeg_codes.append("fil") # Filipino
self.list_ffmpeg_codes.append("fin") # Finnish
self.list_ffmpeg_codes.append("fra") # French
self.list_ffmpeg_codes.append("fry") # Frisian
self.list_ffmpeg_codes.append("glg") # Galician
self.list_ffmpeg_codes.append("kat") # Georgian
self.list_ffmpeg_codes.append("deu") # German
self.list_ffmpeg_codes.append("ell") # Greek
self.list_ffmpeg_codes.append("grn") # Guarani
self.list_ffmpeg_codes.append("guj") # Gujarati
self.list_ffmpeg_codes.append("hat") # Haitian Creole
self.list_ffmpeg_codes.append("hau") # Hausa
self.list_ffmpeg_codes.append("haw") # Hawaiian
self.list_ffmpeg_codes.append("heb") # Hebrew
self.list_ffmpeg_codes.append("hin") # Hindi
self.list_ffmpeg_codes.append("hmn") # Hmong
self.list_ffmpeg_codes.append("hun") # Hungarian
self.list_ffmpeg_codes.append("isl") # Icelandic
self.list_ffmpeg_codes.append("ibo") # Igbo
self.list_ffmpeg_codes.append("ilo") # Ilocano
self.list_ffmpeg_codes.append("ind") # Indonesian
self.list_ffmpeg_codes.append("gle") # Irish
self.list_ffmpeg_codes.append("ita") # Italian
self.list_ffmpeg_codes.append("jpn") # Japanese
self.list_ffmpeg_codes.append("jav") # Javanese
self.list_ffmpeg_codes.append("kan") # Kannada
self.list_ffmpeg_codes.append("kaz") # Kazakh
self.list_ffmpeg_codes.append("khm") # Khmer
self.list_ffmpeg_codes.append("kin") # Kinyarwanda
self.list_ffmpeg_codes.append("kok") # Konkani
self.list_ffmpeg_codes.append("kor") # Korean
self.list_ffmpeg_codes.append("kri") # Krio
self.list_ffmpeg_codes.append("kmr") # Kurdish (Kurmanji)
self.list_ffmpeg_codes.append("ckb") # Kurdish (Sorani)
self.list_ffmpeg_codes.append("kir") # Kyrgyz
self.list_ffmpeg_codes.append("lao") # Lao
self.list_ffmpeg_codes.append("lat") # Latin
self.list_ffmpeg_codes.append("lav") # Latvian
self.list_ffmpeg_codes.append("lin") # Lingala
self.list_ffmpeg_codes.append("lit") # Lithuanian
self.list_ffmpeg_codes.append("lug") # Luganda
self.list_ffmpeg_codes.append("ltz") # Luxembourgish
self.list_ffmpeg_codes.append("mkd") # Macedonian
self.list_ffmpeg_codes.append("mlg") # Malagasy
self.list_ffmpeg_codes.append("msa") # Malay
self.list_ffmpeg_codes.append("mal") # Malayalam
self.list_ffmpeg_codes.append("mlt") # Maltese
self.list_ffmpeg_codes.append("mri") # Maori
self.list_ffmpeg_codes.append("mar") # Marathi
self.list_ffmpeg_codes.append("mni-Mtei") # Meiteilon (Manipuri)
self.list_ffmpeg_codes.append("lus") # Mizo
self.list_ffmpeg_codes.append("mon") # Mongolian
self.list_ffmpeg_codes.append("mya") # Myanmar (Burmese)
self.list_ffmpeg_codes.append("nep") # Nepali
self.list_ffmpeg_codes.append("nor") # Norwegian
self.list_ffmpeg_codes.append("ori") # Odiya (Oriya)
self.list_ffmpeg_codes.append("orm") # Oromo
self.list_ffmpeg_codes.append("pus") # Pashto
self.list_ffmpeg_codes.append("fas") # Persian
self.list_ffmpeg_codes.append("pol") # Polish
self.list_ffmpeg_codes.append("por") # Portuguese
self.list_ffmpeg_codes.append("pan") # Punjabi
self.list_ffmpeg_codes.append("que") # Quechua
self.list_ffmpeg_codes.append("ron") # Romanian
self.list_ffmpeg_codes.append("rus") # Russian
self.list_ffmpeg_codes.append("smo") # Samoan
self.list_ffmpeg_codes.append("san") # Sanskrit
self.list_ffmpeg_codes.append("gla") # Scots Gaelic
self.list_ffmpeg_codes.append("nso") # Sepedi
self.list_ffmpeg_codes.append("srp") # Serbian
self.list_ffmpeg_codes.append("sot") # Sesotho
self.list_ffmpeg_codes.append("sna") # Shona
self.list_ffmpeg_codes.append("snd") # Sindhi
self.list_ffmpeg_codes.append("sin") # Sinhala
self.list_ffmpeg_codes.append("slk") # Slovak
self.list_ffmpeg_codes.append("slv") # Slovenian
self.list_ffmpeg_codes.append("som") # Somali
self.list_ffmpeg_codes.append("spa") # Spanish
self.list_ffmpeg_codes.append("sun") # Sundanese
self.list_ffmpeg_codes.append("swa") # Swahili
self.list_ffmpeg_codes.append("swe") # Swedish
self.list_ffmpeg_codes.append("tgk") # Tajik
self.list_ffmpeg_codes.append("tam") # Tamil
self.list_ffmpeg_codes.append("tat") # Tatar
self.list_ffmpeg_codes.append("tel") # Telugu
self.list_ffmpeg_codes.append("tha") # Thai
self.list_ffmpeg_codes.append("tir") # Tigrinya
self.list_ffmpeg_codes.append("tso") # Tsonga
self.list_ffmpeg_codes.append("tur") # Turkish
self.list_ffmpeg_codes.append("tuk") # Turkmen
self.list_ffmpeg_codes.append("twi") # Twi (Akan)
self.list_ffmpeg_codes.append("ukr") # Ukrainian
self.list_ffmpeg_codes.append("urd") # Urdu
self.list_ffmpeg_codes.append("uig") # Uyghur
self.list_ffmpeg_codes.append("uzb") # Uzbek
self.list_ffmpeg_codes.append("vie") # Vietnamese
self.list_ffmpeg_codes.append("wel") # Welsh
self.list_ffmpeg_codes.append("xho") # Xhosa
self.list_ffmpeg_codes.append("yid") # Yiddish
self.list_ffmpeg_codes.append("yor") # Yoruba
self.list_ffmpeg_codes.append("zul") # Zulu
self.code_of_name = dict(zip(self.list_names, self.list_codes))
self.code_of_ffmpeg_code = dict(zip(self.list_ffmpeg_codes, self.list_codes))
self.name_of_code = dict(zip(self.list_codes, self.list_names))
self.name_of_ffmpeg_code = dict(zip(self.list_ffmpeg_codes, self.list_names))
self.ffmpeg_code_of_name = dict(zip(self.list_names, self.list_ffmpeg_codes))
self.ffmpeg_code_of_code = dict(zip(self.list_codes, self.list_ffmpeg_codes))
self.dict = {
'af': 'Afrikaans',
'sq': 'Albanian',
'am': 'Amharic',
'ar': 'Arabic',
'hy': 'Armenian',
'as': 'Assamese',
'ay': 'Aymara',
'az': 'Azerbaijani',
'bm': 'Bambara',
'eu': 'Basque',
'be': 'Belarusian',
'bn': 'Bengali',
'bho': 'Bhojpuri',
'bs': 'Bosnian',
'bg': 'Bulgarian',
'ca': 'Catalan',
'ceb': 'Cebuano',
'ny': 'Chichewa',
'zh': 'Chinese',
'zh-CN': 'Chinese (Simplified)',
'zh-TW': 'Chinese (Traditional)',
'co': 'Corsican',
'hr': 'Croatian',
'cs': 'Czech',
'da': 'Danish',
'dv': 'Dhivehi',
'doi': 'Dogri',
'nl': 'Dutch',
'en': 'English',
'eo': 'Esperanto',
'et': 'Estonian',
'ee': 'Ewe',
'fil': 'Filipino',
'fi': 'Finnish',
'fr': 'French',
'fy': 'Frisian',
'gl': 'Galician',
'ka': 'Georgian',
'de': 'German',
'el': 'Greek',
'gn': 'Guarani',
'gu': 'Gujarati',
'ht': 'Haitian Creole',
'ha': 'Hausa',
'haw': 'Hawaiian',
'he': 'Hebrew',
'hi': 'Hindi',
'hmn': 'Hmong',
'hu': 'Hungarian',
'is': 'Icelandic',
'ig': 'Igbo',
'ilo': 'Ilocano',
'id': 'Indonesian',
'ga': 'Irish',
'it': 'Italian',
'ja': 'Japanese',
'jv': 'Javanese',
'kn': 'Kannada',
'kk': 'Kazakh',
'km': 'Khmer',
'rw': 'Kinyarwanda',
'gom': 'Konkani',
'ko': 'Korean',
'kri': 'Krio',
'kmr': 'Kurdish (Kurmanji)',
'ckb': 'Kurdish (Sorani)',
'ky': 'Kyrgyz',
'lo': 'Lao',
'la': 'Latin',
'lv': 'Latvian',
'ln': 'Lingala',
'lt': 'Lithuanian',
'lg': 'Luganda',
'lb': 'Luxembourgish',
'mk': 'Macedonian',
'mg': 'Malagasy',
'ms': 'Malay',
'ml': 'Malayalam',
'mt': 'Maltese',
'mi': 'Maori',
'mr': 'Marathi',
'mni-Mtei': 'Meiteilon (Manipuri)',
'lus': 'Mizo',
'mn': 'Mongolian',
'my': 'Myanmar (Burmese)',
'ne': 'Nepali',
'no': 'Norwegian',
'or': 'Odiya (Oriya)',
'om': 'Oromo',
'ps': 'Pashto',
'fa': 'Persian',
'pl': 'Polish',
'pt': 'Portuguese',
'pa': 'Punjabi',
'qu': 'Quechua',
'ro': 'Romanian',
'ru': 'Russian',
'sm': 'Samoan',
'sa': 'Sanskrit',
'gd': 'Scots Gaelic',
'nso': 'Sepedi',
'sr': 'Serbian',
'st': 'Sesotho',
'sn': 'Shona',
'sd': 'Sindhi',
'si': 'Sinhala',
'sk': 'Slovak',
'sl': 'Slovenian',
'so': 'Somali',
'es': 'Spanish',
'su': 'Sundanese',
'sw': 'Swahili',
'sv': 'Swedish',
'tg': 'Tajik',
'ta': 'Tamil',
'tt': 'Tatar',
'te': 'Telugu',
'th': 'Thai',
'ti': 'Tigrinya',
'ts': 'Tsonga',
'tr': 'Turkish',
'tk': 'Turkmen',
'tw': 'Twi (Akan)',
'uk': 'Ukrainian',
'ur': 'Urdu',
'ug': 'Uyghur',
'uz': 'Uzbek',
'vi': 'Vietnamese',
'cy': 'Welsh',
'xh': 'Xhosa',
'yi': 'Yiddish',
'yo': 'Yoruba',
'zu': 'Zulu',
}
self.ffmpeg_dict = {
'af': 'afr', # Afrikaans
'sq': 'alb', # Albanian
'am': 'amh', # Amharic
'ar': 'ara', # Arabic
'hy': 'arm', # Armenian
'as': 'asm', # Assamese
'ay': 'aym', # Aymara
'az': 'aze', # Azerbaijani
'bm': 'bam', # Bambara
'eu': 'baq', # Basque
'be': 'bel', # Belarusian
'bn': 'ben', # Bengali
'bho': 'bho', # Bhojpuri
'bs': 'bos', # Bosnian
'bg': 'bul', # Bulgarian
'ca': 'cat', # Catalan
'ceb': 'ceb', # Cebuano
'ny': 'nya', # Chichewa
'zh': 'chi', # Chinese
'zh-CN': 'chi', # Chinese (Simplified)
'zh-TW': 'chi', # Chinese (Traditional)
'co': 'cos', # Corsican
'hr': 'hrv', # Croatian
'cs': 'cze', # Czech
'da': 'dan', # Danish
'dv': 'div', # Dhivehi
'doi': 'doi', # Dogri
'nl': 'dut', # Dutch
'en': 'eng', # English
'eo': 'epo', # Esperanto
'et': 'est', # Estonian
'ee': 'ewe', # Ewe
'fil': 'fil', # Filipino
'fi': 'fin', # Finnish
'fr': 'fre', # French
'fy': 'fry', # Frisian
'gl': 'glg', # Galician
'ka': 'geo', # Georgian
'de': 'ger', # German
'el': 'gre', # Greek
'gn': 'grn', # Guarani
'gu': 'guj', # Gujarati
'ht': 'hat', # Haitian Creole
'ha': 'hau', # Hausa
'haw': 'haw', # Hawaiian
'he': 'heb', # Hebrew
'hi': 'hin', # Hindi
'hmn': 'hmn', # Hmong
'hu': 'hun', # Hungarian
'is': 'ice', # Icelandic
'ig': 'ibo', # Igbo
'ilo': 'ilo', # Ilocano
'id': 'ind', # Indonesian
'ga': 'gle', # Irish
'it': 'ita', # Italian
'ja': 'jpn', # Japanese
'jv': 'jav', # Javanese
'kn': 'kan', # Kannada
'kk': 'kaz', # Kazakh
'km': 'khm', # Khmer
'rw': 'kin', # Kinyarwanda
'gom': 'kok', # Konkani
'ko': 'kor', # Korean
'kri': 'kri', # Krio
'kmr': 'kur', # Kurdish (Kurmanji)
'ckb': 'kur', # Kurdish (Sorani)
'ky': 'kir', # Kyrgyz
'lo': 'lao', # Lao
'la': 'lat', # Latin
'lv': 'lav', # Latvian
'ln': 'lin', # Lingala
'lt': 'lit', # Lithuanian
'lg': 'lug', # Luganda
'lb': 'ltz', # Luxembourgish
'mk': 'mac', # Macedonian
'mg': 'mlg', # Malagasy
'ms': 'may', # Malay
'ml': 'mal', # Malayalam
'mt': 'mlt', # Maltese
'mi': 'mao', # Maori
'mr': 'mar', # Marathi
'mni-Mtei': 'mni', # Meiteilon (Manipuri)
'lus': 'lus', # Mizo
'mn': 'mon', # Mongolian
'my': 'bur', # Myanmar (Burmese)
'ne': 'nep', # Nepali
'no': 'nor', # Norwegian
'or': 'ori', # Odiya (Oriya)
'om': 'orm', # Oromo
'ps': 'pus', # Pashto
'fa': 'per', # Persian
'pl': 'pol', # Polish
'pt': 'por', # Portuguese
'pa': 'pan', # Punjabi
'qu': 'que', # Quechua
'ro': 'rum', # Romanian
'ru': 'rus', # Russian
'sm': 'smo', # Samoan
'sa': 'san', # Sanskrit
'gd': 'gla', # Scots Gaelic
'nso': 'nso', # Sepedi
'sr': 'srp', # Serbian
'st': 'sot', # Sesotho
'sn': 'sna', # Shona
'sd': 'snd', # Sindhi
'si': 'sin', # Sinhala
'sk': 'slo', # Slovak
'sl': 'slv', # Slovenian
'so': 'som', # Somali
'es': 'spa', # Spanish
'su': 'sun', # Sundanese
'sw': 'swa', # Swahili
'sv': 'swe', # Swedish
'tg': 'tgk', # Tajik
'ta': 'tam', # Tamil
'tt': 'tat', # Tatar
'te': 'tel', # Telugu
'th': 'tha', # Thai
'ti': 'tir', # Tigrinya
'ts': 'tso', # Tsonga
'tr': 'tur', # Turkish
'tk': 'tuk', # Turkmen
'tw': 'twi', # Twi (Akan)
'uk': 'ukr', # Ukrainian
'ur': 'urd', # Urdu
'ug': 'uig', # Uyghur
'uz': 'uzb', # Uzbek
'vi': 'vie', # Vietnamese
'cy': 'wel', # Welsh
'xh': 'xho', # Xhosa
'yi': 'yid', # Yiddish
'yo': 'yor', # Yoruba
'zu': 'zul', # Zulu
}
def get_code_of_name(self, name):
return self.code_of_name[name]
def get_code_of_ffmpeg_code(self, ffmpeg_code):
return self.code_of_ffmpeg_code[ffmpeg_code]
def get_name_of_code(self, code):
return self.name_of_code[code]
def get_name_of_ffmpeg_code(self, ffmpeg_code):
return self.name_of_ffmpeg_code[ffmpeg_code]
def get_ffmpeg_code_of_name(self, name):
return self.ffmpeg_code_of_name[name]
def get_ffmpeg_code_of_code(self, code):
return self.ffmpeg_code_of_code[code]
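# Usage (illustrative): converting between the two code standards via the
# zipped lookup tables above, e.g.
#     GoogleLanguage().get_ffmpeg_code_of_code('en') -> 'eng'
#     GoogleLanguage().get_code_of_ffmpeg_code('deu') -> 'de'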
google_unsupported_languages = ["auto", "ba", "br", "fo", "nn", "oc", "tl", "bo"]
class WavConverter:
@staticmethod
def which(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
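# NOTE: on Python 3 this hand-rolled PATH lookup could be replaced with
# shutil.which() (shutil is already imported above); it is kept as-is here to
# preserve the original helper's behavior.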
@staticmethod
def ffprobe_check():
if WavConverter.which("ffprobe"):
return "ffprobe"
if WavConverter.which("ffprobe.exe"):
return "ffprobe.exe"
return None
@staticmethod
def ffmpeg_check():
if WavConverter.which("ffmpeg"):
return "ffmpeg"
if WavConverter.which("ffmpeg.exe"):
return "ffmpeg.exe"
return None
def __init__(self, channels=1, rate=48000, progress_callback=None, error_messages_callback=None):
self.channels = channels
self.rate = rate
self.progress_callback = progress_callback
self.error_messages_callback = error_messages_callback
def __call__(self, media_filepath):
temp = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if not os.path.isfile(media_filepath):
if self.error_messages_callback:
self.error_messages_callback(f"The given file does not exist: '{media_filepath}'")
else:
print(f"The given file does not exist: '{media_filepath}'")
raise Exception(f"Invalid file: '{media_filepath}'")
if not self.ffprobe_check():
if self.error_messages_callback:
self.error_messages_callback("Cannot find ffprobe executable")
else:
print("Cannot find ffprobe executable")
raise Exception("Dependency not found: ffprobe")
if not self.ffmpeg_check():
if self.error_messages_callback:
self.error_messages_callback("Cannot find ffmpeg executable")
else:
print("Cannot find ffmpeg executable")
raise Exception("Dependency not found: ffmpeg")
ffmpeg_command = [
'ffmpeg',
'-hide_banner',
'-loglevel', 'error',
'-v', 'error',
'-y',
'-i', media_filepath,
'-ac', str(self.channels),
'-ar', str(self.rate),
'-progress', '-', '-nostats',
temp.name
]
try:
media_file_display_name = os.path.basename(media_filepath).split('/')[-1]
info = f"Converting '{media_file_display_name}' to a temporary WAV file"
start_time = time.time()
ffprobe_command = [
'ffprobe',
'-hide_banner',
'-v', 'error',
'-loglevel', 'error',
'-show_entries',
'format=duration',
'-of', 'default=noprint_wrappers=1:nokey=1',
media_filepath
]
ffprobe_process = None
if sys.platform == "win32":
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True)
total_duration = float(ffprobe_process.strip())
process = None
if sys.platform == "win32":
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
if process.stdout is None:
continue
stderr_line = (process.stdout.readline().decode("utf-8", errors="replace").strip())
if stderr_line == '' and process.poll() is not None:
break
if "out_time=" in stderr_line:
time_str = stderr_line.split('time=')[1].split()[0]
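# Convert ffmpeg's HH:MM:SS.micro timestamp to milliseconds, e.g.
# "00:01:30.50" -> 0*3600000 + 1*60000 + 30.50*1000 = 90500.0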
current_duration = sum(float(x) * 1000 * 60 ** i for i, x in enumerate(reversed(time_str.split(":"))))
if current_duration>0 and current_duration<=total_duration*1000:
percentage = int(current_duration*100/(int(float(total_duration))*1000))
if self.progress_callback and percentage <= 100:
self.progress_callback(info, media_file_display_name, percentage, start_time)
temp.close()
return temp.name, self.rate
except KeyboardInterrupt:
if self.error_messages_callback:
self.error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if self.error_messages_callback:
self.error_messages_callback(e)
else:
print(e)
return
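# Illustrative usage sketch for WavConverter (comments only; see main() below
# for the real call site). Both callbacks are optional:
#   wav_converter = WavConverter(progress_callback=show_progress, error_messages_callback=show_error_messages)
#   wav_filepath, sample_rate = wav_converter("video.mp4")  # "video.mp4" is a placeholder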
class SentenceTranslator(object):
def __init__(self, src, dst, patience=-1, timeout=30, error_messages_callback=None):
self.src = src
self.dst = dst
self.patience = patience
self.timeout = timeout
self.error_messages_callback = error_messages_callback
def __call__(self, sentence):
try:
translated_sentence = []
# handle the special case: empty string.
if not sentence:
return None
translated_sentence = self.GoogleTranslate(sentence, src=self.src, dst=self.dst, timeout=self.timeout)
fail_to_translate = translated_sentence[-1] == '\n'
patience = self.patience
while fail_to_translate and patience:
# GoogleTranslate() returns a plain string, so there is no .text attribute to take here
translated_sentence = self.GoogleTranslate(translated_sentence, src=self.src, dst=self.dst, timeout=self.timeout)
if translated_sentence[-1] == '\n':
if patience == -1:
continue
patience -= 1
else:
fail_to_translate = False
return translated_sentence
except KeyboardInterrupt:
if self.error_messages_callback:
self.error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if self.error_messages_callback:
self.error_messages_callback(e)
else:
print(e)
return
def GoogleTranslate(self, text, src, dst, timeout=30):
url = 'https://translate.googleapis.com/translate_a/single'
# Pass the query as a params dict so requests/httpx URL-encode it; concatenating
# raw text into the URL breaks on sentences containing '&', '#', '+' or non-ASCII characters
params = {'client': 'gtx', 'sl': src, 'tl': dst, 'dt': 't', 'q': text}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)', 'Referer': 'https://translate.google.com',}
try:
response = requests.get(url, params=params, headers=headers, timeout=self.timeout)
if response.status_code == 200:
response_json = response.json()[0]
length = len(response_json)
translation = ""
for i in range(length):
translation = translation + response_json[i][0]
return translation
return
except requests.exceptions.ConnectionError:
with httpx.Client() as client:
response = client.get(url, params=params, headers=headers, timeout=self.timeout)
if response.status_code == 200:
response_json = response.json()[0]
length = len(response_json)
translation = ""
for i in range(length):
translation = translation + response_json[i][0]
return translation
return
except KeyboardInterrupt:
if self.error_messages_callback:
self.error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if self.error_messages_callback:
self.error_messages_callback(e)
else:
print(e)
return
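# Illustrative usage sketch for SentenceTranslator (comments only; language
# codes are placeholder values):
#   transcript_translator = SentenceTranslator(src="en", dst="id")
#   translated = transcript_translator("Hello world")  # translated string, or None on failure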
class SubtitleFormatter:
supported_formats = ['srt', 'vtt', 'json', 'raw']
def __init__(self, format_type, error_messages_callback=None):
self.format_type = format_type.lower()
self.error_messages_callback = error_messages_callback
def __call__(self, subtitles, padding_before=0, padding_after=0):
try:
if self.format_type == 'srt':
return self.srt_formatter(subtitles, padding_before, padding_after)
elif self.format_type == 'vtt':
return self.vtt_formatter(subtitles, padding_before, padding_after)
elif self.format_type == 'json':
return self.json_formatter(subtitles)
elif self.format_type == 'raw':
return self.raw_formatter(subtitles)
else:
if self.error_messages_callback:
self.error_messages_callback(f"Unsupported format type: '{self.format_type}'")
else:
raise ValueError(f"Unsupported format type: '{self.format_type}'")
except KeyboardInterrupt:
if self.error_messages_callback:
self.error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if self.error_messages_callback:
self.error_messages_callback(e)
else:
print(e)
return
def srt_formatter(self, subtitles, padding_before=0, padding_after=0):
"""
Serialize a list of subtitles according to the SRT format, with optional time padding.
"""
sub_rip_file = pysrt.SubRipFile()
for i, ((start, end), text) in enumerate(subtitles, start=1):
item = pysrt.SubRipItem()
item.index = i
item.text = six.text_type(text)
item.start.seconds = max(0, start - padding_before)
item.end.seconds = end + padding_after
sub_rip_file.append(item)
return '\n'.join(six.text_type(item) for item in sub_rip_file)
def vtt_formatter(self, subtitles, padding_before=0, padding_after=0):
"""
Serialize a list of subtitles according to the VTT format, with optional time padding.
"""
text = self.srt_formatter(subtitles, padding_before, padding_after)
text = 'WEBVTT\n\n' + text.replace(',', '.')
return text
def json_formatter(self, subtitles):
"""
Serialize a list of subtitles as a JSON blob.
"""
subtitle_dicts = [
{
'start': start,
'end': end,
'content': text,
}
for ((start, end), text)
in subtitles
]
return json.dumps(subtitle_dicts)
def raw_formatter(self, subtitles):
"""
Serialize a list of subtitles as a single space-delimited string.
"""
return ' '.join(text for (_rng, text) in subtitles)
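# Illustrative usage sketch for SubtitleFormatter (comments only). Subtitles
# are ((start, end), text) tuples with times in seconds, the same shape that
# SRTFileReader and SubtitleStreamParser produce:
#   formatter = SubtitleFormatter("srt")
#   srt_text = formatter([((0.0, 2.5), "Hello"), ((2.5, 5.0), "World")])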
class SubtitleWriter:
def __init__(self, regions, transcripts, format, error_messages_callback=None):
self.regions = regions
self.transcripts = transcripts
self.format = format
self.timed_subtitles = [(r, t) for r, t in zip(self.regions, self.transcripts) if t]
self.error_messages_callback = error_messages_callback
def get_timed_subtitles(self):
return self.timed_subtitles
def write(self, declared_subtitle_filepath):
try:
formatter = SubtitleFormatter(self.format)
formatted_subtitles = formatter(self.timed_subtitles)
saved_subtitle_filepath = declared_subtitle_filepath
if saved_subtitle_filepath:
subtitle_file_base, subtitle_file_ext = os.path.splitext(saved_subtitle_filepath)
if not subtitle_file_ext:
saved_subtitle_filepath = f"{subtitle_file_base}.{self.format}"
else:
saved_subtitle_filepath = declared_subtitle_filepath
with open(saved_subtitle_filepath, 'wb') as f:
f.write(formatted_subtitles.encode("utf-8"))
#with open(saved_subtitle_filepath, 'a') as f:
# f.write("\n")
except KeyboardInterrupt:
if self.error_messages_callback:
self.error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if self.error_messages_callback:
self.error_messages_callback(e)
else:
print(e)
return
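# Illustrative usage sketch for SubtitleWriter (comments only); regions and
# transcripts are parallel lists, and entries with empty transcripts are
# dropped by the zip filter in __init__:
#   writer = SubtitleWriter(regions=[(0.0, 2.5)], transcripts=["Hello"], format="srt")
#   writer.write("subtitle.srt")  # extension is appended automatically when missing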
class SubtitleStreamParser:
@staticmethod
def which(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
@staticmethod
def ffprobe_check():
if SubtitleStreamParser.which("ffprobe"):
return "ffprobe"
if SubtitleStreamParser.which("ffprobe.exe"):
return "ffprobe.exe"
return None
def __init__(self, error_messages_callback=None):
self.error_messages_callback = error_messages_callback
self._indexes = []
self._languages = []
self._timed_subtitles = []
self._number_of_streams = 0
def get_subtitle_streams(self, media_filepath):
ffprobe_cmd = [
'ffprobe',
'-hide_banner',
'-v', 'error',
'-loglevel', 'error',
'-print_format', 'json',
'-show_entries', 'stream=index:stream_tags=language',
'-select_streams', 's',
media_filepath
]
try:
result = None
if sys.platform == "win32":
result = subprocess.run(ffprobe_cmd, stdin=open(os.devnull), capture_output=True, text=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
result = subprocess.run(ffprobe_cmd, stdin=open(os.devnull), capture_output=True, text=True)
output = result.stdout
streams = json.loads(output)['streams']
subtitle_streams = []
empty_stream_exists = False
for index, stream in enumerate(streams, start=1):
language = stream.get('tags', {}).get('language')  # some streams carry no tags at all
subtitle_streams.append({'index': index, 'language': language})
# Check if 'No subtitles' stream exists
if language == 'No subtitles':
empty_stream_exists = True
# Append 'No subtitles' stream if it exists
if not empty_stream_exists:
subtitle_streams.append({'index': len(streams) + 1, 'language': 'No subtitles'})
return subtitle_streams
except FileNotFoundError:
msg = 'ffprobe not found. Make sure it is installed and added to the system PATH.'
if self.error_messages_callback:
self.error_messages_callback(msg)
else:
print(msg)
return None
except Exception as e:
if self.error_messages_callback:
self.error_messages_callback(e)
else:
print(e)
return None
def get_timed_subtitles(self, media_filepath, subtitle_stream_index):
ffmpeg_cmd = [
'ffmpeg',
'-hide_banner',
'-loglevel', 'error',
'-v', 'error',
'-i', media_filepath,
'-map', f'0:s:{subtitle_stream_index-1}',
'-f', 'srt',
'-'
]
try:
result = None
if sys.platform == "win32":
result = subprocess.run(ffmpeg_cmd, stdin=open(os.devnull), capture_output=True, text=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
result = subprocess.run(ffmpeg_cmd, stdin=open(os.devnull), capture_output=True, text=True)
output = result.stdout
timed_subtitles = []
subtitle_data = []
lines = output.strip().split('\n')
#print(lines)
subtitles = None
subtitle_blocks = []
block = []
for line in lines:
if line.strip() == '':
subtitle_blocks.append(block)
block = []
else:
block.append(line.strip())
subtitle_blocks.append(block)
# Parse each subtitles block and store as tuple in timed_subtitles list
for block in subtitle_blocks:
if block:
# Extract start and end times from subtitles block
start_time_str, end_time_str = block[1].split(' --> ')
time_format = '%H:%M:%S,%f'
start_time_time_delta = datetime.strptime(start_time_str, time_format) - datetime.strptime('00:00:00,000', time_format)
start_time_total_seconds = start_time_time_delta.total_seconds()
end_time_time_delta = datetime.strptime(end_time_str, time_format) - datetime.strptime('00:00:00,000', time_format)
end_time_total_seconds = end_time_time_delta.total_seconds()
# Extract subtitles text from subtitles block
subtitles = ' '.join(block[2:])
timed_subtitles.append(((start_time_total_seconds, end_time_total_seconds), subtitles))
return timed_subtitles
except FileNotFoundError:
msg = 'ffmpeg not found. Make sure it is installed and added to the system PATH.'
if self.error_messages_callback:
self.error_messages_callback(msg)
else:
print(msg)
return None
except Exception as e:
if self.error_messages_callback:
self.error_messages_callback(e)
else:
print(e)
return None
def number_of_streams(self):
return self._number_of_streams
def indexes(self):
return self._indexes
def languages(self):
return self._languages
def timed_subtitles(self):
return self._timed_subtitles
def index_of_language(self, language):
for i in range(self.number_of_streams()):
if self.languages()[i] == language:
return i+1
return
def language_of_index(self, index):
return self.languages()[index-1]
def timed_subtitles_of_index(self, index):
return self.timed_subtitles()[index-1]
def timed_subtitles_of_language(self, language):
for i in range(self.number_of_streams()):
if self.languages()[i] == language:
return self.timed_subtitles()[i]
def __call__(self, media_filepath):
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if not self.ffprobe_check():
if self.error_messages_callback:
self.error_messages_callback("Cannot find ffprobe executable")
else:
print("Cannot find ffprobe executable")
raise Exception("Dependency not found: ffprobe")
subtitle_streams = self.get_subtitle_streams(media_filepath)
subtitle_streams_data = []
if subtitle_streams:
for subtitle_stream in subtitle_streams:
subtitle_stream_index = subtitle_stream['index']
subtitle_stream_language = subtitle_stream['language']
#print(f"Stream Index: {subtitle_stream_index}, Language: {subtitle_stream_language}")
subtitle_streams_data.append((subtitle_stream_index, subtitle_stream_language))
subtitle_data = []
subtitle_contents = []
for subtitle_stream_index in range(len(subtitle_streams)):
index, language = subtitle_streams_data[subtitle_stream_index]
self._indexes.append(index)
self._languages.append(language)
# Extract each stream only once and reuse the result for both containers
timed_subtitles = self.get_timed_subtitles(media_filepath, subtitle_stream_index+1)
self._timed_subtitles.append(timed_subtitles)
subtitle_data.append((index, language, timed_subtitles))
self._number_of_streams = len(subtitle_data)
return subtitle_data
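# Illustrative usage sketch for SubtitleStreamParser (comments only). Calling
# the instance scans a media file and caches indexes, languages and timed
# subtitles per stream:
#   subtitle_stream_parser = SubtitleStreamParser(error_messages_callback=show_error_messages)
#   streams = subtitle_stream_parser("video.mp4")  # [(index, language, timed_subtitles), ...]
#   if "eng" in subtitle_stream_parser.languages():
#       eng_subtitles = subtitle_stream_parser.timed_subtitles_of_language("eng")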
class MediaSubtitleRenderer:
@staticmethod
def which(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
@staticmethod
def ffmpeg_check():
if MediaSubtitleRenderer.which("ffmpeg"):
return "ffmpeg"
if MediaSubtitleRenderer.which("ffmpeg.exe"):
return "ffmpeg.exe"
return None
@staticmethod
def ffprobe_check():
if MediaSubtitleRenderer.which("ffprobe"):
return "ffprobe"
if MediaSubtitleRenderer.which("ffprobe.exe"):
return "ffprobe.exe"
return None
def __init__(self, subtitle_path=None, language=None, output_path=None, progress_callback=None, error_messages_callback=None):
self.subtitle_path = subtitle_path
self.language = language
self.output_path = output_path
self.progress_callback = progress_callback
self.error_messages_callback = error_messages_callback
def __call__(self, media_filepath):
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if "\\" in self.subtitle_path:
self.subtitle_path = self.subtitle_path.replace("\\", "/")
if "\\" in self.output_path:
self.output_path = self.output_path.replace("\\", "/")
if not os.path.isfile(media_filepath):
if self.error_messages_callback:
self.error_messages_callback(f"The given file does not exist: '{media_filepath}'")
else:
print(f"The given file does not exist: '{media_filepath}'")
raise Exception(f"Invalid file: '{media_filepath}'")
if not self.ffprobe_check():
if self.error_messages_callback:
self.error_messages_callback("Cannot find ffprobe executable")
else:
print("Cannot find ffprobe executable")
raise Exception("Dependency not found: ffprobe")
if not self.ffmpeg_check():
if self.error_messages_callback:
self.error_messages_callback("Cannot find ffmpeg executable")
else:
print("Cannot find ffmpeg executable")
raise Exception("Dependency not found: ffmpeg")
try:
scale_switch = "'trunc(iw/2)*2'\:'trunc(ih/2)*2'"
ffmpeg_command = [
'ffmpeg',
'-hide_banner',
'-loglevel', 'error',
'-v', 'error',
'-y',
'-i', media_filepath,
'-vf', f'subtitles={shlex.quote(self.subtitle_path)},scale={scale_switch}',
'-c:v', 'libx264',
'-crf', '23',
'-preset', 'medium',
'-c:a', 'copy',
'-progress', '-', '-nostats',
self.output_path
]
media_file_display_name = os.path.basename(media_filepath).split('/')[-1]
info = f"Rendering subtitles file into '{media_file_display_name}'"
start_time = time.time()
ffprobe_command = [
'ffprobe',
'-hide_banner',
'-v', 'error',
'-loglevel', 'error',
'-show_entries',
'format=duration',
'-of', 'default=noprint_wrappers=1:nokey=1',
media_filepath
]
ffprobe_process = None
if sys.platform == "win32":
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True)
total_duration = float(ffprobe_process.strip())
process = None
if sys.platform == "win32":
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
if process.stdout is None:
continue
stderr_line = (process.stdout.readline().decode("utf-8", errors="replace").strip())
if stderr_line == '' and process.poll() is not None:
break
if "out_time=" in stderr_line:
time_str = stderr_line.split('time=')[1].split()[0]
current_duration = sum(float(x) * 1000 * 60 ** i for i, x in enumerate(reversed(time_str.split(":"))))
if current_duration>0 and current_duration<=total_duration*1000:
percentage = int(current_duration*100/(int(float(total_duration))*1000))
if self.progress_callback and percentage <= 100:
self.progress_callback(info, media_file_display_name, percentage, start_time)
if os.path.isfile(self.output_path):
return self.output_path
else:
return None
except KeyboardInterrupt:
if self.error_messages_callback:
self.error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if self.error_messages_callback:
self.error_messages_callback(e)
else:
print(e)
return
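# Illustrative usage sketch for MediaSubtitleRenderer (comments only). It
# hard-burns the subtitles with ffmpeg's 'subtitles' filter and re-encodes
# the video with libx264; file names are placeholders:
#   renderer = MediaSubtitleRenderer(subtitle_path="subtitle.srt", language="eng", output_path="rendered.mp4", progress_callback=show_progress)
#   result = renderer("video.mp4")  # output_path on success, None otherwise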
class MediaSubtitleEmbedder:
@staticmethod
def which(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
@staticmethod
def ffprobe_check():
if MediaSubtitleEmbedder.which("ffprobe"):
return "ffprobe"
if MediaSubtitleEmbedder.which("ffprobe.exe"):
return "ffprobe.exe"
return None
@staticmethod
def ffmpeg_check():
if MediaSubtitleEmbedder.which("ffmpeg"):
return "ffmpeg"
if MediaSubtitleEmbedder.which("ffmpeg.exe"):
return "ffmpeg.exe"
return None
def __init__(self, subtitle_path=None, language=None, output_path=None, progress_callback=None, error_messages_callback=None):
self.subtitle_path = subtitle_path
self.language = language
self.output_path = output_path
self.progress_callback = progress_callback
self.error_messages_callback = error_messages_callback
def get_existing_subtitle_language(self, media_filepath):
# Run ffprobe to get stream information
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
command = [
'ffprobe',
'-hide_banner',
'-v', 'error',
'-loglevel', 'error',
'-of', 'json',
'-show_entries',
'format:stream',
media_filepath
]
output = None
if sys.platform == "win32":
output = subprocess.run(command, stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
output = subprocess.run(command, stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
metadata = json.loads(output.stdout)
streams = metadata['streams']
# Find the subtitle stream with language metadata
subtitle_languages = []
for stream in streams:
if stream['codec_type'] == 'subtitle' and 'tags' in stream and 'language' in stream['tags']:
language = stream['tags']['language']
subtitle_languages.append(language)
return subtitle_languages
def __call__(self, media_filepath):
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if "\\" in self.subtitle_path:
self.subtitle_path = self.subtitle_path.replace("\\", "/")
if "\\" in self.output_path:
self.output_path = self.output_path.replace("\\", "/")
if not os.path.isfile(media_filepath):
if self.error_messages_callback:
self.error_messages_callback(f"The given file does not exist: '{media_filepath}'")
else:
print(f"The given file does not exist: '{media_filepath}'")
raise Exception(f"Invalid file: '{media_filepath}'")
if not self.ffprobe_check():
if self.error_messages_callback:
self.error_messages_callback("Cannot find ffprobe executable")
else:
print("Cannot find ffprobe executable")
raise Exception("Dependency not found: ffprobe")
if not self.ffmpeg_check():
if self.error_messages_callback:
self.error_messages_callback("Cannot find ffmpeg executable")
else:
print("Cannot find ffmpeg executable")
raise Exception("Dependency not found: ffmpeg")
try:
existing_languages = self.get_existing_subtitle_language(media_filepath)
if self.language in existing_languages:
# THIS 'print' THINGS WILL MAKE progresbar screwed up!
#msg = (f"'{self.language}' subtitle stream already existed in '{media_filepath}'")
#if self.error_messages_callback:
# self.error_messages_callback(msg)
#else:
# print(msg)
return
else:
# Determine the next available subtitle index
next_index = len(existing_languages)
ffmpeg_command = [
'ffmpeg',
'-hide_banner',
'-loglevel', 'error',
'-v', 'error',
'-y',
'-i', media_filepath,
'-sub_charenc', 'UTF-8',
'-i', self.subtitle_path,
'-c:v', 'copy',
'-c:a', 'copy',
'-scodec', 'mov_text',
'-metadata:s:s:' + str(next_index), f'language={shlex.quote(self.language)}',
'-map', '0',
'-map', '1',
'-progress', '-', '-nostats',
self.output_path
]
subtitle_file_display_name = os.path.basename(self.subtitle_path).split('/')[-1]
media_file_display_name = os.path.basename(media_filepath).split('/')[-1]
info = f"Embedding '{self.language}' subtitles file '{subtitle_file_display_name}' into '{media_file_display_name}'"
start_time = time.time()
ffprobe_command = [
'ffprobe',
'-hide_banner',
'-v', 'error',
'-loglevel', 'error',
'-show_entries',
'format=duration',
'-of', 'default=noprint_wrappers=1:nokey=1',
media_filepath
]
ffprobe_process = None
if sys.platform == "win32":
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True)
total_duration = float(ffprobe_process.strip())
process = None
if sys.platform == "win32":
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
if process.stdout is None:
continue
stderr_line = (process.stdout.readline().decode("utf-8", errors="replace").strip())
if stderr_line == '' and process.poll() is not None:
break
if "out_time=" in stderr_line:
time_str = stderr_line.split('time=')[1].split()[0]
current_duration = sum(float(x) * 1000 * 60 ** i for i, x in enumerate(reversed(time_str.split(":"))))
if current_duration>0 and current_duration<=total_duration*1000:
percentage = int(current_duration*100/(int(float(total_duration))*1000))
if self.progress_callback and percentage <= 100:
self.progress_callback(info, media_file_display_name, percentage, start_time)
if os.path.isfile(self.output_path):
return self.output_path
else:
return None
except KeyboardInterrupt:
if self.error_messages_callback:
self.error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if self.error_messages_callback:
self.error_messages_callback(e)
else:
print(e)
return
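# Illustrative usage sketch for MediaSubtitleEmbedder (comments only). It adds
# a soft mov_text subtitles stream and returns early when the requested
# language is already present; file names are placeholders:
#   embedder = MediaSubtitleEmbedder(subtitle_path="subtitle.srt", language="eng", output_path="embedded.mp4", progress_callback=show_progress)
#   result = embedder("video.mp4")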
class MediaSubtitleRemover:
@staticmethod
def which(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
@staticmethod
def ffprobe_check():
if MediaSubtitleRemover.which("ffprobe"):
return "ffprobe"
if MediaSubtitleRemover.which("ffprobe.exe"):
return "ffprobe.exe"
return None
@staticmethod
def ffmpeg_check():
if MediaSubtitleRemover.which("ffmpeg"):
return "ffmpeg"
if MediaSubtitleRemover.which("ffmpeg.exe"):
return "ffmpeg.exe"
return None
def __init__(self, output_path=None, progress_callback=None, error_messages_callback=None):
self.output_path = output_path
self.progress_callback = progress_callback
self.error_messages_callback = error_messages_callback
def __call__(self, media_filepath):
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if "\\" in self.output_path:
self.output_path = self.output_path.replace("\\", "/")
if not os.path.isfile(media_filepath):
if self.error_messages_callback:
self.error_messages_callback(f"The given file does not exist: '{media_filepath}'")
else:
print(f"The given file does not exist: '{media_filepath}'")
raise Exception(f"Invalid file: '{media_filepath}'")
if not self.ffprobe_check():
if self.error_messages_callback:
self.error_messages_callback("Cannot find ffprobe executable")
else:
print("Cannot find ffprobe executable")
raise Exception("Dependency not found: ffprobe")
if not self.ffmpeg_check():
if self.error_messages_callback:
self.error_messages_callback("Cannot find ffmpeg executable")
else:
print("Cannot find ffmpeg executable")
raise Exception("Dependency not found: ffmpeg")
try:
ffmpeg_command = [
'ffmpeg',
'-hide_banner',
'-loglevel', 'error',
'-v', 'error',
'-y',
'-i', media_filepath,
'-c', 'copy',
'-sn',
'-progress', '-', '-nostats',
self.output_path
]
media_file_display_name = os.path.basename(media_filepath).split('/')[-1]
info = f"Removing subtitles streams from '{media_file_display_name}'"
start_time = time.time()
ffprobe_command = [
'ffprobe',
'-hide_banner',
'-v', 'error',
'-loglevel', 'error',
'-show_entries',
'format=duration',
'-of', 'default=noprint_wrappers=1:nokey=1',
media_filepath
]
ffprobe_process = None
if sys.platform == "win32":
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True)
total_duration = float(ffprobe_process.strip())
process = None
if sys.platform == "win32":
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
if process.stdout is None:
continue
stderr_line = (process.stdout.readline().decode("utf-8", errors="replace").strip())
if stderr_line == '' and process.poll() is not None:
break
if "out_time=" in stderr_line:
time_str = stderr_line.split('time=')[1].split()[0]
current_duration = sum(float(x) * 1000 * 60 ** i for i, x in enumerate(reversed(time_str.split(":"))))
if current_duration>0 and current_duration<=total_duration*1000:
percentage = int(current_duration*100/(int(float(total_duration))*1000))
if self.progress_callback and percentage <= 100:
self.progress_callback(info, media_file_display_name, percentage, start_time)
if os.path.isfile(self.output_path):
return self.output_path
else:
return None
except KeyboardInterrupt:
if self.error_messages_callback:
self.error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if self.error_messages_callback:
self.error_messages_callback(e)
else:
print(e)
return
class SRTFileReader:
def __init__(self, srt_file_path, error_messages_callback=None):
# Set the callback before parsing so that __call__ can report errors
self.error_messages_callback = error_messages_callback
self.timed_subtitles = self(srt_file_path)
def __call__(self, srt_file_path):
"""
Read an SRT formatted subtitles file and return the subtitles as a list of tuples
"""
try:
timed_subtitles = []
with open(srt_file_path, 'r', encoding='utf-8') as srt_file:
lines = srt_file.readlines()
# Split the subtitles file into subtitles blocks
subtitle_blocks = []
block = []
for line in lines:
if line.strip() == '':
subtitle_blocks.append(block)
block = []
else:
block.append(line.strip())
subtitle_blocks.append(block)
# Parse each subtitles block and store as tuple in timed_subtitles list
for block in subtitle_blocks:
if block:
# Extract start and end times from subtitles block
start_time_str, end_time_str = block[1].split(' --> ')
time_format = '%H:%M:%S,%f'
start_time_time_delta = datetime.strptime(start_time_str, time_format) - datetime.strptime('00:00:00,000', time_format)
start_time_total_seconds = start_time_time_delta.total_seconds()
end_time_time_delta = datetime.strptime(end_time_str, time_format) - datetime.strptime('00:00:00,000', time_format)
end_time_total_seconds = end_time_time_delta.total_seconds()
# Extract subtitles text from subtitles block
subtitles = ' '.join(block[2:])
timed_subtitles.append(((start_time_total_seconds, end_time_total_seconds), subtitles))
return timed_subtitles
except KeyboardInterrupt:
if self.error_messages_callback:
self.error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if self.error_messages_callback:
self.error_messages_callback(e)
else:
print(e)
return
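# Illustrative usage sketch for SRTFileReader (comments only); parsing happens
# in __init__, the file name is a placeholder:
#   srt_reader = SRTFileReader("subtitle.srt")
#   for (start, end), text in srt_reader.timed_subtitles:
#       print(start, end, text)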
def has_subtitles(media_filepath, error_messages_callback=None):
def which(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def ffmpeg_check():
if which("ffmpeg"):
return "ffmpeg"
if which("ffmpeg.exe"):
return "ffmpeg.exe"
return None
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if not os.path.isfile(media_filepath):
if error_messages_callback:
error_messages_callback(f"The given file does not exist: '{media_filepath}'")
else:
print(f"The given file does not exist: '{media_filepath}'")
raise Exception(f"Invalid file: '{media_filepath}'")
if not ffmpeg_check():
if error_messages_callback:
error_messages_callback("Cannot find ffmpeg executable")
else:
print("Cannot find ffmpeg executable")
raise Exception("Dependency not found: ffmpeg")
try:
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
ffmpeg_cmd = [
'ffmpeg',
'-hide_banner',
'-v', 'error',
'-loglevel', 'error',
'-y',
'-i', media_filepath,
'-map', '0:s:0',
'-f', 'srt',
'-'
]
result = None
if sys.platform == "win32":
result = subprocess.run(ffmpeg_cmd, stdin=open(os.devnull), capture_output=True, text=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
result = subprocess.run(ffmpeg_cmd, stdin=open(os.devnull), capture_output=True, text=True)
if result.stdout:
return True # Subtitles detected
else:
return False # No subtitles detected
except Exception as e:
if error_messages_callback:
error_messages_callback(e)
else:
print(e)
return False
def change_code_page(code_page):
kernel32 = ctypes.windll.kernel32
kernel32.SetConsoleOutputCP(code_page)
kernel32.SetConsoleCP(code_page)
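# Windows-only helper; e.g. change_code_page(65001) switches the console to the
# UTF-8 code page so non-Latin subtitles text prints correctly (see main() below)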
def stop_ffmpeg_windows(error_messages_callback=None):
try:
tasklist_output = subprocess.check_output(['tasklist'], creationflags=subprocess.CREATE_NO_WINDOW).decode('utf-8')
ffmpeg_pid = None
for line in tasklist_output.split('\n'):
if "ffmpeg" in line:
ffmpeg_pid = line.split()[1]
break
if ffmpeg_pid:
subprocess.Popen(['taskkill', '/F', '/T', '/PID', ffmpeg_pid], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
except KeyboardInterrupt:
if error_messages_callback:
error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if error_messages_callback:
error_messages_callback(e)
else:
print(e)
return
def stop_ffmpeg_linux(error_messages_callback=None):
process_name = 'ffmpeg'
try:
output = subprocess.check_output(['ps', '-ef'])
pid = [line.split()[1] for line in output.decode('utf-8').split('\n') if process_name in line][0]
subprocess.call(['kill', '-9', str(pid)])
except IndexError:
pass
except KeyboardInterrupt:
if error_messages_callback:
error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if error_messages_callback:
error_messages_callback(e)
else:
print(e)
return
def remove_temp_files(extension, error_messages_callback=None):
try:
temp_dir = tempfile.gettempdir()
for root, dirs, files in os.walk(temp_dir):
for file in files:
if file.endswith("." + extension):
os.remove(os.path.join(root, file))
except KeyboardInterrupt:
if error_messages_callback:
error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if error_messages_callback:
error_messages_callback(e)
else:
print(e)
return
def is_same_language(src, dst, error_messages_callback=None):
try:
return src.split("-")[0] == dst.split("-")[0]
except Exception as e:
if error_messages_callback:
error_messages_callback(e)
else:
print(e)
return
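# e.g. is_same_language("en-US", "en") -> True, is_same_language("id", "en") -> False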
def check_file_type(media_filepath, error_messages_callback=None):
def which(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def ffprobe_check():
if which("ffprobe"):
return "ffprobe"
if which("ffprobe.exe"):
return "ffprobe.exe"
return None
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if not os.path.isfile(media_filepath):
if error_messages_callback:
error_messages_callback(f"The given file does not exist: '{media_filepath}'")
else:
print(f"The given file does not exist: '{media_filepath}'")
raise Exception(f"Invalid file: '{media_filepath}'")
if not ffprobe_check():
if error_messages_callback:
error_messages_callback("Cannot find ffprobe executable")
else:
print("Cannot find ffprobe executable")
raise Exception("Dependency not found: ffprobe")
try:
ffprobe_cmd = [
'ffprobe',
'-hide_banner',
'-loglevel', 'error',
'-v', 'error',
'-show_format',
'-show_streams',
'-print_format',
'json',
media_filepath
]
output = None
if sys.platform == "win32":
output = subprocess.check_output(ffprobe_cmd, stdin=open(os.devnull), stderr=subprocess.PIPE, creationflags=subprocess.CREATE_NO_WINDOW).decode('utf-8')
else:
output = subprocess.check_output(ffprobe_cmd, stdin=open(os.devnull), stderr=subprocess.PIPE).decode('utf-8')
data = json.loads(output)
if 'streams' in data:
for stream in data['streams']:
if 'codec_type' in stream and stream['codec_type'] == 'audio':
return 'audio'
elif 'codec_type' in stream and stream['codec_type'] == 'video':
return 'video'
except (subprocess.CalledProcessError, json.JSONDecodeError):
pass
except Exception as e:
if error_messages_callback:
error_messages_callback(e)
else:
print(e)
return None
def get_existing_subtitle_language(media_filepath, error_messages_callback=None):
def which(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def ffprobe_check():
if which("ffprobe"):
return "ffprobe"
if which("ffprobe.exe"):
return "ffprobe.exe"
return None
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if not os.path.isfile(media_filepath):
if error_messages_callback:
error_messages_callback(f"The given file does not exist: '{media_filepath}'")
else:
print(f"The given file does not exist: '{media_filepath}'")
raise Exception(f"Invalid file: '{media_filepath}'")
if not ffprobe_check():
if error_messages_callback:
error_messages_callback("Cannot find ffprobe executable")
else:
print("Cannot find ffprobe executable")
raise Exception("Dependency not found: ffprobe")
try:
# Run ffprobe to get stream information
command = [
'ffprobe',
'-hide_banner',
'-v', 'error',
'-loglevel', 'error',
'-of', 'json',
'-show_entries',
'format:stream',
media_filepath
]
output = None
if sys.platform == "win32":
output = subprocess.run(command, stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
output = subprocess.run(command, stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
metadata = json.loads(output.stdout)
streams = metadata['streams']
# Find the subtitles stream with language metadata
subtitle_languages = []
for stream in streams:
if stream['codec_type'] == 'subtitle' and 'tags' in stream and 'language' in stream['tags']:  # ffprobe reports 'subtitle', not 'subtitles'
language = stream['tags']['language']
subtitle_languages.append(language)
return subtitle_languages
except Exception as e:
if error_messages_callback:
error_messages_callback(e)
else:
print(e)
return None
def render_subtitle_to_media(media_filepath, media_type, media_ext, subtitle_path, output_path, error_messages_callback=None):
def which(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def ffprobe_check():
if which("ffprobe"):
return "ffprobe"
if which("ffprobe.exe"):
return "ffprobe.exe"
return None
def ffmpeg_check():
if which("ffmpeg"):
return "ffmpeg"
if which("ffmpeg.exe"):
return "ffmpeg.exe"
return None
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if not os.path.isfile(media_filepath):
if error_messages_callback:
error_messages_callback(f"The given file does not exist: '{media_filepath}'")
else:
print(f"The given file does not exist: '{media_filepath}'")
raise Exception(f"Invalid file: '{media_filepath}'")
if not ffprobe_check():
if error_messages_callback:
error_messages_callback("Cannot find ffprobe executable")
else:
print("Cannot find ffprobe executable")
raise Exception("Dependency not found: ffprobe")
if not ffmpeg_check():
if error_messages_callback:
error_messages_callback("Cannot find ffmpeg executable")
else:
print("Cannot find ffmpeg executable")
raise Exception("Dependency not found: ffmpeg")
try:
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if "\\" in subtitle_path:
subtitle_path = subtitle_path.replace("\\", "/")
if "\\" in output_path:
output_path = output_path.replace("\\", "/")
scale_switch = "'trunc(iw/2)*2':'trunc(ih/2)*2'"
# '-progress -' is required so the out_time= parsing below receives
# machine-readable progress lines on stdout
ffmpeg_command = [
'ffmpeg',
'-hide_banner',
'-loglevel', 'error',
'-v', 'error',
'-y',
'-i', media_filepath,
'-vf', f'subtitles={shlex.quote(subtitle_path)},scale={scale_switch}',
'-c:v', 'libx264',
'-crf', '23',
'-preset', 'medium',
'-c:a', 'copy',
'-progress', '-', '-nostats',
output_path
]
ffprobe_command = [
'ffprobe',
'-hide_banner',
'-v', 'error',
'-loglevel', 'error',
'-show_entries',
'format=duration',
'-of', 'default=noprint_wrappers=1:nokey=1',
media_filepath
]
ffprobe_process = None
if sys.platform == "win32":
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True)
total_duration = float(ffprobe_process.strip())
widgets = [f"Rendering '{language_code}' subtitles into {media_type} : ", Percentage(), ' ', Bar(marker="#"), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
percentage = 0
process = None
if sys.platform == "win32":
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
if process.stdout is None:
continue
stderr_line = (process.stdout.readline().decode("utf-8", errors="replace").strip())
if stderr_line == '' and process.poll() is not None:
break
if "out_time=" in stderr_line:
time_str = stderr_line.split('time=')[1].split()[0]
current_duration = sum(float(x) * 1000 * 60 ** i for i, x in enumerate(reversed(time_str.split(":"))))
if current_duration>0 and current_duration<=total_duration*1000:
percentage = int(current_duration*100/(int(float(total_duration))*1000))
if percentage <= 100:
pbar.update(percentage)
pbar.finish()
return output_path
except Exception as e:
if error_messages_callback:
error_messages_callback(e)
else:
print(e)
return None
def embed_subtitle_to_media(media_filepath, media_type, subtitle_path, language_code, output_path, error_messages_callback=None):
def which(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def ffmpeg_check():
if which("ffmpeg"):
return "ffmpeg"
if which("ffmpeg.exe"):
return "ffmpeg.exe"
return None
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if not os.path.isfile(media_filepath):
if error_messages_callback:
error_messages_callback(f"The given file does not exist: '{media_filepath}'")
else:
print(f"The given file does not exist: '{media_filepath}'")
raise Exception(f"Invalid file: '{media_filepath}'")
if not ffmpeg_check():
if error_messages_callback:
error_messages_callback("Cannot find ffmpeg executable")
else:
print("Cannot find ffmpeg executable")
raise Exception("Dependency not found: ffmpeg")
try:
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if "\\" in subtitle_path:
subtitle_path = subtitle_path.replace("\\", "/")
if "\\" in output_path:
output_path = output_path.replace("\\", "/")
existing_languages = get_existing_subtitle_language(media_filepath)
if language_code in existing_languages:
#print(f"'{language_code}' subtitles stream already existed in '{media_filepath}'")
return
else:
# Determine the next available subtitles index
next_index = len(existing_languages)
ffmpeg_command = [
'ffmpeg',
'-hide_banner',
'-loglevel', 'error',
'-v', 'error',
'-y',
'-i', media_filepath,
'-sub_charenc', 'UTF-8',
'-i', subtitle_path,
'-c:v', 'copy',
'-c:a', 'copy',
'-scodec', 'mov_text',
'-metadata:s:s:' + str(next_index), f'language={shlex.quote(language_code)}',
'-map', '0',
'-map', '1',
'-progress', '-', '-nostats',  # emit machine-readable progress for the out_time= parsing below
output_path
]
ffprobe_command = [
'ffprobe',
'-hide_banner',
'-v', 'error',
'-loglevel', 'error',
'-show_entries',
'format=duration',
'-of', 'default=noprint_wrappers=1:nokey=1',
media_filepath
]
ffprobe_process = None
if sys.platform == "win32":
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True)
total_duration = float(ffprobe_process.strip())
widgets = [f"Embedding '{language_code}' subtitles into {media_type} : ", Percentage(), ' ', Bar(marker="#"), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
percentage = 0
process = None
if sys.platform == "win32":
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
if process.stdout is None:
continue
stderr_line = (process.stdout.readline().decode("utf-8", errors="replace").strip())
if stderr_line == '' and process.poll() is not None:
break
if "out_time=" in stderr_line:
time_str = stderr_line.split('time=')[1].split()[0]
current_duration = sum(float(x) * 1000 * 60 ** i for i, x in enumerate(reversed(time_str.split(":"))))
if current_duration>0 and current_duration<=total_duration*1000:
percentage = int(current_duration*100/(int(float(total_duration))*1000))
if percentage <= 100:
pbar.update(percentage)
pbar.finish()
return output_path
except Exception as e:
if error_messages_callback:
error_messages_callback(e)
else:
print(e)
return None
def remove_subtitles_from_media(media_filepath, output_path, progress_callback=None, error_messages_callback=None):
def which(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def ffmpeg_check():
if which("ffmpeg"):
return "ffmpeg"
if which("ffmpeg.exe"):
return "ffmpeg.exe"
return None
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if not os.path.isfile(media_filepath):
if error_messages_callback:
error_messages_callback(f"The given file does not exist: '{media_filepath}'")
else:
print(f"The given file does not exist: '{media_filepath}'")
raise Exception(f"Invalid file: '{media_filepath}'")
if not ffmpeg_check():
if error_messages_callback:
error_messages_callback("Cannot find ffmpeg executable")
else:
print("Cannot find ffmpeg executable")
raise Exception("Dependency not found: ffmpeg")
try:
if "\\" in media_filepath:
media_filepath = media_filepath.replace("\\", "/")
if "\\" in output_path:
output_path = output_path.replace("\\", "/")
ffmpeg_command = [
'ffmpeg',
'-hide_banner',
'-loglevel', 'error',
'-v', 'error',
'-y',
'-i', media_filepath,
'-c', 'copy',
'-sn',
'-progress', '-', '-nostats',
output_path
]
ffprobe_command = [
'ffprobe',
'-hide_banner',
'-v', 'error',
'-loglevel', 'error',
'-show_entries',
'format=duration',
'-of', 'default=noprint_wrappers=1:nokey=1',
media_filepath
]
ffprobe_process = None
if sys.platform == "win32":
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
ffprobe_process = subprocess.check_output(ffprobe_command, stdin=open(os.devnull), universal_newlines=True)
total_duration = float(ffprobe_process.strip())
widgets = ["Removing subtitles streams from file : ", Percentage(), ' ', Bar(marker="#"), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
percentage = 0
process = None
if sys.platform == "win32":
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
if process.stdout is None:
continue
stderr_line = (process.stdout.readline().decode("utf-8", errors="replace").strip())
if stderr_line == '' and process.poll() is not None:
break
if "out_time=" in stderr_line:
time_str = stderr_line.split('time=')[1].split()[0]
current_duration = sum(float(x) * 1000 * 60 ** i for i, x in enumerate(reversed(time_str.split(":"))))
if current_duration>0 and current_duration<=total_duration*1000:
percentage = int(current_duration*100/(int(float(total_duration))*1000))
if percentage <= 100:
pbar.update(percentage)
pbar.finish()
return output_path
except KeyboardInterrupt:
if error_messages_callback:
error_messages_callback("Cancelling all tasks")
else:
print("Cancelling all tasks")
return
except Exception as e:
if error_messages_callback:
error_messages_callback(e)
else:
print(e)
return None
def show_progress(info, media_file_display_name, progress, start_time):
global pbar
pbar.update(progress)
def show_error_messages(messages):
print(messages)
def main():
global pbar
whisper_models = [
"tiny.en",
"tiny",
"base.en",
"base",
"small.en",
"small",
"medium.en",
"medium",
"large-v1",
"large-v2",
"large"
]
devices = ["auto", "cuda", "cpu"]
compute_types = ["default", "auto", "int8", "int8_float16", "int16", "float16", "float32"]
if sys.platform == "win32":
change_code_page(65001)
stop_ffmpeg_windows(error_messages_callback=show_error_messages)
else:
stop_ffmpeg_linux(error_messages_callback=show_error_messages)
remove_temp_files("wav", error_messages_callback=show_error_messages)
parser = argparse.ArgumentParser()
parser.add_argument('source_path', help="Path to the video or audio files to generate subtitles files (use wildcard for multiple files or separate them with a space character e.g. \"file 1.mp4\" \"file 2.mp4\")", nargs='*')
parser.add_argument('-m', '--model-name', default="small", help="name of whisper model to use")
parser.add_argument('-lm', '--list-models', help="List of whisper models name", action='store_true')
parser.add_argument('-d', '--device', default="auto", help="name of the device to use")
parser.add_argument('-ld', '--list-devices', help="List of supported devices", action='store_true')
parser.add_argument('-ct', '--compute-type', default="auto", help="name of the compute type (quantization) to use")
parser.add_argument('-lct', '--list-compute-types', help="List of supported compute types", action='store_true')
parser.add_argument('-t', '--cpu-threads', default=0, help="Number of threads to use when running on CPU")
parser.add_argument('-nw', '--num-workers', default=1, help="Number of concurrent calls when running whisper model")
parser.add_argument('-S', '--src-language', help="Language code of the audio language spoken in video/audio source_path", default="auto")
parser.add_argument('-D', '--dst-language', help="Desired translation language code for the subtitles", default=None)
parser.add_argument('-lS', '--list-src-languages', help="List all available src_languages (whisper supported languages)", action='store_true')
parser.add_argument('-lD', '--list-dst-languages', help="List all available dst_languages (google translate supported languages)", action='store_true')
parser.add_argument('-F', '--format', help="Desired subtitles format", default="srt")
parser.add_argument('-lF', '--list-formats', help="List all supported subtitles formats", action='store_true')
parser.add_argument('-c', '--concurrency', help="Number of concurrent calls for Google Translate API", type=int, default=10)
parser.add_argument('-es', '--embed-src', help="Boolean value (True or False) for embedding original language subtitles file into video file", type=bool, default=False)
parser.add_argument('-ed', '--embed-dst', help="Boolean value (True or False) for embedding translated subtitles file into video file", type=bool, default=False)
parser.add_argument('-fr', '--force-recognize', help="Boolean value (True or False) for re-recognizing media files even if they already have a subtitles stream", type=bool, default=False)
parser.add_argument('-v', '--version', action='version', version=VERSION)
args = parser.parse_args()
src_language = args.src_language
dst_language = args.dst_language
model_name = args.model_name
if model_name.endswith(".en"):
print(f"{model_name} is an English-only model, forcing English detection.")
args.src_language = "en"
elif args.src_language != "auto":
args.src_language = src_language
if args.list_models:
print("List of whisper models:")
for model_name in whisper_models:
print(model_name)
return 0
if args.list_devices:
print("List of supported devices:")
for device in devices:
print(device)
return 0
if args.list_compute_types:
print("List of supported compute types:")
for compute_type in compute_types:
print(compute_type)
return 0
# Load the model only after the pure listing options have returned,
# so that e.g. --list-models does not trigger a model download
model = WhisperModel(model_name, device=args.device, compute_type=args.compute_type, cpu_threads=int(args.cpu_threads), num_workers=int(args.num_workers))
whisper_language = WhisperLanguage()
google_language = GoogleLanguage()
google_unsupported_languages = ["auto", "ba", "br", "fo", "nn", "oc", "tl", "bo"]
if args.list_src_languages:
print("List of whisper supported languages:")
for whisper_code, whisper_name in whisper_language.name_of_code.items():
print("%-8s : %s" %(whisper_code, whisper_name))
return 0
if args.list_dst_languages:
print("List of google translate supported languages:")
for google_code, google_name in sorted(google_language.name_of_code.items()):
print("%-8s : %s" %(google_code, google_name))
return 0
if args.src_language not in whisper_language.name_of_code.keys():
print("Source language is not supported. Run with --list-whisper-languages to see all whisper supported languages.")
return 1
if args.dst_language:
if args.dst_language not in google_language.name_of_code.keys():
print("Destination language is not supported. Run with --list-dst-languages to see all google translate supported languages.")
return 1
if not is_same_language(args.src_language, args.dst_language, error_messages_callback=show_error_messages):
do_translate = True
else:
do_translate = False
else:
do_translate = False
if args.list_formats:
print("List of supported subtitles formats:")
for subtitle_format in SubtitleFormatter.supported_formats:
print(f"{subtitle_format}")
return 0
if args.format not in SubtitleFormatter.supported_formats:
print("Subtitles format is not supported. Run with --list-formats to see all supported formats.")
return 1
if not args.source_path:
parser.print_help(sys.stderr)
return 1
completed_tasks = 0
media_filepaths = []
arg_filepaths = []
invalid_media_filepaths = []
not_exist_filepaths = []
argpath = None
media_type = None
media_format = None
args_source_path = args.source_path
subtitle_format = args.format
if (not "*" in str(args_source_path)) and (not "?" in str(args_source_path)):
for filepath in args_source_path:
fpath = Path(filepath)
if not os.path.isfile(fpath):
not_exist_filepaths.append(filepath)
if sys.platform == "win32":
for i in range(len(args.source_path)):
if ("[" or "]") in args.source_path[i]:
placeholder = "#TEMP#"
args_source_path[i] = args.source_path[i].replace("[", placeholder)
args_source_path[i] = args_source_path[i].replace("]", "[]]")
args_source_path[i] = args_source_path[i].replace(placeholder, "[[]")
for arg in args_source_path:
if not sys.platform == "win32" :
arg = escape(arg)
arg_filepaths += glob(arg)
if arg_filepaths:
for argpath in arg_filepaths:
if os.path.isfile(argpath):
if check_file_type(argpath, error_messages_callback=show_error_messages) == 'video':
media_filepaths.append(argpath)
elif check_file_type(argpath, error_messages_callback=show_error_messages) == 'audio':
media_filepaths.append(argpath)
else:
invalid_media_filepaths.append(argpath)
else:
not_exist_filepaths.append(argpath)
if invalid_media_filepaths:
for invalid_media_filepath in invalid_media_filepaths:
msg = f"'{invalid_media_filepath}' is not valid video or audio files"
print(msg)
if not_exist_filepaths:
for not_exist_filepath in not_exist_filepaths:
msg = f"'{not_exist_filepath}' is not exist"
print(msg)
if (not "*" in str(args_source_path)) and (not "?" in str(args_source_path)):
sys.exit(0)
if not arg_filepaths and not not_exist_filepaths:
print("No any files matching filenames you typed")
sys.exit(0)
pool = multiprocessing.Pool(args.concurrency)
transcribe_end_time = None
transcribe_elapsed_time = None
transcribe_start_time = time.time()
task = "transcribe"
total_duration = 0
src_subtitle_filepath = None
dst_subtitle_filepath = None
ffmpeg_src_language_code = None
ffmpeg_dst_language_code = None
embedded_media_filepath = None
if args.src_language in google_unsupported_languages and do_translate:
task = "translate"
src_language = "en"
removed_media_filepaths = []
processed_list = []
dst_language = args.dst_language
# CHECK SUBTITLE STREAM PART
if args.force_recognize == False:
print("CHECKING EXISTING SUBTITLES STREAMS")
print("===================================")
# CHECKING ffmpeg_src_language_code SUBTITLES STREAM ONLY, IF EXISTS WE PRINT IT AND EXTRACT IT
if do_translate == False:
for media_filepath in media_filepaths:
print(f"Checking '{media_filepath}'")
media_type = check_file_type(media_filepath, error_messages_callback=show_error_messages)
if media_type == "audio":
print("Audio file won't has subtitles streams, skip checking")
continue
if args.src_language == "auto":
try:
widgets = ["Converting to a temporary WAV file : ", Percentage(), ' ', Bar(), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
wav_converter = WavConverter(progress_callback=show_progress, error_messages_callback=show_error_messages)
wav_filepath, sample_rate = wav_converter(media_filepath)
pbar.finish()
segments, info = model.transcribe(wav_filepath)
src_language = info.language
print(f"Detected language : {info.language} (probability = {info.language_probability})")
except KeyboardInterrupt:
pbar.finish()
pool.terminate()
pool.close()
pool.join()
print("Cancelling all tasks")
if sys.platform == "win32":
stop_ffmpeg_windows(error_messages_callback=show_error_messages)
else:
stop_ffmpeg_linux(error_messages_callback=show_error_messages)
remove_temp_files("wav")
sys.exit(1)
except Exception as e:
if "KeyboardInterrupt" not in str(e):
pbar.finish()
pool.terminate()
pool.close()
pool.join()
print(e)
if sys.platform == "win32":
stop_ffmpeg_windows(error_messages_callback=show_error_messages)
else:
stop_ffmpeg_linux(error_messages_callback=show_error_messages)
remove_temp_files("wav")
sys.exit(1)
else:
src_language = args.src_language
if src_language in google_unsupported_languages and args.force_recognize == False:
print(f"Language '{src_language}' is not supported by Google Translate API")
print(f"Removing '{media_filepath}' from speech recognition process list")
removed_media_filepaths.append(media_filepath)
else:
ffmpeg_src_language_code = google_language.ffmpeg_code_of_code[src_language]
subtitle_stream_parser = SubtitleStreamParser(error_messages_callback=show_error_messages)
subtitle_streams_data = subtitle_stream_parser(media_filepath)
if subtitle_streams_data:
src_subtitle_stream_timed_subtitles = subtitle_stream_parser.timed_subtitles_of_language(ffmpeg_src_language_code)
if ffmpeg_src_language_code in subtitle_stream_parser.languages():
print(f"Is '{ffmpeg_src_language_code}' subtitles stream exist : Yes")
subtitle_stream_regions = []
subtitle_stream_transcripts = []
for entry in src_subtitle_stream_timed_subtitles:
subtitle_stream_regions.append(entry[0])
subtitle_stream_transcripts.append(entry[1])
base, ext = os.path.splitext(media_filepath)
src_subtitle_filepath = f"{base}.{src_language}.{subtitle_format}"
print(f"Extracting '{ffmpeg_src_language_code}' subtitles stream as : '{src_subtitle_filepath}'")
writer = SubtitleWriter(subtitle_stream_regions, subtitle_stream_transcripts, subtitle_format, error_messages_callback=show_error_messages)
writer.write(src_subtitle_filepath)
# no translate process as instructed in command arguments
# if args.embed_src is True we don't need to embed it because the src subtitles stream already exists
if args.embed_src == True and src_subtitle_stream_timed_subtitles and src_subtitle_stream_timed_subtitles != []:
print(f"No need to embed '{ffmpeg_src_language_code}' subtitles stream because it already exists")
if args.force_recognize == False:
print(f"Removing '{media_filepath}' from speech recognition process list")
removed_media_filepaths.append(media_filepath)
if os.path.isfile(src_subtitle_filepath):
completed_tasks += 1
#print(f"args.force_recognize == False, do_translate == False, media_type == 'video', subtitle stream = exist : completed_tasks = {completed_tasks}")
else:
print(f"Is '{ffmpeg_src_language_code}' subtitles stream exist : No")
print("")
if not media_filepaths:
transcribe_end_time = time.time()
transcribe_elapsed_time = transcribe_end_time - transcribe_start_time
transcribe_elapsed_time_seconds = timedelta(seconds=int(transcribe_elapsed_time))
transcribe_elapsed_time_str = str(transcribe_elapsed_time_seconds)
hour, minute, second = transcribe_elapsed_time_str.split(":")
msg = "Total running time : %s:%s:%s" %(hour.zfill(2), minute, second)
print(msg)
sys.exit(0)
# CHECKING ffmpeg_src_language_code AND ffmpeg_dst_language_code SUBTITLES STREAMS, IF EXISTS WE PRINT IT AND EXTRACT IT
# IF ONE OF THEM (ffmpeg_src_language_code OR ffmpeg_dst_language_code) NOT EXIST, WE TRANSLATE IT AND THEN EMBED IT
elif do_translate == True:
for media_filepath in media_filepaths:
print(f"Checking '{media_filepath}'")
media_type = check_file_type(media_filepath, error_messages_callback=show_error_messages)
if media_type == "audio":
print("Audio file won't has subtitles streams, skip checking")
continue
if args.src_language == "auto":
try:
widgets = ["Converting to a temporary WAV file : ", Percentage(), ' ', Bar(), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
wav_converter = WavConverter(progress_callback=show_progress, error_messages_callback=show_error_messages)
wav_filepath, sample_rate = wav_converter(media_filepath)
pbar.finish()
segments, info = model.transcribe(wav_filepath)
src_language = info.language
print(f"Detected language : {info.language} (probability = {info.language_probability})")
except KeyboardInterrupt:
pbar.finish()
pool.terminate()
pool.close()
pool.join()
print("Cancelling all tasks")
if sys.platform == "win32":
stop_ffmpeg_windows(error_messages_callback=show_error_messages)
else:
stop_ffmpeg_linux(error_messages_callback=show_error_messages)
remove_temp_files("wav")
sys.exit(1)
except Exception as e:
if "KeyboardInterrupt" not in str(e):
pbar.finish()
pool.terminate()
pool.close()
pool.join()
print(e)
if sys.platform == "win32":
stop_ffmpeg_windows(error_messages_callback=show_error_messages)
else:
stop_ffmpeg_linux(error_messages_callback=show_error_messages)
remove_temp_files("wav")
sys.exit(1)
else:
src_language = args.src_language
if src_language in google_unsupported_languages and args.force_recognize == False:
print(f"Language '{src_language}' is not supported by Google Translate API")
print(f"Removing '{media_filepath}' from speech recognition process list")
removed_media_filepaths.append(media_filepath)
else:
ffmpeg_src_language_code = google_language.ffmpeg_code_of_code[src_language]
ffmpeg_dst_language_code = google_language.ffmpeg_code_of_code[dst_language]
subtitle_stream_parser = SubtitleStreamParser(error_messages_callback=show_error_messages)
subtitle_streams_data = subtitle_stream_parser(media_filepath)
if subtitle_streams_data:
src_subtitle_stream_timed_subtitles = subtitle_stream_parser.timed_subtitles_of_language(ffmpeg_src_language_code)
dst_subtitle_stream_timed_subtitles = subtitle_stream_parser.timed_subtitles_of_language(ffmpeg_dst_language_code)
# ffmpeg_src_language_code subtitles stream exists, we print it and extract it
if ffmpeg_src_language_code in subtitle_stream_parser.languages():
print(f"Is '{ffmpeg_src_language_code}' subtitles stream exist : Yes")
subtitle_stream_regions = []
subtitle_stream_transcripts = []
for entry in src_subtitle_stream_timed_subtitles:
subtitle_stream_regions.append(entry[0])
subtitle_stream_transcripts.append(entry[1])
base, ext = os.path.splitext(media_filepath)
src_subtitle_filepath = f"{base}.{src_language}.{subtitle_format}"
print(f"Extracting '{ffmpeg_src_language_code}' subtitles stream as : '{src_subtitle_filepath}'")
writer = SubtitleWriter(subtitle_stream_regions, subtitle_stream_transcripts, subtitle_format, error_messages_callback=show_error_messages)
writer.write(src_subtitle_filepath)
# ffmpeg_src_language_code subtitles stream does not exist, just report it
else:
print(f"Does '{ffmpeg_src_language_code}' subtitles stream exist : No")
# ffmpeg_dst_language_code subtitles stream exists, we print it and extract it
if ffmpeg_dst_language_code in subtitle_stream_parser.languages():
print(f"Does '{ffmpeg_dst_language_code}' subtitles stream exist : Yes")
subtitle_stream_regions = []
subtitle_stream_transcripts = []
for entry in dst_subtitle_stream_timed_subtitles:
subtitle_stream_regions.append(entry[0])
subtitle_stream_transcripts.append(entry[1])
base, ext = os.path.splitext(media_filepath)
dst_subtitle_filepath = f"{base}.{dst_language}.{subtitle_format}"
writer = SubtitleWriter(subtitle_stream_regions, subtitle_stream_transcripts, subtitle_format, error_messages_callback=show_error_messages)
print(f"Extracting '{ffmpeg_dst_language_code}' subtitles stream as : '{dst_subtitle_filepath}'")
writer.write(dst_subtitle_filepath)
# ffmpeg_dst_language_code subtitles stream does not exist, just report it
else:
print(f"Does '{ffmpeg_dst_language_code}' subtitles stream exist : No")
# ffmpeg_src_language_code subtitles stream = not exist,
# ffmpeg_dst_language_code subtitles stream = exist,
# so we translate it from 'dst_language' to 'src_language'
if ffmpeg_dst_language_code in subtitle_stream_parser.languages() and ffmpeg_src_language_code not in subtitle_stream_parser.languages():
if dst_subtitle_stream_timed_subtitles and dst_subtitle_stream_timed_subtitles != []:
prompt = "Translating from %s to %s : " %(dst_language.center(8), src_language.center(8))
widgets = [prompt, Percentage(), ' ', Bar(), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=len(dst_subtitle_stream_timed_subtitles)).start()
transcript_translator = SentenceTranslator(src=dst_language, dst=src_language, error_messages_callback=show_error_messages)
translated_subtitle_stream_transcripts = []
for i, translated_subtitle_stream_transcript in enumerate(pool.imap(transcript_translator, subtitle_stream_transcripts)):
translated_subtitle_stream_transcripts.append(translated_subtitle_stream_transcript)
pbar.update(i)
pbar.finish()
base, ext = os.path.splitext(media_filepath)
src_subtitle_filepath = f"{base}.{src_language}.{subtitle_format}"
translation_writer = SubtitleWriter(subtitle_stream_regions, translated_subtitle_stream_transcripts, subtitle_format, error_messages_callback=show_error_messages)
translation_writer.write(src_subtitle_filepath)
print(f"Translated subtitles file saved as : '{src_subtitle_filepath}'")
if args.force_recognize == False:
print(f"Removing '{media_filepath}' from speech recognition process list")
removed_media_filepaths.append(media_filepath)
if args.embed_src and dst_subtitle_stream_timed_subtitles and dst_subtitle_stream_timed_subtitles != []:
ffmpeg_src_language_code = google_language.ffmpeg_code_of_code[src_language]
base, ext = os.path.splitext(media_filepath)
if ext[1:] == "ts":
media_format = "mp4"
else:
media_format = ext[1:]
src_tmp_embedded_media_filepath = f"{base}.{ffmpeg_src_language_code}.tmp.embedded.{media_format}"
embedded_media_filepath = f"{base}.{ffmpeg_src_language_code}.embedded.{media_format}"
widgets = [f"Embedding '{ffmpeg_src_language_code}' subtitles into {media_type} : ", Percentage(), ' ', Bar(marker="#"), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
subtitle_embedder = MediaSubtitleEmbedder(subtitle_path=src_subtitle_filepath, language=ffmpeg_src_language_code, output_path=src_tmp_embedded_media_filepath, progress_callback=show_progress, error_messages_callback=show_error_messages)
src_tmp_output = subtitle_embedder(media_filepath)
pbar.finish()
if os.path.isfile(src_tmp_output):
shutil.copy(src_tmp_output, embedded_media_filepath)
os.remove(src_tmp_output)
if os.path.isfile(embedded_media_filepath):
print(f"Subtitles embedded {media_type} file saved as : '{embedded_media_filepath}'")
# if args.embed_dst is True we don't need to embed it because the dst subtitles stream already exists
if args.embed_dst == True and dst_subtitle_stream_timed_subtitles and dst_subtitle_stream_timed_subtitles != []:
print(f"No need to embed '{ffmpeg_dst_language_code}' subtitles stream because it already exists")
# ffmpeg_src_language_code subtitles stream = exist,
# ffmpeg_dst_language_code subtitles stream = not exist,
# so we translate it from 'src_language' to 'dst_language'
if ffmpeg_dst_language_code not in subtitle_stream_parser.languages() and ffmpeg_src_language_code in subtitle_stream_parser.languages():
if src_subtitle_stream_timed_subtitles and src_subtitle_stream_timed_subtitles != []:
prompt = "Translating from %s to %s : " %(src_language.center(8), dst_language.center(8))
widgets = [prompt, Percentage(), ' ', Bar(), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=len(src_subtitle_stream_timed_subtitles)).start()
transcript_translator = SentenceTranslator(src=src_language, dst=dst_language, error_messages_callback=show_error_messages)
translated_subtitle_stream_transcripts = []
for i, translated_subtitle_stream_transcript in enumerate(pool.imap(transcript_translator, subtitle_stream_transcripts)):
translated_subtitle_stream_transcripts.append(translated_subtitle_stream_transcript)
pbar.update(i)
pbar.finish()
base, ext = os.path.splitext(media_filepath)
dst_subtitle_filepath = f"{base}.{dst_language}.{subtitle_format}"
translation_writer = SubtitleWriter(subtitle_stream_regions, translated_subtitle_stream_transcripts, subtitle_format, error_messages_callback=show_error_messages)
translation_writer.write(dst_subtitle_filepath)
print(f"Translated subtitles file saved as : '{dst_subtitle_filepath}'")
if args.force_recognize == False:
print(f"Removing '{media_filepath}' from speech recognition process list")
removed_media_filepaths.append(media_filepath)
if args.embed_dst == True and src_subtitle_stream_timed_subtitles and src_subtitle_stream_timed_subtitles != []:
ffmpeg_dst_language_code = google_language.ffmpeg_code_of_code[dst_language]
base, ext = os.path.splitext(media_filepath)
if ext[1:] == "ts":
media_format = "mp4"
else:
media_format = ext[1:]
dst_tmp_embedded_media_filepath = f"{base}.{ffmpeg_dst_language_code}.tmp.embedded.{media_format}"
embedded_media_filepath = f"{base}.{ffmpeg_dst_language_code}.embedded.{media_format}"
widgets = [f"Embedding '{ffmpeg_dst_language_code}' subtitles into {media_type} : ", Percentage(), ' ', Bar(marker="#"), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
subtitle_embedder = MediaSubtitleEmbedder(subtitle_path=dst_subtitle_filepath, language=ffmpeg_dst_language_code, output_path=dst_tmp_embedded_media_filepath, progress_callback=show_progress, error_messages_callback=show_error_messages)
dst_tmp_output = subtitle_embedder(media_filepath)
pbar.finish()
if os.path.isfile(dst_tmp_output):
shutil.copy(dst_tmp_output, embedded_media_filepath)
os.remove(dst_tmp_output)
if os.path.isfile(embedded_media_filepath):
print(f"Subtitles embedded {media_type} file saved as : '{embedded_media_filepath}'")
# if args.embed_src is True then no need to embed it because the src subtitles stream already exists
if args.embed_src == True and src_subtitle_stream_timed_subtitles and src_subtitle_stream_timed_subtitles != []:
print(f"No need to embed '{ffmpeg_src_language_code}' subtitles stream because it already exists")
# ffmpeg_dst_language_code subtitles stream = exist,
# ffmpeg_src_language_code subtitles stream = exist,
# so we remove media_filepath from the list of files to be processed
elif ffmpeg_dst_language_code in subtitle_stream_parser.languages() and ffmpeg_src_language_code in subtitle_stream_parser.languages():
# remove media_filepath from transcribe processed_list because all needed srt files already saved
if args.force_recognize == False:
print(f"Removing '{media_filepath}' from speech recognition process list")
removed_media_filepaths.append(media_filepath)
# no need to translate because both languages' subtitles files were already saved
# if args.embed_src is True we don't need to embed it because the src subtitles stream already exists
if args.embed_src == True and src_subtitle_stream_timed_subtitles and src_subtitle_stream_timed_subtitles != []:
print(f"No need to embed '{ffmpeg_src_language_code}' subtitles stream because it already exists")
# if args.embed_dst is True we don't need to embed it because the dst subtitles stream already exists
if args.embed_dst == True and dst_subtitle_stream_timed_subtitles and dst_subtitle_stream_timed_subtitles != []:
print(f"No need to embed '{ffmpeg_dst_language_code}' subtitles stream because it already exists")
if (src_subtitle_filepath and os.path.isfile(src_subtitle_filepath)) or (dst_subtitle_filepath and os.path.isfile(dst_subtitle_filepath)):
if args.force_recognize == False:
completed_tasks += 1
#print(f"\nargs.force_recognize == False, do_translate == True, media_type == 'video', subtitle stream = exist : completed_tasks = {completed_tasks}\n")
print("")
print("")
if not media_filepaths:
transcribe_end_time = time.time()
transcribe_elapsed_time = transcribe_end_time - transcribe_start_time
transcribe_elapsed_time_seconds = timedelta(seconds=int(transcribe_elapsed_time))
transcribe_elapsed_time_str = str(transcribe_elapsed_time_seconds)
hour, minute, second = transcribe_elapsed_time_str.split(":")
msg = "Total running time : %s:%s:%s" %(hour.zfill(2), minute, second)
print(msg)
sys.exit(0)
if args.force_recognize == True:
# SUBTITLES STREAMS REMOVER PART (IF args.force_recognize == True)
print("FORCE RECOGNIZE FLAG CHECK")
print("==========================")
# if args.force_recognize is true then we need to remove the subtitle streams and save the result as a new media file to be processed by speech recognition
for media_filepath in media_filepaths:
print(f"Checking '{media_filepath}'")
media_type = check_file_type(media_filepath, error_messages_callback=show_error_messages)
if media_type == "video" and args.force_recognize == True:
force_recognize_media_file_format = None
base, ext = os.path.splitext(media_filepath)
if ext[1:] == "ts":
force_recognize_media_file_format = "mp4"
else:
force_recognize_media_file_format = ext[1:]
#print(f"media_filepath = {media_filepath}")
subtitle_stream_parser = SubtitleStreamParser()
subtitle_streams_data = subtitle_stream_parser(media_filepath)
#print(f"subtitle_streams_data = {subtitle_streams_data}")
#print(f"subtitle_stream_parser.timed_subtitles_of_index(1) = {subtitle_stream_parser.timed_subtitles_of_index(1)}")
if subtitle_streams_data and subtitle_stream_parser.timed_subtitles_of_index(1) != []:
tmp_subtitle_removed_media_filepath = f"{base}.tmp.subtitles.removed.media_filepath.{force_recognize_media_file_format}"
subtitle_removed_media_filepath = f"{base}.force.recognize.{force_recognize_media_file_format}"
widgets = ["Removing subtitles streams from file : ", Percentage(), ' ', Bar(marker="#"), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
subtitle_remover = MediaSubtitleRemover(output_path=tmp_subtitle_removed_media_filepath, progress_callback=show_progress, error_messages_callback=show_error_messages)
tmp_output = subtitle_remover(media_filepath)
pbar.finish()
if os.path.isfile(tmp_output):
shutil.copy(tmp_output, subtitle_removed_media_filepath)
os.remove(tmp_output)
processed_list.append(subtitle_removed_media_filepath)
print(f"Subtitles removed {media_type} file saved as : '{subtitle_removed_media_filepath}'")
else:
print("Nothing to remove")
if media_filepath not in processed_list and media_filepath not in removed_media_filepaths:
processed_list.append(media_filepath)
else:
# only audio files reach this branch, since args.force_recognize is always True here
if media_type == "audio":
print(f"'{media_filepath}' is an audio file, nothing to remove")
if media_filepath not in processed_list and media_filepath not in removed_media_filepaths:
processed_list.append(media_filepath)
print("")
if args.force_recognize == False and processed_list == []:
for media_filepath in media_filepaths:
if media_filepath not in removed_media_filepaths:
processed_list.append(media_filepath)
if processed_list:
# START THE TRANSCRIBE PROCESS
print("PERFORMING SPEECH RECOGNITION FOR MEDIA FILES THAT HAVE NO SUBTITLES STREAMS OR FORCED TO BE RECOGNIZED")
print("=========================================================================================================")
for media_filepath in processed_list:
print(f"Processing '{media_filepath}'")
media_type = check_file_type(media_filepath, error_messages_callback=show_error_messages)
try:
widgets = ["Converting to a temporary WAV file : ", Percentage(), ' ', Bar(), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
wav_converter = WavConverter(progress_callback=show_progress, error_messages_callback=show_error_messages)
wav_filepath, sample_rate = wav_converter(media_filepath)
pbar.finish()
if args.src_language == "auto":
segments, info = model.transcribe(wav_filepath)
src_language = info.language
print(f"Detected language : {info.language} (probability = {info.language_probability})")
total_duration = info.duration
ffmpeg_src_language_code = google_language.ffmpeg_code_of_code[src_language]
if src_language in google_unsupported_languages and do_translate:
task = "translate"
src_language = "en"
else:
segments, info = model.transcribe(wav_filepath, language=src_language, task=task)
total_duration = info.duration
if segments:
widgets = ["Performing speech recognition : ", Percentage(), ' ', Bar(marker='#'), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
timed_subtitles = []
regions = []
transcripts = []
for segment in segments:
progress = int(round(float(segment.end))*100/total_duration)
regions.append((segment.start, segment.end))
transcripts.append(segment.text)
pbar.update(progress)
pbar.finish()
timed_subtitles = [(r, t) for r, t in zip(regions, transcripts) if t]
base, ext = os.path.splitext(media_filepath)
src_subtitle_filepath = f"{base}.{src_language}.{subtitle_format}"
writer = SubtitleWriter(regions, transcripts, subtitle_format, error_messages_callback=show_error_messages)
writer.write(src_subtitle_filepath)
if do_translate == True:
timed_subtitles = writer.timed_subtitles
created_regions = []
created_subtitles = []
for entry in timed_subtitles:
created_regions.append(entry[0])
created_subtitles.append(entry[1])
prompt = "Translating from %s to %s : " %(src_language.center(8), dst_language.center(8))
widgets = [prompt, Percentage(), ' ', Bar(marker='#'), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=len(timed_subtitles)).start()
transcript_translator = SentenceTranslator(src=src_language, dst=dst_language, error_messages_callback=show_error_messages)
translated_subtitles = []
for i, translated_subtitle in enumerate(pool.imap(transcript_translator, created_subtitles)):
translated_subtitles.append(translated_subtitle)
pbar.update(i)
pbar.finish()
base, ext = os.path.splitext(media_filepath)
dst_subtitle_filepath = f"{base}.{dst_language}.{subtitle_format}"
translation_writer = SubtitleWriter(created_regions, translated_subtitles, subtitle_format, error_messages_callback=show_error_messages)
translation_writer.write(dst_subtitle_filepath)
print(f"Original subtitles file saved as : '{src_subtitle_filepath}'")
print(f"Translated subtitles file saved as : '{dst_subtitle_filepath}'")
if media_type == "audio":
completed_tasks += 1
#print(f"\nmedia_filepath = {media_filepath}, do_translate == True, media_type == 'audio' : completed_tasks = {completed_tasks}\n")
elif media_type == "video" and args.embed_src == False and args.embed_dst == False:
completed_tasks += 1
#print(f"\nmedia_filepath = {media_filepath}, do_translate == True, media_type == 'video', args.embed_src == False and args.embed_dst == False : completed_tasks = {completed_tasks}\n")
elif do_translate == False:
print(f"Subtitles file saved as : '{src_subtitle_filepath}'")
if media_type == "audio":
completed_tasks += 1
#print(f"\nmedia_filepath = {media_filepath}, do_translate == False, media_type == 'audio' : completed_tasks = {completed_tasks}\n")
elif media_type == "video" and args.embed_src == False:
completed_tasks += 1
#print(f"\nmedia_filepath = {media_filepath}, do_translate == False, media_type == 'video', args.embed_src == False : completed_tasks = {completed_tasks}\n")
# EMBEDDING SUBTITLES FILE
embedded_media_filepath = None
if do_translate == False:
media_type = check_file_type(media_filepath, error_messages_callback=show_error_messages)
if media_type == "audio" and args.embed_src == True:
print("Subtitles can only be embedded into video file, not audio file")
if media_type == "video" and args.embed_src == True:
ffmpeg_src_language_code = google_language.ffmpeg_code_of_code[src_language]
base, ext = os.path.splitext(media_filepath)
if ext[1:] == "ts":
media_format = "mp4"
else:
media_format = ext[1:]
src_tmp_embedded_media_filepath = f"{base}.{ffmpeg_src_language_code}.tmp.embedded.{media_format}"
embedded_media_filepath = f"{base}.{ffmpeg_src_language_code}.embedded.{media_format}"
widgets = [f"Embedding '{ffmpeg_src_language_code}' subtitles into {media_type} : ", Percentage(), ' ', Bar(marker="#"), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
subtitle_embedder = MediaSubtitleEmbedder(subtitle_path=src_subtitle_filepath, language=ffmpeg_src_language_code, output_path=src_tmp_embedded_media_filepath, progress_callback=show_progress, error_messages_callback=show_error_messages)
src_tmp_output = subtitle_embedder(media_filepath)
pbar.finish()
if os.path.isfile(src_tmp_output):
shutil.copy(src_tmp_output, embedded_media_filepath)
os.remove(src_tmp_output)
print(f"Subtitles embedded {media_type} file saved as : '{embedded_media_filepath}'")
completed_tasks += 1
#print(f"\ndo_translate == False, media_type == 'video', args.embed_src == True: completed_tasks = {completed_tasks}\n")
elif do_translate == True:
media_type = check_file_type(media_filepath, error_messages_callback=show_error_messages)
if media_type == "audio" and (args.embed_src == True or args.embed_src == True):
print("Subtitles can only be embedded into video file, not audio file")
if media_type == "video" and args.embed_src == True and args.embed_dst == True:
ffmpeg_src_language_code = google_language.ffmpeg_code_of_code[src_language]
ffmpeg_dst_language_code = google_language.ffmpeg_code_of_code[dst_language]
base, ext = os.path.splitext(media_filepath)
if ext[1:] == "ts":
media_format = "mp4"
else:
media_format = ext[1:]
src_tmp_embedded_media_filepath = f"{base}.{ffmpeg_src_language_code}.tmp.embedded.{media_format}"
src_dst_tmp_embedded_media_filepath = f"{base}.{ffmpeg_src_language_code}.{ffmpeg_dst_language_code}.tmp.embedded.{media_format}"
embedded_media_filepath = f"{base}.{ffmpeg_src_language_code}.{ffmpeg_dst_language_code}.embedded.{media_format}"
widgets = [f"Embedding '{ffmpeg_src_language_code}' subtitles into {media_type} : ", Percentage(), ' ', Bar(marker="#"), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
subtitle_embedder = MediaSubtitleEmbedder(subtitle_path=src_subtitle_filepath, language=ffmpeg_src_language_code, output_path=src_tmp_embedded_media_filepath, progress_callback=show_progress, error_messages_callback=show_error_messages)
src_tmp_output = subtitle_embedder(media_filepath)
pbar.finish()
if os.path.isfile(src_tmp_output) and os.path.isfile(dst_subtitle_filepath):
widgets = [f"Embedding '{ffmpeg_dst_language_code}' subtitles into {media_type} : ", Percentage(), ' ', Bar(marker="#"), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
subtitle_embedder = MediaSubtitleEmbedder(subtitle_path=dst_subtitle_filepath, language=ffmpeg_dst_language_code, output_path=src_dst_tmp_embedded_media_filepath, progress_callback=show_progress, error_messages_callback=show_error_messages)
src_dst_tmp_output = subtitle_embedder(src_tmp_output)
pbar.finish()
if os.path.isfile(src_dst_tmp_output):
shutil.copy(src_dst_tmp_output, embedded_media_filepath)
print(f"Subtitles embedded {media_type} file saved as : '{embedded_media_filepath}'")
completed_tasks += 1
#print(f"\ndo_translate == True, media_type == 'video', args.embed_src == True and args.embed_dst == True : completed_tasks = {completed_tasks}\n")
else:
print("Unknown error!")
if os.path.isfile(src_dst_tmp_output):
os.remove(src_dst_tmp_output)
if os.path.isfile(src_tmp_output):
os.remove(src_tmp_output)
elif media_type == "video" and args.embed_src == True and args.embed_dst == False:
ffmpeg_src_language_code = google_language.ffmpeg_code_of_code[src_language]
base, ext = os.path.splitext(media_filepath)
if ext[1:] == "ts":
media_format = "mp4"
else:
media_format = ext[1:]
src_tmp_embedded_media_filepath = f"{base}.{ffmpeg_src_language_code}.tmp.embedded.{media_format}"
embedded_media_filepath = f"{base}.{ffmpeg_src_language_code}.embedded.{media_format}"
widgets = [f"Embedding '{ffmpeg_src_language_code}' subtitles into {media_type} : ", Percentage(), ' ', Bar(marker="#"), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
subtitle_embedder = MediaSubtitleEmbedder(subtitle_path=src_subtitle_filepath, language=ffmpeg_src_language_code, output_path=src_tmp_embedded_media_filepath, progress_callback=show_progress, error_messages_callback=show_error_messages)
src_tmp_output = subtitle_embedder(media_filepath)
pbar.finish()
if os.path.isfile(src_tmp_output):
shutil.copy(src_tmp_output, embedded_media_filepath)
os.remove(src_tmp_output)
print(f"Subtitles embedded {media_type} file saved as : '{embedded_media_filepath}'")
completed_tasks += 1
#print(f"\ndo_translate == True, media_type == 'video', args.embed_src == True and args.embed_dst == False : completed_tasks = {completed_tasks}\n")
else:
print("Unknown error!")
elif media_type == "video" and args.embed_src == False and args.embed_dst == True:
ffmpeg_dst_language_code = google_language.ffmpeg_code_of_code[dst_language]
base, ext = os.path.splitext(media_filepath)
if ext[1:] == "ts":
media_format = "mp4"
else:
media_format = ext[1:]
dst_tmp_embedded_media_filepath = f"{base}.{ffmpeg_dst_language_code}.tmp.embedded.{media_format}"
embedded_media_filepath = f"{base}.{ffmpeg_dst_language_code}.embedded.{media_format}"
widgets = [f"Embedding '{ffmpeg_dst_language_code}' subtitles into {media_type} : ", Percentage(), ' ', Bar(marker="#"), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
subtitle_embedder = MediaSubtitleEmbedder(subtitle_path=dst_subtitle_filepath, language=ffmpeg_dst_language_code, output_path=dst_tmp_embedded_media_filepath, progress_callback=show_progress, error_messages_callback=show_error_messages)
dst_tmp_output = subtitle_embedder(media_filepath)
pbar.finish()
if os.path.isfile(dst_tmp_output):
shutil.copy(dst_tmp_output, embedded_media_filepath)
os.remove(dst_tmp_output)
print(f"Subtitles embedded {media_type} file saved as : '{embedded_media_filepath}'")
completed_tasks += 1
#print(f"\ndo_translate == True, media_type == 'video', args.embed_src == False and args.embed_dst == True : completed_tasks = {completed_tasks}\n")
else:
print("Unknown error!")
print("")
except KeyboardInterrupt:
pbar.finish()
pool.terminate()
pool.close()
pool.join()
print("Cancelling all tasks")
if sys.platform == "win32":
stop_ffmpeg_windows(error_messages_callback=show_error_messages)
else:
stop_ffmpeg_linux(error_messages_callback=show_error_messages)
remove_temp_files("wav")
return 1
except Exception as e:
if "KeyboardInterrupt" not in str(e):
pbar.finish()
pool.terminate()
pool.close()
pool.join()
print(e)
if sys.platform == "win32":
stop_ffmpeg_windows(error_messages_callback=show_error_messages)
else:
stop_ffmpeg_linux(error_messages_callback=show_error_messages)
remove_temp_files("wav")
return 1
#print(f"len(media_filepaths) = {len(media_filepaths)}")
#print(f"completed_tasks = {completed_tasks}")
if len(media_filepaths) > 0 and completed_tasks == len(media_filepaths):
transcribe_end_time = time.time()
transcribe_elapsed_time = transcribe_end_time - transcribe_start_time
transcribe_elapsed_time_seconds = timedelta(seconds=int(transcribe_elapsed_time))
transcribe_elapsed_time_str = str(transcribe_elapsed_time_seconds)
hour, minute, second = transcribe_elapsed_time_str.split(":")
msg = "Total running time : %s:%s:%s" %(hour.zfill(2), minute, second)
print(msg)
if pool:
pool.close()
pool.join()
pool = None
if sys.platform == "win32":
stop_ffmpeg_windows(error_messages_callback=show_error_messages)
else:
stop_ffmpeg_linux(error_messages_callback=show_error_messages)
remove_temp_files("wav")
if __name__ == '__main__':
multiprocessing.freeze_support()
sys.exit(main())
/distribuciones_jjm-0.1.tar.gz/distribuciones_jjm-0.1/distribuciones_jjm/Gaussiandistribution.py
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
/gamification-engine-0.4.0.tar.gz/gamification-engine-0.4.0/gengine/app/jsscripts/node_modules/eslint/lib/rules/prefer-reflect.js
"use strict";
//------------------------------------------------------------------------------
// Rule Definition
//------------------------------------------------------------------------------
module.exports = {
meta: {
docs: {
description: "require `Reflect` methods where applicable",
category: "ECMAScript 6",
recommended: false,
replacedBy: []
},
deprecated: true,
schema: [
{
type: "object",
properties: {
exceptions: {
type: "array",
items: {
enum: [
"apply",
"call",
"delete",
"defineProperty",
"getOwnPropertyDescriptor",
"getPrototypeOf",
"setPrototypeOf",
"isExtensible",
"getOwnPropertyNames",
"preventExtensions"
]
},
uniqueItems: true
}
},
additionalProperties: false
}
]
},
create(context) {
const existingNames = {
apply: "Function.prototype.apply",
call: "Function.prototype.call",
defineProperty: "Object.defineProperty",
getOwnPropertyDescriptor: "Object.getOwnPropertyDescriptor",
getPrototypeOf: "Object.getPrototypeOf",
setPrototypeOf: "Object.setPrototypeOf",
isExtensible: "Object.isExtensible",
getOwnPropertyNames: "Object.getOwnPropertyNames",
preventExtensions: "Object.preventExtensions"
};
const reflectSubstitutes = {
apply: "Reflect.apply",
call: "Reflect.apply",
defineProperty: "Reflect.defineProperty",
getOwnPropertyDescriptor: "Reflect.getOwnPropertyDescriptor",
getPrototypeOf: "Reflect.getPrototypeOf",
setPrototypeOf: "Reflect.setPrototypeOf",
isExtensible: "Reflect.isExtensible",
getOwnPropertyNames: "Reflect.getOwnPropertyNames",
preventExtensions: "Reflect.preventExtensions"
};
const exceptions = (context.options[0] || {}).exceptions || [];
/**
* Reports the Reflect violation based on the `existing` and `substitute`
* @param {Object} node The node that violates the rule.
* @param {string} existing The existing method name that has been used.
* @param {string} substitute The Reflect substitute that should be used.
* @returns {void}
*/
function report(node, existing, substitute) {
context.report({ node, message: "Avoid using {{existing}}, instead use {{substitute}}.", data: {
existing,
substitute
} });
}
return {
CallExpression(node) {
const methodName = (node.callee.property || {}).name;
const isReflectCall = (node.callee.object || {}).name === "Reflect";
const hasReflectSubstitute = reflectSubstitutes.hasOwnProperty(methodName);
const userConfiguredException = exceptions.indexOf(methodName) !== -1;
if (hasReflectSubstitute && !isReflectCall && !userConfiguredException) {
report(node, existingNames[methodName], reflectSubstitutes[methodName]);
}
},
UnaryExpression(node) {
const isDeleteOperator = node.operator === "delete";
const targetsIdentifier = node.argument.type === "Identifier";
const userConfiguredException = exceptions.indexOf("delete") !== -1;
if (isDeleteOperator && !targetsIdentifier && !userConfiguredException) {
report(node, "the delete keyword", "Reflect.deleteProperty");
}
}
};
}
};
/fix_rosdep-0.1.7-py3-none-any.whl/fixed_rosdep/main.py
# Author Tully Foote/[email protected]
"""
Command-line interface to rosdep library
"""
from __future__ import print_function
import errno
import os
import sys
import traceback
try:
from urllib.error import URLError
from urllib.request import build_opener
from urllib.request import HTTPBasicAuthHandler
from urllib.request import HTTPHandler
from urllib.request import install_opener
from urllib.request import ProxyHandler
except ImportError:
from urllib2 import build_opener
from urllib2 import HTTPBasicAuthHandler
from urllib2 import HTTPHandler
from urllib2 import install_opener
from urllib2 import ProxyHandler
from urllib2 import URLError
import warnings
from optparse import OptionParser
import rospkg
from . import create_default_installer_context, get_default_installer
from . import __version__
from .core import RosdepInternalError, InstallFailed, UnsupportedOs, InvalidData, CachePermissionError, DownloadFailure
from .installers import normalize_uninstalled_to_list
from .installers import RosdepInstaller
from .lookup import RosdepLookup, ResolutionError, prune_catkin_packages
from .meta import MetaDatabase
from .rospkg_loader import DEFAULT_VIEW_KEY
from .sources_list import update_sources_list, get_sources_cache_dir,\
download_default_sources_list, SourcesListLoader, CACHE_INDEX,\
get_sources_list_dir, get_default_sources_list_file,\
DEFAULT_SOURCES_LIST_URL
from .rosdistrohelper import PreRep137Warning
from .ament_packages import AMENT_PREFIX_PATH_ENV_VAR
from .ament_packages import get_packages_with_prefixes
from .catkin_packages import find_catkin_packages_in
from .catkin_packages import set_workspace_packages
from .catkin_packages import get_workspace_packages
from .catkin_packages import VALID_DEPENDENCY_TYPES
from catkin_pkg.package import InvalidPackage
class UsageError(Exception):
pass
_usage = """usage: rosdep [options] <command> <args>
Commands:
rosdep check <stacks-and-packages>...
check if the dependencies of package(s) have been met.
rosdep install <stacks-and-packages>...
download and install the dependencies of a given package or packages.
rosdep db
generate the dependency database and print it to the console.
rosdep init
initialize rosdep sources in /etc/ros/rosdep. May require sudo.
rosdep keys <stacks-and-packages>...
list the rosdep keys that the packages depend on.
rosdep resolve <rosdeps>
resolve <rosdeps> to system dependencies
rosdep update
update the local rosdep database based on the rosdep sources.
rosdep what-needs <rosdeps>...
print a list of packages that declare a rosdep on (at least
one of) <rosdeps>
rosdep where-defined <rosdeps>...
print a list of yaml files that declare a rosdep on (at least
one of) <rosdeps>
rosdep fix-permissions
Recursively change the permissions of the user's ros home directory.
May require sudo. Can be useful to fix permissions after calling
"rosdep update" with sudo accidentally.
"""
def _get_default_RosdepLookup(options):
"""
Helper routine for converting command-line options into
appropriate RosdepLookup instance.
"""
os_override = convert_os_override_option(options.os_override)
sources_loader = SourcesListLoader.create_default(sources_cache_dir=options.sources_cache_dir,
os_override=os_override,
verbose=options.verbose)
lookup = RosdepLookup.create_from_rospkg(sources_loader=sources_loader, dependency_types=options.dependency_types)
lookup.verbose = options.verbose
return lookup
def rosdep_main(args=None):
if args is None:
args = sys.argv[1:]
try:
exit_code = _rosdep_main(args)
if exit_code not in [0, None]:
sys.exit(exit_code)
except rospkg.ResourceNotFound as e:
print("""
ERROR: rosdep cannot find all required resources to answer your query
%s
""" % (error_to_human_readable(e)), file=sys.stderr)
sys.exit(1)
except UsageError as e:
print(_usage, file=sys.stderr)
print('ERROR: %s' % (str(e)), file=sys.stderr)
if hasattr(os, 'EX_USAGE'):
sys.exit(os.EX_USAGE)
else:
sys.exit(64) # EX_USAGE is not available on Windows; EX_USAGE is 64 on Unix
except RosdepInternalError as e:
print("""
ERROR: Rosdep experienced an internal error.
Please go to the rosdep page [1] and file a bug report with the message below.
[1] : http://www.ros.org/wiki/rosdep
rosdep version: %s
%s
""" % (__version__, e.message), file=sys.stderr)
sys.exit(1)
except ResolutionError as e:
print("""
ERROR: %s
%s
""" % (e.args[0], e), file=sys.stderr)
sys.exit(1)
except CachePermissionError as e:
print(str(e))
print("Try running 'sudo rosdep fix-permissions'")
sys.exit(1)
except UnsupportedOs as e:
print('Unsupported OS: %s\nSupported OSes are [%s]' % (e.args[0], ', '.join(e.args[1])), file=sys.stderr)
sys.exit(1)
except InvalidPackage as e:
print(str(e))
sys.exit(1)
except Exception as e:
print("""
ERROR: Rosdep experienced an error: %s
rosdep version: %s
%s
""" % (e, __version__, traceback.format_exc()), file=sys.stderr)
sys.exit(1)
def check_for_sources_list_init(sources_cache_dir):
"""
Check to see if sources list and cache are present.
*sources_cache_dir* alone is enough to pass as the user has the
option of passing in a cache dir.
If check fails, tell user how to resolve and sys exit.
"""
commands = []
filename = os.path.join(sources_cache_dir, CACHE_INDEX)
if os.path.exists(filename):
return
else:
commands.append('rosdep update')
sources_list_dir = get_sources_list_dir()
if not os.path.exists(sources_list_dir):
commands.insert(0, 'sudo rosdep init')
else:
filelist = [f for f in os.listdir(sources_list_dir) if f.endswith('.list')]
if not filelist:
commands.insert(0, 'sudo rosdep init')
if commands:
commands = '\n'.join([' %s' % c for c in commands])
print("""
ERROR: your rosdep installation has not been initialized yet. Please run:
%s
""" % (commands), file=sys.stderr)
sys.exit(1)
else:
return True
def key_list_to_dict(key_list):
"""
Convert a list of strings of the form 'foo:bar' to a dictionary.
Splits strings of the form 'foo:bar quux:quax' into separate entries.
"""
try:
key_list = [key for s in key_list for key in s.split(' ')]
return dict(map(lambda s: [t.strip() for t in s.split(':')], key_list))
except ValueError as e:
raise UsageError("Invalid 'key:value' list: '%s'" % ' '.join(key_list))
def str_to_bool(s):
"""Maps a string to bool. Supports true/false, and yes/no, and is case-insensitive"""
s = s.lower()
if s in ['yes', 'true']:
return True
elif s in ['no', 'false']:
return False
else:
raise UsageError("Cannot parse '%s' as boolean" % s)
def setup_proxy_opener():
# check for http[s]?_proxy environment variables
for scheme in ['http', 'https']:
key = scheme + '_proxy'
if key in os.environ:
proxy = ProxyHandler({scheme: os.environ[key]})
auth = HTTPBasicAuthHandler()
opener = build_opener(proxy, auth, HTTPHandler)
install_opener(opener)
def setup_environment_variables(ros_distro):
"""
Set environment variables needed to find ROS packages and evaluate conditional dependencies.
:param ros_distro: The requested ROS distro passed on the CLI, or None
"""
if ros_distro is not None:
if 'ROS_DISTRO' in os.environ and os.environ['ROS_DISTRO'] != ros_distro:
# user has a different workspace sourced, use --rosdistro
print('WARNING: given --rosdistro {} but ROS_DISTRO is "{}". Ignoring environment.'.format(
ros_distro, os.environ['ROS_DISTRO']))
# Use python version from --rosdistro
if 'ROS_PYTHON_VERSION' in os.environ:
del os.environ['ROS_PYTHON_VERSION']
os.environ['ROS_DISTRO'] = ros_distro
if 'ROS_PYTHON_VERSION' not in os.environ and 'ROS_DISTRO' in os.environ:
# Set python version to version used by ROS distro
python_versions = MetaDatabase().get('ROS_PYTHON_VERSION', default=[])
if os.environ['ROS_DISTRO'] in python_versions:
os.environ['ROS_PYTHON_VERSION'] = str(python_versions[os.environ['ROS_DISTRO']])
if 'ROS_PYTHON_VERSION' not in os.environ:
# Default to same python version used to invoke rosdep
print('WARNING: ROS_PYTHON_VERSION is unset. Defaulting to {}'.format(sys.version[0]), file=sys.stderr)
os.environ['ROS_PYTHON_VERSION'] = sys.version[0]
def _rosdep_main(args):
# sources cache dir is our local database.
default_sources_cache = get_sources_cache_dir()
parser = OptionParser(usage=_usage, prog='rosdep')
parser.add_option('--os', dest='os_override', default=None,
metavar='OS_NAME:OS_VERSION', help='Override OS name and version (colon-separated), e.g. ubuntu:lucid')
parser.add_option('-c', '--sources-cache-dir', dest='sources_cache_dir', default=default_sources_cache,
metavar='SOURCES_CACHE_DIR', help='Override %s' % (default_sources_cache))
parser.add_option('--verbose', '-v', dest='verbose', default=False,
action='store_true', help='verbose display')
parser.add_option('--version', dest='print_version', default=False,
action='store_true', help='print just the rosdep version, then exit')
parser.add_option('--all-versions', dest='print_all_versions', default=False,
action='store_true', help='print rosdep version and version of installers, then exit')
parser.add_option('--reinstall', dest='reinstall', default=False,
action='store_true', help='(re)install all dependencies, even if already installed')
parser.add_option('--default-yes', '-y', dest='default_yes', default=False,
action='store_true', help='Tell the package manager to default to y or fail when installing')
parser.add_option('--simulate', '-s', dest='simulate', default=False,
action='store_true', help='Simulate install')
parser.add_option('-r', dest='robust', default=False,
action='store_true', help='Continue installing despite errors.')
parser.add_option('-q', dest='quiet', default=False,
action='store_true', help='Quiet. Suppress output except for errors.')
parser.add_option('-a', '--all', dest='rosdep_all', default=False,
action='store_true', help='select all packages')
parser.add_option('-n', dest='recursive', default=True,
action='store_false', help="Do not consider implicit/recursive dependencies. Only valid with 'keys', 'check', and 'install' commands.")
parser.add_option('--ignore-packages-from-source', '--ignore-src', '-i',
dest='ignore_src', default=False, action='store_true',
help="Affects the 'check', 'install', and 'keys' verbs. "
'If specified then rosdep will ignore keys that '
'are found to be catkin or ament packages anywhere in the '
'ROS_PACKAGE_PATH, AMENT_PREFIX_PATH or in any of the directories '
'given by the --from-paths option.')
parser.add_option('--skip-keys',
dest='skip_keys', action='append', default=[],
help="Affects the 'check' and 'install' verbs. The "
'specified rosdep keys will be ignored, i.e. not '
'resolved and not installed. The option can be supplied multiple '
'times. A space separated list of rosdep keys can also '
'be passed as a string. A more permanent solution to '
'locally ignore a rosdep key is creating a local rosdep rule '
'with an empty list of packages (include it in '
'/etc/ros/rosdep/sources.list.d/ before the defaults).')
parser.add_option('--filter-for-installers',
action='append', default=[],
help="Affects the 'db' verb. If supplied, the output of the 'db' "
'command is filtered to only list packages whose installer '
'is in the provided list. The option can be supplied '
'multiple times. A space separated list of installers can also '
'be passed as a string. Example: `--filter-for-installers "apt pip"`')
parser.add_option('--from-paths', dest='from_paths',
default=False, action='store_true',
help="Affects the 'check', 'keys', and 'install' verbs. "
'If specified the arguments to those verbs will be '
'considered paths to be searched, acting on all '
'catkin packages found there in.')
parser.add_option('--rosdistro', dest='ros_distro', default=None,
help='Explicitly sets the ROS distro to use, overriding '
'the normal method of detecting the ROS distro '
'using the ROS_DISTRO environment variable. '
"When used with the 'update' verb, "
'only the specified distro will be updated.')
parser.add_option('--as-root', default=[], action='append',
metavar='INSTALLER_KEY:<bool>', help='Override '
'whether sudo is used for a specific installer, '
"e.g. '--as-root pip:false' or '--as-root \"pip:no homebrew:yes\"'. "
'Can be specified multiple times.')
parser.add_option('--include-eol-distros', dest='include_eol_distros',
default=False, action='store_true',
help="Affects the 'update' verb. "
'If specified end-of-life distros are being '
'fetched too.')
parser.add_option('-t', '--dependency-types', dest='dependency_types',
type="choice", choices=list(VALID_DEPENDENCY_TYPES),
default=[], action='append',
help='Dependency types to install, can be given multiple times. '
'Choose from {}. Default: all except doc.'.format(VALID_DEPENDENCY_TYPES))
options, args = parser.parse_args(args)
if options.print_version or options.print_all_versions:
# First print the rosdep version.
print('{}'.format(__version__))
# If not printing versions of all installers, exit.
if not options.print_all_versions:
sys.exit(0)
# Otherwise, Then collect the versions of the installers and print them.
installers = create_default_installer_context().installers
installer_keys = get_default_installer()[1]
version_strings = []
for key in installer_keys:
if key == 'source':
# Explicitly skip the source installer.
continue
installer = installers[key]
try:
installer_version_strings = installer.get_version_strings()
assert isinstance(installer_version_strings, list), installer_version_strings
version_strings.extend(installer_version_strings)
except NotImplementedError:
version_strings.append('{} unknown'.format(key))
continue
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
version_strings.append('{} not installed'.format(key))
continue
if version_strings:
print()
print('Versions of installers:')
print('\n'.join([' ' + x for x in version_strings if x]))
else:
print()
print('No installers with versions available found.')
sys.exit(0)
# flatten list of skipped keys, filter-for-installers, and dependency types
options.skip_keys = [key for s in options.skip_keys for key in s.split(' ')]
options.filter_for_installers = [inst for s in options.filter_for_installers for inst in s.split(' ')]
options.dependency_types = [dep for s in options.dependency_types for dep in s.split(' ')]
if len(args) == 0:
parser.error('Please enter a command')
command = args[0]
if command not in _commands:
parser.error('Unsupported command %s.' % command)
args = args[1:]
# Convert list of keys to dictionary
options.as_root = dict((k, str_to_bool(v)) for k, v in key_list_to_dict(options.as_root).items())
if command not in ['init', 'update', 'fix-permissions']:
check_for_sources_list_init(options.sources_cache_dir)
# _package_args_handler uses `ROS_DISTRO`, so environment variables must be set before
setup_environment_variables(options.ros_distro)
elif command not in ['fix-permissions']:
setup_proxy_opener()
if command in _command_rosdep_args:
return _rosdep_args_handler(command, parser, options, args)
elif command in _command_no_args:
return _no_args_handler(command, parser, options, args)
else:
return _package_args_handler(command, parser, options, args)
def _no_args_handler(command, parser, options, args):
if args:
parser.error('command [%s] takes no arguments' % (command))
else:
return command_handlers[command](options)
def _rosdep_args_handler(command, parser, options, args):
# rosdep keys as args
if options.rosdep_all:
parser.error('-a, --all is not a valid option for this command')
elif len(args) < 1:
parser.error("Please enter arguments for '%s'" % command)
else:
return command_handlers[command](args, options)
def _package_args_handler(command, parser, options, args):
if options.rosdep_all:
if args:
parser.error('cannot specify additional arguments with -a')
else:
# let the loader filter the -a. This will take out some
# packages that are catkinized (for now).
lookup = _get_default_RosdepLookup(options)
loader = lookup.get_loader()
args = loader.get_loadable_resources()
not_found = []
elif not args:
parser.error('no packages or stacks specified')
# package or stack names as args. have to convert stack names to packages.
# - overrides to enable testing
packages = []
not_found = []
if options.from_paths:
for path in args:
if options.verbose:
print("Using argument '{0}' as a path to search.".format(path))
if not os.path.exists(path):
print("given path '{0}' does not exist".format(path))
return 1
path = os.path.abspath(path)
if 'ROS_PACKAGE_PATH' not in os.environ:
os.environ['ROS_PACKAGE_PATH'] = '{0}'.format(path)
else:
os.environ['ROS_PACKAGE_PATH'] = '{0}{1}{2}'.format(
path,
os.pathsep,
os.environ['ROS_PACKAGE_PATH']
)
pkgs = find_catkin_packages_in(path, options.verbose)
packages.extend(pkgs)
# Make packages list unique
packages = list(set(packages))
else:
rospack = rospkg.RosPack()
rosstack = rospkg.RosStack()
val = rospkg.expand_to_packages(args, rospack, rosstack)
packages = val[0]
not_found = val[1]
if not_found:
raise rospkg.ResourceNotFound(not_found[0], rospack.get_ros_paths())
# Handle the --ignore-src option
if command in ['install', 'check', 'keys'] and options.ignore_src:
if options.verbose:
print('Searching ROS_PACKAGE_PATH for '
'sources: ' + str(os.environ['ROS_PACKAGE_PATH'].split(os.pathsep)))
ws_pkgs = get_workspace_packages()
for path in os.environ['ROS_PACKAGE_PATH'].split(os.pathsep):
path = os.path.abspath(path.strip())
if os.path.exists(path):
pkgs = find_catkin_packages_in(path, options.verbose)
ws_pkgs.extend(pkgs)
elif options.verbose:
print('Skipping non-existent path ' + path)
set_workspace_packages(ws_pkgs)
# Lookup package names from ament index.
if AMENT_PREFIX_PATH_ENV_VAR in os.environ:
if options.verbose:
print(
'Searching ' + AMENT_PREFIX_PATH_ENV_VAR + ' for '
'sources: ' + str(os.environ[AMENT_PREFIX_PATH_ENV_VAR].split(':')))
ws_pkgs = get_workspace_packages()
pkgs = get_packages_with_prefixes().keys()
ws_pkgs.extend(pkgs)
# Make packages list unique
ws_pkgs = list(set(ws_pkgs))
set_workspace_packages(ws_pkgs)
lookup = _get_default_RosdepLookup(options)
# Handle the --skip-keys option by pretending that they are packages in the catkin workspace
if command in ['install', 'check'] and options.skip_keys:
if options.verbose:
print('Skipping the specified keys:\n- ' + '\n- '.join(options.skip_keys))
lookup.skipped_keys = options.skip_keys
if 0 and not packages: # disable, let individual handlers specify behavior
# possible with empty stacks
print('No packages in arguments, aborting')
return
return command_handlers[command](lookup, packages, options)
def convert_os_override_option(options_os_override):
"""
Convert os_override option flag to ``(os_name, os_version)`` tuple, or
``None`` if not set
:returns: ``(os_name, os_version)`` tuple if option is set, ``None`` otherwise
:raises: :exc:`UsageError` if option is not set properly
"""
if not options_os_override:
return None
val = options_os_override
if ':' not in val:
raise UsageError('OS override must be colon-separated OS_NAME:OS_VERSION, e.g. ubuntu:maverick')
os_name = val[:val.find(':')]
os_version = val[val.find(':') + 1:]
return os_name, os_version
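# Examples of the parsing above (hypothetical values): 'ubuntu:focal' returns
# ('ubuntu', 'focal'); an unset option returns None; a value without a colon,
# such as 'ubuntu', raises UsageError.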
def configure_installer_context(installer_context, options):
"""
Configure the *installer_context* from *options*.
- Override the OS detector in *installer_context* if necessary.
- Set *as_root* for installers if specified.
:raises: :exc:`UsageError` If user input options incorrectly
"""
os_override = convert_os_override_option(options.os_override)
if os_override is not None:
installer_context.set_os_override(*os_override)
for k, v in options.as_root.items():
try:
installer_context.get_installer(k).as_root = v
except KeyError:
raise UsageError("Installer '%s' not defined." % k)
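# Illustrative effect (hypothetical option values): with options.os_override set
# to 'ubuntu:focal' and options.as_root parsed as {'apt': False}, OS detection is
# pinned to Ubuntu Focal and the apt installer is configured to run without root.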
def change_name():
    try:
        # Move the original rosdep entry point aside and install the
        # fix-rosdep wrapper in its place.
        os.rename('/usr/bin/rosdep', '/usr/bin/rosdep_src')
        os.link('/usr/local/bin/fix-rosdep', '/usr/bin/rosdep')
    except OSError:
        # Already renamed, or insufficient permissions; nothing to do.
        pass
def command_init(options):
try:
src_list = download_default_sources_list()
        data = src_list.replace("https://raw.githubusercontent.com/ros/rosdistro/master", "https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro/master")
except URLError as e:
print('ERROR: cannot download default sources list from:\n%s\nWebsite may be down.' % (DEFAULT_SOURCES_LIST_URL))
return 4
except DownloadFailure as e:
print('ERROR: cannot download default sources list from:\n%s\nWebsite may be down.' % (DEFAULT_SOURCES_LIST_URL))
print(e)
return 4
# reuse path variable for error message
path = get_sources_list_dir()
old_umask = os.umask(0o022)
try:
if not os.path.exists(path):
os.makedirs(path)
path = get_default_sources_list_file()
if os.path.exists(path):
            print('WARNING: default sources list file already exists:\n\t%s\nOverwriting it.' % (path))
            os.remove(path)
            # note: upstream rosdep prints an error and returns 1 here instead of overwriting
with open(path, 'w') as f:
f.write(data)
print('Wrote %s' % (path))
print('Recommended: please run\n\n\trosdep update\n')
except IOError as e:
print('ERROR: cannot create %s:\n\t%s' % (path, e), file=sys.stderr)
return 2
except OSError as e:
print("ERROR: cannot create %s:\n\t%s\nPerhaps you need to run 'sudo rosdep init' instead" % (path, e), file=sys.stderr)
return 3
finally:
os.umask(old_umask)
def command_update(options):
    error_occurred = []
def update_success_handler(data_source):
print('Hit %s' % (data_source.url))
def update_error_handler(data_source, exc):
error_string = 'ERROR: unable to process source [%s]:\n\t%s' % (data_source.url, exc)
print(error_string, file=sys.stderr)
        error_occurred.append(error_string)
sources_list_dir = get_sources_list_dir()
# disable deprecation warnings when using the command-line tool
warnings.filterwarnings('ignore', category=PreRep137Warning)
if not os.path.exists(sources_list_dir):
print('ERROR: no sources directory exists on the system meaning rosdep has not yet been initialized.\n\nPlease initialize your rosdep with\n\n\tsudo rosdep init\n')
return 1
filelist = [f for f in os.listdir(sources_list_dir) if f.endswith('.list')]
if not filelist:
print('ERROR: no data sources in %s\n\nPlease initialize your rosdep with\n\n\tsudo rosdep init\n' % sources_list_dir, file=sys.stderr)
return 1
try:
print('reading in sources list data from %s' % (sources_list_dir))
sources_cache_dir = get_sources_cache_dir()
try:
if os.geteuid() == 0:
print("Warning: running 'rosdep update' as root is not recommended.", file=sys.stderr)
print("You should run 'sudo rosdep fix-permissions' and invoke 'rosdep update' again without sudo.", file=sys.stderr)
except AttributeError:
            # os.geteuid() does not exist on Windows; nothing to check there
pass
update_sources_list(success_handler=update_success_handler,
error_handler=update_error_handler,
skip_eol_distros=not options.include_eol_distros,
ros_distro=options.ros_distro)
print('updated cache in %s' % (sources_cache_dir))
except InvalidData as e:
print('ERROR: invalid sources list file:\n\t%s' % (e), file=sys.stderr)
return 1
except IOError as e:
print('ERROR: error loading sources list:\n\t%s' % (e), file=sys.stderr)
return 1
except ValueError as e:
print('ERROR: invalid argument value provided:\n\t%s' % (e), file=sys.stderr)
return 1
    if error_occurred:
        print('ERROR: Not all sources were able to be updated.\n[[[')
        for e in error_occurred:
print(e)
print(']]]')
return 1
def command_keys(lookup, packages, options):
lookup = _get_default_RosdepLookup(options)
rosdep_keys = get_keys(lookup, packages, options.recursive)
prune_catkin_packages(rosdep_keys, options.verbose)
_print_lookup_errors(lookup)
print('\n'.join(rosdep_keys))
def get_keys(lookup, packages, recursive):
rosdep_keys = set() # using a set to ensure uniqueness
for package_name in packages:
deps = lookup.get_rosdeps(package_name, implicit=recursive)
rosdep_keys.update(deps)
return list(rosdep_keys)
def command_check(lookup, packages, options):
verbose = options.verbose
installer_context = create_default_installer_context(verbose=verbose)
configure_installer_context(installer_context, options)
installer = RosdepInstaller(installer_context, lookup)
uninstalled, errors = installer.get_uninstalled(packages, implicit=options.recursive, verbose=verbose)
# pretty print the result
if [v for k, v in uninstalled if v]:
print('System dependencies have not been satisfied:')
for installer_key, resolved in uninstalled:
if resolved:
for r in resolved:
print('%s\t%s' % (installer_key, r))
else:
print('All system dependencies have been satisfied')
if errors:
for package_name, ex in errors.items():
if isinstance(ex, rospkg.ResourceNotFound):
print('ERROR[%s]: resource not found [%s]' % (package_name, ex.args[0]), file=sys.stderr)
else:
print('ERROR[%s]: %s' % (package_name, ex), file=sys.stderr)
if uninstalled:
return 1
else:
return 0
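# For example, an unsatisfied apt dependency is reported above as a tab-separated
# line such as 'apt	libboost-dev' (illustrative package name) before returning 1.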
def error_to_human_readable(error):
if isinstance(error, rospkg.ResourceNotFound):
return 'Missing resource %s' % (error,)
elif isinstance(error, ResolutionError):
return '%s' % (error.args[0],)
else:
return '%s' % (error,)
def command_install(lookup, packages, options):
# map options
install_options = dict(interactive=not options.default_yes, verbose=options.verbose,
reinstall=options.reinstall,
continue_on_error=options.robust, simulate=options.simulate, quiet=options.quiet)
# setup installer
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
installer = RosdepInstaller(installer_context, lookup)
if options.reinstall:
if options.verbose:
print('reinstall is true, resolving all dependencies')
try:
uninstalled, errors = lookup.resolve_all(packages, installer_context, implicit=options.recursive)
except InvalidData as e:
print('ERROR: unable to process all dependencies:\n\t%s' % (e), file=sys.stderr)
return 1
else:
uninstalled, errors = installer.get_uninstalled(packages, implicit=options.recursive, verbose=options.verbose)
if options.verbose:
uninstalled_dependencies = normalize_uninstalled_to_list(uninstalled)
print('uninstalled dependencies are: [%s]' % ', '.join(uninstalled_dependencies))
if errors:
err_msg = ('ERROR: the following packages/stacks could not have their '
'rosdep keys resolved\nto system dependencies')
if rospkg.distro.current_distro_codename() is None:
err_msg += (
' (ROS distro is not set. '
'Make sure `ROS_DISTRO` environment variable is set, or use '
'`--rosdistro` option to specify the distro, '
'e.g. `--rosdistro indigo`)'
)
print(err_msg + ':', file=sys.stderr)
for rosdep_key, error in errors.items():
print('%s: %s' % (rosdep_key, error_to_human_readable(error)), file=sys.stderr)
if options.robust:
print('Continuing to install resolvable dependencies...')
else:
return 1
try:
installer.install(uninstalled, **install_options)
if not options.simulate:
print('#All required rosdeps installed successfully')
return 0
except KeyError as e:
raise RosdepInternalError(e)
except InstallFailed as e:
print('ERROR: the following rosdeps failed to install', file=sys.stderr)
print('\n'.join([' %s: %s' % (k, m) for k, m in e.failures]), file=sys.stderr)
return 1
def command_db(options):
# exact same setup logic as command_resolve, should possibly combine
lookup = _get_default_RosdepLookup(options)
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
os_name, os_version = installer_context.get_os_name_and_version()
try:
installer_keys = installer_context.get_os_installer_keys(os_name)
default_key = installer_context.get_default_os_installer_key(os_name)
except KeyError:
raise UnsupportedOs(os_name, installer_context.get_os_keys())
installer = installer_context.get_installer(default_key)
print('OS NAME: %s' % os_name)
print('OS VERSION: %s' % os_version)
errors = []
print('DB [key -> resolution]')
# db does not leverage the resource-based API
view = lookup.get_rosdep_view(DEFAULT_VIEW_KEY, verbose=options.verbose)
for rosdep_name in view.keys():
try:
d = view.lookup(rosdep_name)
inst_key, rule = d.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
if options.filter_for_installers and inst_key not in options.filter_for_installers:
continue
resolved = installer.resolve(rule)
resolved_str = ' '.join([str(r) for r in resolved])
print('%s -> %s' % (rosdep_name, resolved_str))
except ResolutionError as e:
errors.append(e)
# TODO: add command-line option for users to be able to see this.
# This is useful for platform bringup, but useless for most users
# as the rosdep db contains numerous, platform-specific keys.
if 0:
for error in errors:
print('WARNING: %s' % (error_to_human_readable(error)), file=sys.stderr)
def _print_lookup_errors(lookup):
for error in lookup.get_errors():
if isinstance(error, rospkg.ResourceNotFound):
print('WARNING: unable to locate resource %s' % (str(error.args[0])), file=sys.stderr)
else:
print('WARNING: %s' % (str(error)), file=sys.stderr)
def command_what_needs(args, options):
lookup = _get_default_RosdepLookup(options)
packages = []
for rosdep_name in args:
packages.extend(lookup.get_resources_that_need(rosdep_name))
_print_lookup_errors(lookup)
print('\n'.join(set(packages)))
def command_where_defined(args, options):
lookup = _get_default_RosdepLookup(options)
locations = []
for rosdep_name in args:
locations.extend(lookup.get_views_that_define(rosdep_name))
_print_lookup_errors(lookup)
if locations:
for location in locations:
origin = location[1]
print(origin)
else:
print('ERROR: cannot find definition(s) for [%s]' % (', '.join(args)), file=sys.stderr)
return 1
def command_resolve(args, options):
lookup = _get_default_RosdepLookup(options)
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
installer, installer_keys, default_key, \
os_name, os_version = get_default_installer(installer_context=installer_context,
verbose=options.verbose)
invalid_key_errors = []
for rosdep_name in args:
if len(args) > 1:
print('#ROSDEP[%s]' % rosdep_name)
view = lookup.get_rosdep_view(DEFAULT_VIEW_KEY, verbose=options.verbose)
try:
d = view.lookup(rosdep_name)
except KeyError as e:
invalid_key_errors.append(e)
continue
rule_installer, rule = d.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
installer = installer_context.get_installer(rule_installer)
resolved = installer.resolve(rule)
print('#%s' % (rule_installer))
print(' '.join([str(r) for r in resolved]))
for error in invalid_key_errors:
print('ERROR: no rosdep rule for %s' % (error), file=sys.stderr)
for error in lookup.get_errors():
print('WARNING: %s' % (error_to_human_readable(error)), file=sys.stderr)
if invalid_key_errors:
return 1 # error exit code
def command_fix_permissions(options):
import os
import pwd
import grp
stat_info = os.stat(os.path.expanduser('~'))
uid = stat_info.st_uid
gid = stat_info.st_gid
user_name = pwd.getpwuid(uid).pw_name
try:
group_name = grp.getgrgid(gid).gr_name
except KeyError as e:
group_name = gid
ros_home = rospkg.get_ros_home()
print("Recursively changing ownership of ros home directory '{0}' "
"to '{1}:{2}' (current user)...".format(ros_home, user_name, group_name))
failed = []
try:
for dirpath, dirnames, filenames in os.walk(ros_home):
try:
os.lchown(dirpath, uid, gid)
except Exception as e:
failed.append((dirpath, str(e)))
for f in filenames:
try:
path = os.path.join(dirpath, f)
os.lchown(path, uid, gid)
except Exception as e:
failed.append((path, str(e)))
except Exception:
import traceback
traceback.print_exc()
print('Failed to walk directory. Try with sudo?')
else:
if failed:
print('Failed to change ownership for:')
for p, e in failed:
print('{0} --> {1}'.format(p, e))
print('Try with sudo?')
else:
print('Done.')
command_handlers = {
'db': command_db,
'check': command_check,
'keys': command_keys,
'install': command_install,
'what-needs': command_what_needs,
'where-defined': command_where_defined,
'resolve': command_resolve,
'init': command_init,
'update': command_update,
'fix-permissions': command_fix_permissions,
# backwards compat
'what_needs': command_what_needs,
'where_defined': command_where_defined,
'depdb': command_db,
}
# commands that accept rosdep names as args
_command_rosdep_args = ['what-needs', 'what_needs', 'where-defined', 'where_defined', 'resolve']
# commands that take no args
_command_no_args = ['update', 'init', 'db', 'fix-permissions']
_commands = command_handlers.keys()
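# Illustrative dispatch (sketch, not part of the original module): after argument
# parsing, a command is routed through one of three handler shapes defined above:
#   command_handlers['update'](options)                       # no-args commands
#   command_handlers['resolve'](args, options)                # rosdep-key commands
#   command_handlers['install'](lookup, packages, options)    # package commands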
|
PypiClean
|
/lib-dzne-workbook-0.2.0.tar.gz/lib-dzne-workbook-0.2.0/src/lib_dzne_workbook/__init__.py
|
import math as _math
import os as _os
import tempfile as _tmp
import lib_dzne_filedata as _fd
import openpyxl as _xl
import pandas as _pd
class WorkbookData(_fd.FileData):
_ext = '.xlsx'
def __init__(self, workbook):
self.workbook = workbook
@classmethod
def _load(cls, /, file):
return _xl.load_workbook(file)
    def _save(self, /, file):
        self.workbook.save(filename=file)
@staticmethod
def _default():
return _xl.Workbook()
    @classmethod
    def clone_data(cls, workbook):
        # Round-trip the workbook through a temporary file to produce a deep copy.
        with _tmp.TemporaryDirectory() as directory:
            file = _os.path.join(directory, "a" + cls.ext())
            workbook.save(file)
            return _xl.load_workbook(file)
@staticmethod
def workbook_from_DataFrames(dataFrames):
dataFrames = dict(dataFrames)
if len(dataFrames) == 0:
return None
workbook = _xl.Workbook()
default_sheet = workbook.active
for table, df in dataFrames.items():
if default_sheet is None:
workbook.create_sheet(table)
else:
default_sheet.title = table
default_sheet = None
for table, df in dataFrames.items():
columns = list(df.columns)
for x, column in enumerate(columns):
workbook[table].cell(row=1, column=x+1).value = column
for y, v in enumerate(df[column].tolist()):
if _pd.isna(v):
continue
                    elif (type(v) is float) and (_math.isinf(v)):  # is this really needed?
value = str(v)
else:
value = v
workbook[table].cell(row=y+2, column=x+1).value = value
return workbook
@staticmethod
def set_cell(*, cell, value):
"""Setting value of cell. """
if _pd.isna(value):
value = 'N/A'
else:
if type(value) is float:
if _math.isinf(value):
if value < 0:
value = '-inf'
else:
value = '+inf'
if type(value) not in {str, int, float, bool}:
raise TypeError(f"The value {value} is of the invalid type {type(value)}! ")
cell.value = value
        cell.alignment = _xl.styles.Alignment()  # default alignment (horizontal='general')
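# Minimal usage sketch (hypothetical data; assumes pandas and openpyxl are
# installed, as imported above):
#
#   import pandas as pd
#   df = pd.DataFrame({'a': [1, 2], 'b': [3.0, float('inf')]})
#   wb = WorkbookData.workbook_from_DataFrames({'results': df})
#   wb.save('example.xlsx')  # returns an openpyxl Workbook; inf is stored as 'inf'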
|
PypiClean
|
/pymc3_ext_wlad-0.2.1-py3-none-any.whl/pymc3_ext/plots/__init__.py
|
import functools
import sys
import warnings
import arviz as az
def map_args(func):
swaps = [
('varnames', 'var_names')
]
@functools.wraps(func)
def wrapped(*args, **kwargs):
for (old, new) in swaps:
if old in kwargs and new not in kwargs:
warnings.warn('Keyword argument `{old}` renamed to `{new}`, and will be removed in pymc3_ext 3.8'.format(old=old, new=new))
kwargs[new] = kwargs.pop(old)
return func(*args, **kwargs)
return wrapped
# pymc3_ext custom plots: override these names for custom behavior
autocorrplot = map_args(az.plot_autocorr)
forestplot = map_args(az.plot_forest)
kdeplot = map_args(az.plot_kde)
plot_posterior = map_args(az.plot_posterior)
energyplot = map_args(az.plot_energy)
densityplot = map_args(az.plot_density)
pairplot = map_args(az.plot_pair)
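# For example, forestplot(data, varnames=['mu']) warns about the renamed keyword
# and forwards the call as az.plot_forest(data, var_names=['mu']).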
# Use compact traceplot by default
@map_args
@functools.wraps(az.plot_trace)
def traceplot(*args, **kwargs):
try:
kwargs.setdefault('compact', True)
return az.plot_trace(*args, **kwargs)
except TypeError:
kwargs.pop('compact')
return az.plot_trace(*args, **kwargs)
# additional arg mapping for compare plot
@functools.wraps(az.plot_compare)
def compareplot(*args, **kwargs):
if 'comp_df' in kwargs:
comp_df = kwargs['comp_df'].copy()
else:
args = list(args)
comp_df = args[0].copy()
if 'WAIC' in comp_df.columns:
comp_df = comp_df.rename(index=str,
columns={'WAIC': 'waic',
'pWAIC': 'p_waic',
'dWAIC': 'd_waic',
'SE': 'se',
'dSE': 'dse',
'var_warn': 'warning'})
elif 'LOO' in comp_df.columns:
comp_df = comp_df.rename(index=str,
columns={'LOO': 'loo',
'pLOO': 'p_loo',
'dLOO': 'd_loo',
'SE': 'se',
'dSE': 'dse',
'shape_warn': 'warning'})
if 'comp_df' in kwargs:
kwargs['comp_df'] = comp_df
else:
args[0] = comp_df
return az.plot_compare(*args, **kwargs)
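# For example, a legacy comparison table with columns WAIC/pWAIC/dWAIC/SE/dSE is
# renamed above to arviz's waic/p_waic/d_waic/se/dse schema before plotting.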
from .posteriorplot import plot_posterior_predictive_glm
# Access to arviz plots: base plots provided by arviz
for plot in az.plots.__all__:
setattr(sys.modules[__name__], plot, map_args(getattr(az.plots, plot)))
__all__ = tuple(az.plots.__all__) + (
'autocorrplot',
'compareplot',
'forestplot',
'kdeplot',
'plot_posterior',
'traceplot',
'energyplot',
'densityplot',
'pairplot',
'plot_posterior_predictive_glm',
)
|
PypiClean
|
/love_course_2016_2019-2023.3.1.0-py3-none-any.whl/LoveCourse20162019/docs/an-xiao-yao/6、【安小妖】恋爱达人速成班:魅力工程-第8课:建设正确的聊天心态!.md
|
# 6. 【安小妖】 Dating Expert Crash Course: Charm Engineering - Lesson 8: Building the Right Chatting Mindset!

If you spend every day waiting for a woman to reply; if all you ever ask, back and forth, is "Busy?", "You there?", "How have you been lately?"; if you worry about running out of topics; if you are always afraid of making mistakes; if you are forever trying to prove yourself; if you are forever trying to please women — then the problem is not your chatting, it is your mindset.

It is like being at a ball with only one dance partner: the moment she is invited away by someone else, you become a pitiful loser. But if you have ten partners to choose from, then when one woman is led away you still have nine; you will not panic, let alone be afraid, because you know you still have options. The point of having options is not to turn you into a womanizer who flirts with everyone, but to make you see this: if she knows she is your one and only, that you have no other choice and no way to attract anyone else, then she will not be the least bit afraid of losing you, and she becomes less and less likely to ever feel anything for you. Women have their vanity; she wants to be your unique one and only. And "unique one and only" means you chose her out of a great many women, not that she was the only woman you could choose — the difference is enormous. Of course you must not be a playboy, and you should not string several women along. But once you have the power to choose, and can control your own desire, you will face women with a very healthy mindset: even if you do not choose me, I do not need to ingratiate myself with you; I am good enough, and I am not begging to chase you.

When you want to extract value from a woman — beauty, say, or good emotional feedback — and you want it urgently, you cannot wait to attract her investment with your own value; you want it right now. So what do you do? All you can think of is begging and pleading. Nervousness then makes you worry about misspeaking, and fear of misspeaking makes you dread her negative feedback, which only inflates your neediness. The more you think this way, the more nervous you become, wishing you could crawl into a hole, yet you still want to keep chatting with the woman you like. Trapped in this contradiction you suffer, your brain stops thinking, and you grow even more nervous — a vicious circle. That is one reason you tense up around women.

It may also be that you simply have few social opportunities. If your daily life leans toward staying home, your social and verbal abilities gradually atrophy. Meeting a stranger of the same sex is still manageable; but the moment you meet a woman, want to extract her value, and lack the ability, "what do I do?" becomes your only thought, and your brain falls into a helpless daze. That is also one cause of social anxiety.

The nervous mindset many men bring to dates is, at bottom, a fear of gain and loss — the self limiting the self. To make it concrete, split yourself into two parts: the part that cares about social evaluation, call it the A-me, and the inner part that drives your actions, call it the B-me. The anxiety is really the A-me limiting the B-me. For example, when the B-me approaches a girl and she responds negatively — say she rejects the approach — the A-me receives that external negative feedback and starts passing judgment, deciding that what the B-me just did was wrong. From then on the B-me's actions keep getting vetoed by the A-me; afraid of messing up, your natural performance is severely constrained.

The same thing happens all the time in Honor of Kings (王者荣耀): if you whiff your ultimate a few minutes into a match, odds are you will keep whiffing it for a while, because the instant you miss, your subconscious says, "God, how could I miss that?" In the next fight you recall the last mistake, and that costs you the chance to play normally. Another example many people will recognize: when by sheer luck you win over one girl, for a while afterward you feel charmed — other girls fall for you effortlessly and you feel you can hit it off with any woman. Conversely, if you invest heavily in one woman but keep being rejected or hurt, during that same period every girl seems impossible, and you agonize over everything anyone says.

Finally, understand that chatting with women rests on one big premise: who you are, not how you chat. If you are someone she barely knows, someone with no attraction in her eyes, then whatever topic you raise, replying is strictly optional to her. But if you are a high-value man, a man she finds attractive, then even a single emoji will set her interpreting: what does he mean? Is he asking me out? What should I say back?

There is also the question of what you and the woman are to each other, which directly determines what you can talk about. If you are just ordinary friends and you fuss over whether she has dressed warmly for the cold, many women will find it crossing a line; if you are a couple and you do not care whether she is cold, then in her eyes you simply do not love her. Ordinary friends should not trade good-morning and good-night messages every day and call each other "baby"; familiar friends should not be excessively, performatively warm; close friends may comfort one another, but should not pour their hearts out; and someone you are consoling may be sounded out, but you should not blurt a confession in a sudden rush of blood. There is a boundary between any two people. So the point is who you are — not what you say, still less what you talk about. While chatting with a woman, always keep an accurate read on where the relationship currently stands; that alone will spare you most mistakes.

As modern men we need not lord it over anyone, but deep down women often hope the man will shoulder the risks of courtship. A man should lead the woman, not be steered by her. Only when you hold the initiative in the relationship do you get more of a say, stop being led around by the nose, and stop endlessly guessing at the meaning behind her every word and gesture.
|
PypiClean
|
/torch_mlir-20221213.686-cp310-cp310-macosx_11_0_universal2.whl/torch_mlir/__init__.py
|
from typing import Optional, Sequence, Union, List, Dict, Tuple
from enum import Enum
import sys
from io import StringIO
from torch._functorch.compile_utils import strip_overloads
import torch
from torch_mlir.passmanager import PassManager
from .compiler_utils import run_pipeline_with_repro_report
from torch_mlir.dialects.torch.importer.jit_ir import ClassAnnotator, ImportOptions, ModuleBuilder
class OutputType(Enum):
"""The kind of output that `torch_mlir.compile` can produce.
In MLIR terminology, this describes the mix of dialects that will be
produced by the conversion process.
    In user-facing APIs, this type can always be passed interchangeably with an
    appropriate string specifying the output type. The allowed strings are
    the set of enum values, allowed to be case insensitive and with `-` allowed
in place of `_`. The `OutputType.get` static method can be used to convert
from a string to an `OutputType` instance.
"""
# This output type consists of `torch` dialect ops that have been converted
# maximally to value semantics, decomposed, and shapes have been inferred.
TORCH = "torch"
# The output type contains a mix of `linalg`-on-tensors ops, `scf`, and
# `arith` ops (and also `math` and `tm_tensor`). It can be thought of
# as taking the `TORCH` output type and lowering it so that tensor
# computations are done with `linalg`-on-tensors ops.
LINALG_ON_TENSORS = "linalg-on-tensors"
# This output type consists of `tosa` dialect ops. It can be thought of
# as taking the `TORCH` output type and lowering it to TOSA.
TOSA = "tosa"
# This output type consists of `mhlo` dialect ops. It can be thought of
# as taking the `TORCH` output type and lowering it to MHLO.
MHLO = "mhlo"
# Raw output of the JIT IR importer. This is not expected to be useful
# for end-users, but can be convenient for development or reporting bugs.
RAW = "raw"
@staticmethod
def get(spec: Union[str, "OutputType"]) -> "OutputType":
"""Gets an OutputType from allowed way to specify one.
Args:
spec: An OutputType instance or the case-insensitive name of one of the
enum values.
Returns:
An OutputType instance.
"""
if isinstance(spec, OutputType):
return spec
spec = spec.upper().replace("-", "_")
if spec not in OutputType.__members__:
raise ValueError(f"For output_type= argument, expected one of: "
f"{', '.join(OutputType.__members__.keys())}")
return OutputType[spec]
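# For example, OutputType.get("linalg-on-tensors") and OutputType.get("LINALG_ON_TENSORS")
# both return OutputType.LINALG_ON_TENSORS; an unrecognized string raises ValueError.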
class TensorPlaceholder:
"""A class that represents a formal parameter of a given shape and dtype.
This class can be constructed explicitly from a shape and dtype:
```python
placeholder = TensorPlaceholder([3, 4], torch.float32)
```
This class can also be constructed from a `torch.Tensor` which is already
known to be a valid input to the function. In this case, a set of
dynamic axes are allowed to be specified.
```python
placeholder = TensorPlaceholder.like(torch.ones(3, 4), dynamic_axes=[1])
# Equivalent to `TensorPlaceholder([3, -1], torch.float32)`
```
"""
def __init__(self, shape: List[int], dtype: torch.dtype):
"""Create a tensor with shape `shape` and dtype `dtype`.
Args:
shape: The shape of the tensor. A size of `-1` indicates that the
dimension has an unknown size.
dtype: The dtype of the tensor.
"""
self.shape = shape
self.dtype = dtype
@staticmethod
def like(tensor: torch.Tensor, dynamic_axes: List[int] = None):
"""Create a tensor placeholder that is like the given tensor.
Args:
tensor: The tensor to create a placeholder for.
dynamic_axes: A list of dynamic axes. If specified, the compiled
module will allow those axes to be any size at runtime.
"""
if dynamic_axes is None:
dynamic_axes = []
shape = []
for i, dim in enumerate(tensor.shape):
if i in dynamic_axes:
shape.append(-1)
else:
shape.append(dim)
return TensorPlaceholder(shape, tensor.dtype)
_example_arg = Union[TensorPlaceholder, torch.Tensor]
_example_args_for_one_method = Union[_example_arg, Sequence[_example_arg]]
_example_args = Union[_example_args_for_one_method, "ExampleArgs"]
class ExampleArgs:
"""A class representing the example arguments to an nn.Module.
In general, an nn.Module may have multiple methods that need to be compiled.
This requires example args for each method. This class is a lightweight
wrapper around a dictionary that maps method names to example arguments.
    In user-facing APIs, this type can always be passed interchangeably with a
single arg or list of args, which normalizes to an ExampleArgs for just
the `forward` method via the `ExampleArgs.get` static method.
"""
def __init__(self):
self._example_args = {}
def add_method(self, method_name: str, example_args: _example_args_for_one_method):
"""Adds example args for a method.
Args:
method_name: The name of the method. Must have not already been
added previously as a method.
example_args: The example args for the method.
Returns:
self, for chaining.
"""
assert method_name not in self._example_args
self._example_args[method_name] = ExampleArgs._canonicalize_args(
example_args)
return self
@staticmethod
def get(example_args: _example_args) -> "ExampleArgs":
"""Gets an ExampleArgs from one of the permissible ways to specify one.
Args:
example_args: An ExampleArgs instance or a single arg or list of args.
Returns:
An ExampleArgs instance.
"""
if isinstance(example_args, ExampleArgs):
return example_args
return ExampleArgs().add_method("forward", example_args)
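    # For example, ExampleArgs.get(torch.ones(3, 4)) wraps the bare tensor as the
    # example arguments of the 'forward' method.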
@staticmethod
def _canonicalize_args(example_args: _example_args_for_one_method):
"""Canonicalize the args for one method into a tuple."""
if not isinstance(example_args, Sequence):
example_args = [example_args]
for arg in example_args:
if not isinstance(arg, (TensorPlaceholder, torch.Tensor)):
raise Exception(f"Only Tensor's, TensorPlaceholder's, or sequences of "
f"Tensor's and TensorPlaceholder's are supported as "
f"example args for method inputs. "
f"Got '{arg}'.")
return tuple(example_args)
def _get_methods(self):
return self._example_args.keys()
def _get_for_annotation(self):
result = {}
for method_name, example_args in self._example_args.items():
placeholders = []
for arg in example_args:
if isinstance(arg, TensorPlaceholder):
placeholders.append(arg)
else:
assert isinstance(arg, torch.Tensor)
placeholders.append(TensorPlaceholder.like(arg))
result[method_name] = placeholders
return result
def _get_for_tracing(
self,
use_tracing: bool,
ignore_traced_shapes: bool,
) -> Dict[str, Tuple[_example_arg, ...]]:
result = {}
for method_name, example_args in self._example_args.items():
# If we are tracing, then we need to convert any placeholders into
# concrete values.
if use_tracing:
example_args_for_trace = []
for arg in example_args:
if isinstance(arg, TensorPlaceholder):
if not ignore_traced_shapes:
# To avoid accidental footguns, we require
# `ignore_traced_shapes` to be true if we're using
# TensorPlaceholder's, as it falls into the same
# "hopefully the trace works for different inputs"
# bucket of concerns.
raise Exception(
"TensorPlaceholder can only be used with tracing when `ignore_traced_shapes=True`")
# For any dynamic dimensions, replace them with "7"
# arbitrarily. If a user is using dynamic dimensions with
# tracing, they are walking on thin ice already -- assume
# they know what they are doing and that their trace is
# correct for any specific concrete size.
shape = [s if s != -1 else 7 for s in arg.shape]
example_args_for_trace.append(
torch.ones(*shape, dtype=arg.dtype))
else:
assert isinstance(arg, torch.Tensor)
example_args_for_trace.append(arg)
example_args = tuple(example_args_for_trace)
result[method_name] = example_args
return result
# The set of ops that are considered legal for each backend.
# These are currently quite load-bearing, since different backends might be
# missing patterns for decomposed forms of certain ops.
# TODO: Tighten up the definition of these "conditionally legal for backends"
# ops in the backend contract, and move these lists somewhere deeper in the
# compiler where each backend can "own" its set of legal ops.
BACKEND_LEGAL_OPS = {
OutputType.TOSA: ['torch.aten.flatten.using_ints', 'torch.aten.native_layer_norm', 'torch.aten.linear'],
OutputType.LINALG_ON_TENSORS: ['torch.aten.flatten.using_ints', ],
OutputType.MHLO: [],
}
def compile(model: torch.nn.Module,
example_args: _example_args,
output_type: Union[str, "OutputType"] = OutputType.TORCH,
use_tracing: bool = False,
ignore_traced_shapes=False,
backend_legal_ops: Optional[Sequence[str]] = None,
verbose: bool = False):
"""Convert a PyTorch model to MLIR.
Args:
model: The PyTorch model to convert.
example_args: A list of example arguments to use when inferring the
shapes of the arguments to `forward` method of the model.
A single tensor is treated as a list of a single tensor.
A TensorPlaceholder object is also allowed in the place of any
Tensor. For models with multiple methods, an `ExampleArgs` object
can be passed.
output_type: The kind of output to produce. See `OutputType` for more
details.
use_tracing: If True, use `torch.jit.trace` to convert the model to
JIT IR rather than `torch.jit.script`.
ignore_traced_shapes: If True, ignore the shapes that were observed
during tracing. This should only be used if one knows that the
original traced program would result in the same trace (modulo
shapes) for all shape combinations implied by any
`TensorPlaceholder`'s used as `example_args`. Also,
strictly-speaking, this option covers dtypes too, but we just say
"shapes" to be succinct.
backend_legal_ops: A list of ops that should be considered legal for
the backend. An op that is considered legal will not be decomposed.
This option is only valid with the `"torch"` output type.
verbose: If true, print extra information about the conversion.
Returns:
An MLIR module that contains the converted model in the specified
output type.
"""
output_type = OutputType.get(output_type)
example_args = ExampleArgs.get(example_args)
if ignore_traced_shapes and not use_tracing:
raise Exception("`ignore_traced_shapes` requires `use_tracing`")
# We only allow `backend_legal_ops` to be specified for the `"torch"`
# output type because the other output types actually invoke their
# respective backends (Linalg, TOSA, or MHLO), and those backends have
# very specific requirements about the ops which are legal.
# See `BACKEND_LEGAL_OPS` for more details.
if backend_legal_ops is not None:
if output_type != OutputType.TORCH:
raise Exception("`backend_legal_ops` is only valid with the "
"`torch` output type")
backend_legal_ops = list(sorted(set(backend_legal_ops)))
else:
backend_legal_ops = BACKEND_LEGAL_OPS.get(output_type, [])
# For FX-based models, automatically strip overloads.
if isinstance(model, torch.fx.GraphModule):
strip_overloads(model)
# Get the model as JIT IR (TorchScript) for import.
# TODO: Longer-term, we probably need to split `torch_mlir.compile`.
# There should be an "acquisition" step that does
# tracing/scripting/importing from FX/using torchdynamo.export/etc.
# + any lowering to the backend contract. Then there should be a
# "backend lowering" step that does the actual lowering to each
# backend. This separation should be visible at the Python API level, and
# we can implement a deliberately simplified API like `torch_mlir.compile`
# on top of those building blocks.
if isinstance(model, torch.jit._script.RecursiveScriptModule):
# If the user already converted the model to JIT IR themselves, just
# do some basic error checking, but take the model as-is.
for method_name in example_args._get_methods():
if not hasattr(model, method_name):
raise Exception(
f"Model does not have exported method '{method_name}', "
f"requested in `example_args`. Consider adding "
f"`@torch.jit.export` to the method definition.")
scripted = model
elif use_tracing:
scripted = torch.jit.trace_module(
model,
example_args._get_for_tracing(use_tracing, ignore_traced_shapes)
)
else:
# Make sure that all the methods that the user requested get scripted.
# By default, PyTorch only scripts the `forward` method and transitive
# callees.
for method_name in example_args._get_methods():
torch.jit.export(getattr(model, method_name).__func__)
scripted = torch.jit.script(model)
class_annotator = ClassAnnotator()
class_annotator.exportNone(scripted._c._type())
for method_name, example_args in example_args._get_for_annotation().items():
class_annotator.exportPath(scripted._c._type(), [method_name])
annotation = [None] # `None` is always the annotation for "self".
for arg in example_args:
annotation.append((arg.shape, arg.dtype, True))
class_annotator.annotateArgs(
scripted._c._type(), [method_name], annotation)
mb = ModuleBuilder()
import_options = ImportOptions()
import_options.ignoreExistingTensorShapesAndDtypes = ignore_traced_shapes
try:
original_stderr = sys.stderr
sys.stderr = StringIO()
# Import the TorchScript module to MLIR
mb.import_module(scripted._c, class_annotator, import_options)
except Exception as e:
raise Exception(f"""
PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
### Importer C++ Exception:
{e}
### Importer Diagnostics:
{sys.stderr.getvalue()}
""") from None
finally:
sys.stderr = original_stderr
if output_type == OutputType.RAW:
return mb.module
option_string = "{backend-legal-ops=" + ",".join(backend_legal_ops) + "}"
run_pipeline_with_repro_report(
mb.module,
f"builtin.module(torchscript-module-to-torch-backend-pipeline{option_string})",
"Lowering TorchScript IR -> Torch Backend IR",
)
if verbose:
print("\n====================")
print("Torch Backend IR")
print(mb.module)
if output_type == OutputType.TORCH:
return mb.module
if output_type == OutputType.TOSA:
run_pipeline_with_repro_report(
mb.module,
"builtin.module(torch-backend-to-tosa-backend-pipeline)",
"Lowering Torch Backend IR -> TOSA Backend IR")
if verbose:
print("\n====================")
print("TOSA Backend IR")
print(mb.module)
return mb.module
if output_type == OutputType.LINALG_ON_TENSORS:
run_pipeline_with_repro_report(
mb.module,
"builtin.module(torch-backend-to-linalg-on-tensors-backend-pipeline)",
"Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR")
if verbose:
print("\n====================")
print("LINALG Backend IR")
print(mb.module)
return mb.module
elif output_type == OutputType.MHLO:
run_pipeline_with_repro_report(
mb.module,
"builtin.module(torch-backend-to-mhlo-backend-pipeline)",
"Lowering Torch Backend IR -> MHLO Backend IR")
if verbose:
print("\n====================")
print("MHLO Backend IR")
print(mb.module)
return mb.module
raise Exception(f"Unknown OutputType: {output_type}")
|
PypiClean
|