"""
Utility functions and objects to ease Python 3 compatibility.
"""
import sys
import re
import codecs
import warnings
try:
from functools import wraps
assert wraps
except ImportError:
# No-op wraps decorator
def wraps(f):
def dec(newf):
return newf
return dec
def cast_bytes(s, enc='utf-8'):
if isinstance(s, str):
return s.encode(enc)
return s
PY3 = (sys.version_info[0] >= 3)
def _modify_str_or_docstring(str_change_func):
@wraps(str_change_func)
def wrapper(func_or_str):
if isinstance(func_or_str, str):
func = None
doc = func_or_str
else:
func = func_or_str
doc = func.__doc__
doc = str_change_func(doc)
if func:
func.__doc__ = doc
return func
return doc
return wrapper
if PY3:
# Python 3:
# ---------
def b(s):
return s.encode('ascii')
def ascii(stream):
return codecs.getreader('ascii')(stream)
def bopen(*args, **kwargs):
return open(*args, mode = 'rb', **kwargs)
bytestype = bytes
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "'abc'"
"%(b)s'abc'" --> "b'abc'"
"55%(L)s" --> "55"
"unicode(x)" --> "str(x)"
Accepts a string or a function, so it can be used as a decorator."""
# s may be None if processed by Py2exe
if s is None:
return ''
return s % {'u': '', 'b': 'b', 'L': '', 'unicode': 'str'}
def type_cmp(a, b):
"""Python 2 style comparison based on type"""
ta, tb = type(a).__name__, type(b).__name__
# Ugly hack: some tests rely on tuple sorting before unicode, and I
# don't know if that's important. Better retain it for now.
if ta == 'str':
ta = 'unicode'
if tb == 'str':
tb = 'unicode'
# return 1 if ta > tb else -1 if ta < tb else 0
if ta > tb:
return 1
elif ta < tb:
return -1
else:
return 0
def sign(n):
if n < 0:
return -1
if n > 0:
return 1
return 0
else:
# Python 2
# --------
def b(s):
return s
def ascii(stream):
return stream
bopen = open
bytestype = str
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "u'abc'"
"%(b)s'abc'" --> "'abc'"
"55%(L)s" --> "55L"
Accepts a string or a function, so it can be used as a decorator."""
# s may be None if processed by Py2exe
if s is None:
return ''
return s % {'u': 'u', 'b': '', 'L': 'L', 'unicode': 'unicode'}
def type_cmp(a, b):
# return 1 if a > b else -1 if a < b else 0
if a > b:
return 1
elif a < b:
return -1
else:
return 0
def sign(n):
return cmp(n, 0)
r_unicodeEscape = re.compile(r'(\\u[0-9A-Fa-f]{4}|\\U[0-9A-Fa-f]{8})')
def _unicodeExpand(s):
return r_unicodeEscape.sub(lambda m: chr(int(m.group(0)[2:], 16)), s)
narrow_build = False
try:
chr(0x10FFFF)
except ValueError:
narrow_build = True
if narrow_build:
def _unicodeExpand(s):
try:
return r_unicodeEscape.sub(
lambda m: chr(int(m.group(0)[2:], 16)), s)
except ValueError:
warnings.warn(
'Encountered a unicode char > 0xFFFF in a narrow python build. '
'Trying to degrade gracefully, but this can cause problems '
'later when working with the string:\n%s' % s)
return r_unicodeEscape.sub(
lambda m: codecs.decode(m.group(0), 'unicode_escape'), s)
def decodeStringEscape(s):
"""
s is a byte string; replace backslash escapes in the string
"""
if not PY3:
s = s.decode('string-escape')
else:
s = s.replace('\\t', '\t')
s = s.replace('\\n', '\n')
s = s.replace('\\r', '\r')
s = s.replace('\\b', '\b')
s = s.replace('\\f', '\f')
s = s.replace('\\"', '"')
s = s.replace("\\'", "'")
s = s.replace('\\\\', '\\')
return s
#return _unicodeExpand(s) # hmm - string escape doesn't do unicode escaping
def decodeUnicodeEscape(s):
"""
s is a unicode string; replace both \n-style escapes and \uXXXX unicode escapes
"""
if not PY3:
s = s.encode('utf-8').decode('string-escape')
s = _unicodeExpand(s)
else:
s = s.replace('\\t', '\t')
s = s.replace('\\n', '\n')
s = s.replace('\\r', '\r')
s = s.replace('\\b', '\b')
s = s.replace('\\f', '\f')
s = s.replace('\\"', '"')
s = s.replace("\\'", "'")
s = s.replace('\\\\', '\\')
s = _unicodeExpand(s) # hmm - string escape doesn't do unicode escaping
return s
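# Illustrative usage sketch (not part of the original module), shown for the
# Python 3 code path; the sample string is made up for the demo.
def _demo_escape_decoding():
    raw = 'line one\\nline two\\t\\u00ACnegated'
    print(decodeStringEscape(raw))   # expands \n and \t but leaves \u00AC alone
    print(decodeUnicodeEscape(raw))  # additionally expands \u00AC into '¬'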
# -----------------------------------------------------------------------------
import pybullet
import gym, gym.spaces, gym.utils
import numpy as np
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)
import pybullet_data
class XmlBasedRobot:
"""
Base class for XML-described (MJCF, URDF or SDF) agents.
"""
self_collision = True
def __init__(self, robot_name, action_dim, obs_dim, self_collision):
self.parts = None
self.objects = []
self.jdict = None
self.ordered_joints = None
self.robot_body = None
high = np.ones([action_dim])
self.action_space = gym.spaces.Box(-high, high)
high = np.inf * np.ones([obs_dim])
self.observation_space = gym.spaces.Box(-high, high)
#self.model_xml = model_xml
self.robot_name = robot_name
self.self_collision = self_collision
def addToScene(self, bullet_client, bodies):
self._p = bullet_client
if self.parts is not None:
parts = self.parts
else:
parts = {}
if self.jdict is not None:
joints = self.jdict
else:
joints = {}
if self.ordered_joints is not None:
ordered_joints = self.ordered_joints
else:
ordered_joints = []
if np.isscalar(bodies): # streamline the case where bodies is actually just one body
bodies = [bodies]
dump = 0
for i in range(len(bodies)):
if self._p.getNumJoints(bodies[i]) == 0:
part_name, robot_name = self._p.getBodyInfo(bodies[i])
self.robot_name = robot_name.decode("utf8")
part_name = part_name.decode("utf8")
parts[part_name] = BodyPart(self._p, part_name, bodies, i, -1)
for j in range(self._p.getNumJoints(bodies[i])):
self._p.setJointMotorControl2(bodies[i],
j,
pybullet.POSITION_CONTROL,
positionGain=0.1,
velocityGain=0.1,
force=0)
jointInfo = self._p.getJointInfo(bodies[i], j)
joint_name = jointInfo[1]
part_name = jointInfo[12]
joint_name = joint_name.decode("utf8")
part_name = part_name.decode("utf8")
if dump: print("ROBOT PART '%s'" % part_name)
if dump:
print(
"ROBOT JOINT '%s'" % joint_name
) # limits = %+0.2f..%+0.2f effort=%0.3f speed=%0.3f" % ((joint_name,) + j.limits()) )
parts[part_name] = BodyPart(self._p, part_name, bodies, i, j)
if part_name == self.robot_name:
self.robot_body = parts[part_name]
if i == 0 and j == 0 and self.robot_body is None: # if nothing else works, we take this as robot_body
parts[self.robot_name] = BodyPart(self._p, self.robot_name, bodies, 0, -1)
self.robot_body = parts[self.robot_name]
if joint_name[:6] == "ignore":
Joint(self._p, joint_name, bodies, i, j).disable_motor()
continue
if joint_name[:8] != "jointfix":
joints[joint_name] = Joint(self._p, joint_name, bodies, i, j)
ordered_joints.append(joints[joint_name])
joints[joint_name].power_coef = 100.0
# TODO: Maybe we need this
# joints[joint_name].power_coef, joints[joint_name].max_velocity = joints[joint_name].limits()[2:4]
# self.ordered_joints.append(joints[joint_name])
# self.jdict[joint_name] = joints[joint_name]
return parts, joints, ordered_joints, self.robot_body
def reset_pose(self, position, orientation):
self.parts[self.robot_name].reset_pose(position, orientation)
class MJCFBasedRobot(XmlBasedRobot):
"""
Base class for mujoco .xml based agents.
"""
def __init__(self, model_xml, robot_name, action_dim, obs_dim, self_collision=True):
XmlBasedRobot.__init__(self, robot_name, action_dim, obs_dim, self_collision)
self.model_xml = model_xml
self.doneLoading = 0
def reset(self, bullet_client):
self._p = bullet_client
#print("Created bullet_client with id=", self._p._client)
if (self.doneLoading == 0):
self.ordered_joints = []
self.doneLoading = 1
if self.self_collision:
self.objects = self._p.loadMJCF(os.path.join(pybullet_data.getDataPath(), "mjcf",
self.model_xml),
flags=pybullet.URDF_USE_SELF_COLLISION |
pybullet.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS)
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(
self._p, self.objects)
else:
self.objects = self._p.loadMJCF(
os.path.join(pybullet_data.getDataPath(), "mjcf", self.model_xml))
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(
self._p, self.objects)
self.robot_specific_reset(self._p)
s = self.calc_state(
) # optimization: calc_state() can calculate something in self.* for calc_potential() to use
return s
def calc_potential(self):
return 0
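# Illustrative sketch (not part of the original module): the minimum a concrete
# MJCFBasedRobot subclass must provide for reset() above to work, namely
# robot_specific_reset() and calc_state(). The "hopper.xml"/"torso" names and
# the action/observation sizes are assumptions based on the standard
# pybullet_data MJCF assets, used here only for demonstration.
class _DemoMJCFHopper(MJCFBasedRobot):

    def __init__(self):
        MJCFBasedRobot.__init__(self, "hopper.xml", "torso", action_dim=3, obs_dim=15)

    def robot_specific_reset(self, bullet_client):
        # Put every controllable joint at zero position and velocity.
        for j in self.ordered_joints:
            j.reset_current_position(0.0, 0.0)

    def calc_state(self):
        # Concatenate the normalized (position, velocity) pairs of all joints.
        return np.concatenate([j.current_relative_position() for j in self.ordered_joints])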
class URDFBasedRobot(XmlBasedRobot):
"""
Base class for URDF-based robots.
"""
def __init__(self,
model_urdf,
robot_name,
action_dim,
obs_dim,
basePosition=[0, 0, 0],
baseOrientation=[0, 0, 0, 1],
fixed_base=False,
self_collision=False):
XmlBasedRobot.__init__(self, robot_name, action_dim, obs_dim, self_collision)
self.model_urdf = model_urdf
self.basePosition = basePosition
self.baseOrientation = baseOrientation
self.fixed_base = fixed_base
def reset(self, bullet_client):
self._p = bullet_client
self.ordered_joints = []
print(os.path.join(os.path.dirname(__file__), "data", self.model_urdf))
if self.self_collision:
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(
self._p,
self._p.loadURDF(os.path.join(pybullet_data.getDataPath(), self.model_urdf),
basePosition=self.basePosition,
baseOrientation=self.baseOrientation,
useFixedBase=self.fixed_base,
flags=pybullet.URDF_USE_SELF_COLLISION))
else:
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(
self._p,
self._p.loadURDF(os.path.join(pybullet_data.getDataPath(), self.model_urdf),
basePosition=self.basePosition,
baseOrientation=self.baseOrientation,
useFixedBase=self.fixed_base))
self.robot_specific_reset(self._p)
s = self.calc_state(
) # optimization: calc_state() can calculate something in self.* for calc_potential() to use
self.potential = self.calc_potential()
return s
def calc_potential(self):
return 0
class SDFBasedRobot(XmlBasedRobot):
"""
Base class for SDF robots in a Scene.
"""
def __init__(self,
model_sdf,
robot_name,
action_dim,
obs_dim,
basePosition=[0, 0, 0],
baseOrientation=[0, 0, 0, 1],
fixed_base=False,
self_collision=False):
XmlBasedRobot.__init__(self, robot_name, action_dim, obs_dim, self_collision)
self.model_sdf = model_sdf
self.fixed_base = fixed_base
def reset(self, bullet_client):
self._p = bullet_client
self.ordered_joints = []
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(
self._p, # TODO: Not sure if this works, try it with kuka
self._p.loadSDF(os.path.join("models_robot", self.model_sdf)))
self.robot_specific_reset(self._p)
s = self.calc_state(
) # optimization: calc_state() can calculate something in self.* for calc_potential() to use
self.potential = self.calc_potential()
return s
def calc_potential(self):
return 0
class Pose_Helper:  # dummy class to comply with the original interface
def __init__(self, body_part):
self.body_part = body_part
def xyz(self):
return self.body_part.current_position()
def rpy(self):
return pybullet.getEulerFromQuaternion(self.body_part.current_orientation())
def orientation(self):
return self.body_part.current_orientation()
class BodyPart:
def __init__(self, bullet_client, body_name, bodies, bodyIndex, bodyPartIndex):
self.bodies = bodies
self._p = bullet_client
self.bodyIndex = bodyIndex
self.bodyPartIndex = bodyPartIndex
self.initialPosition = self.current_position()
self.initialOrientation = self.current_orientation()
self.bp_pose = Pose_Helper(self)
def state_fields_of_pose_of(
self, body_id,
link_id=-1): # a method you will most probably need a lot to get pose and orientation
if link_id == -1:
(x, y, z), (a, b, c, d) = self._p.getBasePositionAndOrientation(body_id)
else:
(x, y, z), (a, b, c, d), _, _, _, _ = self._p.getLinkState(body_id, link_id)
return np.array([x, y, z, a, b, c, d])
def get_position(self):
return self.current_position()
def get_pose(self):
return self.state_fields_of_pose_of(self.bodies[self.bodyIndex], self.bodyPartIndex)
def speed(self):
if self.bodyPartIndex == -1:
(vx, vy, vz), _ = self._p.getBaseVelocity(self.bodies[self.bodyIndex])
else:
# The angular yaw rate is unpacked as vyaw so it does not shadow the linear vy returned below.
(x, y, z), (a, b, c, d), _, _, _, _, (vx, vy, vz), (vr, vp, vyaw) = self._p.getLinkState(
self.bodies[self.bodyIndex], self.bodyPartIndex, computeLinkVelocity=1)
return np.array([vx, vy, vz])
def current_position(self):
return self.get_pose()[:3]
def current_orientation(self):
return self.get_pose()[3:]
def get_orientation(self):
return self.current_orientation()
def reset_position(self, position):
self._p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], position,
self.get_orientation())
def reset_orientation(self, orientation):
self._p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], self.get_position(),
orientation)
def reset_velocity(self, linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0]):
self._p.resetBaseVelocity(self.bodies[self.bodyIndex], linearVelocity, angularVelocity)
def reset_pose(self, position, orientation):
self._p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], position, orientation)
def pose(self):
return self.bp_pose
def contact_list(self):
return self._p.getContactPoints(self.bodies[self.bodyIndex], -1, self.bodyPartIndex, -1)
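# Illustrative sketch (not part of the original module): reading pose and speed
# of a body's base through the BodyPart wrapper above. Loading "r2d2.urdf" from
# pybullet_data and the part name "base" are assumptions for demonstration.
def _demo_body_part():
    from pybullet_utils import bullet_client
    p = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    body = p.loadURDF("r2d2.urdf", basePosition=[0, 0, 0.5])
    base = BodyPart(p, "base", [body], bodyIndex=0, bodyPartIndex=-1)
    print("pose (x, y, z, qx, qy, qz, qw):", base.get_pose())
    print("linear speed:", base.speed())
    p.disconnect()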
class Joint:
def __init__(self, bullet_client, joint_name, bodies, bodyIndex, jointIndex):
self.bodies = bodies
self._p = bullet_client
self.bodyIndex = bodyIndex
self.jointIndex = jointIndex
self.joint_name = joint_name
jointInfo = self._p.getJointInfo(self.bodies[self.bodyIndex], self.jointIndex)
self.lowerLimit = jointInfo[8]
self.upperLimit = jointInfo[9]
self.power_coef = 0  # matches the attribute name assigned in addToScene
def set_state(self, x, vx):
self._p.resetJointState(self.bodies[self.bodyIndex], self.jointIndex, x, vx)
def current_position(self):  # synonym for get_state()
return self.get_state()
def current_relative_position(self):
pos, vel = self.get_state()
pos_mid = 0.5 * (self.lowerLimit + self.upperLimit)
return (2 * (pos - pos_mid) / (self.upperLimit - self.lowerLimit), 0.1 * vel)
def get_state(self):
x, vx, _, _ = self._p.getJointState(self.bodies[self.bodyIndex], self.jointIndex)
return x, vx
def get_position(self):
x, _ = self.get_state()
return x
def get_orientation(self):
_, r = self.get_state()
return r
def get_velocity(self):
_, vx = self.get_state()
return vx
def set_position(self, position):
self._p.setJointMotorControl2(self.bodies[self.bodyIndex],
self.jointIndex,
pybullet.POSITION_CONTROL,
targetPosition=position)
def set_velocity(self, velocity):
self._p.setJointMotorControl2(self.bodies[self.bodyIndex],
self.jointIndex,
pybullet.VELOCITY_CONTROL,
targetVelocity=velocity)
def set_motor_torque(self, torque):  # synonym for set_torque()
self.set_torque(torque)
def set_torque(self, torque):
self._p.setJointMotorControl2(bodyIndex=self.bodies[self.bodyIndex],
jointIndex=self.jointIndex,
controlMode=pybullet.TORQUE_CONTROL,
force=torque) #, positionGain=0.1, velocityGain=0.1)
def reset_current_position(self, position, velocity):  # synonym for reset_position()
self.reset_position(position, velocity)
def reset_position(self, position, velocity):
self._p.resetJointState(self.bodies[self.bodyIndex],
self.jointIndex,
targetValue=position,
targetVelocity=velocity)
self.disable_motor()
def disable_motor(self):
self._p.setJointMotorControl2(self.bodies[self.bodyIndex],
self.jointIndex,
controlMode=pybullet.POSITION_CONTROL,
targetPosition=0,
targetVelocity=0,
positionGain=0.1,
velocityGain=0.1,
force=0)
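# Illustrative sketch (not part of the original module): wrapping one joint of a
# pybullet_data URDF in the Joint helper above and driving it in position
# control. The "r2d2.urdf" model and joint index 0 are assumptions chosen only
# for demonstration.
def _demo_joint_position_control():
    from pybullet_utils import bullet_client
    p = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    body = p.loadURDF("r2d2.urdf")
    joint = Joint(p, "demo_joint", [body], bodyIndex=0, jointIndex=0)
    joint.set_position(0.5)
    for _ in range(100):
        p.stepSimulation()
    position, velocity = joint.get_state()
    print("joint position %.3f, velocity %.3f" % (position, velocity))
    p.disconnect()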
# -----------------------------------------------------------------------------
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FluidRelayServersOperations(object):
"""FluidRelayServersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~fluid_relay_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FluidRelayServer"
"""Get a Fluid Relay server.
Get a Fluid Relay server.
:param resource_group: The resource group containing the resource.
:type resource_group: str
:param name: The resource name.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FluidRelayServer, or the result of cls(response)
:rtype: ~fluid_relay_management_client.models.FluidRelayServer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FluidRelayServer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-15-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroup': self._serialize.url("resource_group", resource_group, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('FluidRelayServer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{name}'} # type: ignore
def create_or_update(
self,
resource_group, # type: str
name, # type: str
resource, # type: "_models.FluidRelayServer"
**kwargs # type: Any
):
# type: (...) -> "_models.FluidRelayServer"
"""Create or Update a Fluid Relay server.
Create or Update a Fluid Relay server.
:param resource_group: The resource group containing the resource.
:type resource_group: str
:param name: The resource name.
:type name: str
:param resource: The details of the Fluid Relay server resource.
:type resource: ~fluid_relay_management_client.models.FluidRelayServer
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FluidRelayServer, or the result of cls(response)
:rtype: ~fluid_relay_management_client.models.FluidRelayServer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FluidRelayServer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-15-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroup': self._serialize.url("resource_group", resource_group, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(resource, 'FluidRelayServer')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
deserialized = self._deserialize('FluidRelayServer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{name}'} # type: ignore
def update(
self,
resource_group, # type: str
name, # type: str
resource, # type: "_models.FluidRelayServerUpdate"
**kwargs # type: Any
):
# type: (...) -> "_models.FluidRelayServer"
"""Update a Fluid Relay server.
Update a Fluid Relay server.
:param resource_group: The resource group containing the resource.
:type resource_group: str
:param name: The resource name.
:type name: str
:param resource: The updatable details of the Fluid Relay server resource.
:type resource: ~fluid_relay_management_client.models.FluidRelayServerUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FluidRelayServer, or the result of cls(response)
:rtype: ~fluid_relay_management_client.models.FluidRelayServer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FluidRelayServer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-15-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroup': self._serialize.url("resource_group", resource_group, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(resource, 'FluidRelayServerUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
deserialized = self._deserialize('FluidRelayServer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{name}'} # type: ignore
def delete(
self,
resource_group, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete a Fluid Relay server.
Delete a Fluid Relay server.
:param resource_group: The resource group containing the resource.
:type resource_group: str
:param name: The resource name.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-15-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroup': self._serialize.url("resource_group", resource_group, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
if response.status_code == 204:
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
if cls:
return cls(pipeline_response, None, response_headers)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{name}'} # type: ignore
def regenerate_key(
self,
resource_group, # type: str
name, # type: str
parameters, # type: "_models.RegenerateKeyRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.FluidRelayServerKeys"
"""Regenerate the primary or secondary key for this server.
Regenerate the primary or secondary key for this server.
:param resource_group: The resource group containing the resource.
:type resource_group: str
:param name: The resource name.
:type name: str
:param parameters: The details of which keys to generate.
:type parameters: ~fluid_relay_management_client.models.RegenerateKeyRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FluidRelayServerKeys, or the result of cls(response)
:rtype: ~fluid_relay_management_client.models.FluidRelayServerKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FluidRelayServerKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-15-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.regenerate_key.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroup': self._serialize.url("resource_group", resource_group, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RegenerateKeyRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
deserialized = self._deserialize('FluidRelayServerKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
regenerate_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{name}/regenerateKey'} # type: ignore
def get_keys(
self,
resource_group, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FluidRelayServerKeys"
"""Regenerate the primary or secondary key for this server.
Regenerate the primary or secondary key for this server.
:param resource_group: The resource group containing the resource.
:type resource_group: str
:param name: The resource name.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FluidRelayServerKeys, or the result of cls(response)
:rtype: ~fluid_relay_management_client.models.FluidRelayServerKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FluidRelayServerKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-15-preview"
accept = "application/json"
# Construct URL
url = self.get_keys.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroup': self._serialize.url("resource_group", resource_group, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
deserialized = self._deserialize('FluidRelayServerKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{name}/getKeys'} # type: ignore
def list_by_subscription(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FluidRelayServerList"]
"""List all Fluid Relay servers in a subscription.
List all Fluid Relay servers in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FluidRelayServerList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~fluid_relay_management_client.models.FluidRelayServerList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FluidRelayServerList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-15-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FluidRelayServerList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.FluidRelay/fluidRelayServers'} # type: ignore
def list_by_resource_group(
self,
resource_group, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FluidRelayServerList"]
"""List all Fluid Relay servers in a resource group.
List all Fluid Relay servers in a resource group.
:param resource_group: The resource group containing the resource.
:type resource_group: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FluidRelayServerList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~fluid_relay_management_client.models.FluidRelayServerList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FluidRelayServerList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-15-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroup': self._serialize.url("resource_group", resource_group, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FluidRelayServerList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers'} # type: ignore
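# Illustrative usage sketch, not part of the generated file. It assumes the
# generated package exposes a FluidRelayManagementClient whose
# fluid_relay_servers attribute is an instance of the operations class above;
# the module path, client name and attribute name are inferred from the
# docstring namespace and are assumptions, not confirmed API.
def _example_fluid_relay_usage():
    from azure.identity import DefaultAzureCredential  # azure-identity package
    from fluid_relay_management_client import FluidRelayManagementClient  # assumed path

    client = FluidRelayManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="<subscription-id>",
    )

    # Single GET; pass cls= to also receive the raw pipeline response.
    server = client.fluid_relay_servers.get("my-resource-group", "my-fluid-relay")
    print(server.name)

    # Paged listing: the returned ItemPaged lazily follows next_link.
    for item in client.fluid_relay_servers.list_by_resource_group("my-resource-group"):
        print(item.id)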
# -----------------------------------------------------------------------------
#! /usr/bin/env python
"""
Module with frame/cube filtering functionalities
"""
from __future__ import division
__author__ = 'C. Gomez @ ULg'
__all__ = ['frame_filter_highpass',
'frame_filter_lowpass',
'cube_filter_highpass',
'cube_filter_iuwt',
'frame_filter_gaussian2d',
'gaussian_kernel']
import numpy as np
import photutils
import pyprind
from scipy.ndimage import gaussian_filter, median_filter
from astropy.convolution import convolve_fft, Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma
from .shapes import frame_center
from ..exlib import iuwt
def cube_filter_iuwt(cube, coeff=5, rel_coeff=1, full_output=False):
"""
Parameters
----------
cube : array_like
Input cube.
coeff : int, optional
Number of wavelet scales to be used in the decomposition.
rel_coeff : int, optional
Number of relevant coefficients, i.e. how many of the wavelet scales are
kept to represent the data. One or two scales are usually enough for
filtering.
full_output : {False, True}, bool optional
If True, an additional cube with the multiscale decomposition of each
frame will be returned.
Returns
-------
cubeout : array_like
Output cube with the filtered frames.
If full_output is True, the filtered cube is returned together with a 4d
cube containing the multiscale decomposition of each frame.
"""
cubeout = np.zeros_like(cube)
cube_coeff = np.zeros((cube.shape[0], coeff, cube.shape[1], cube.shape[2]))
n_frames = cube.shape[0]
msg = 'Decomposing frames with the Isotropic Undecimated Wavelet Transform'
bar = pyprind.ProgBar(n_frames, stream=1, title=msg)
for i in range(n_frames):
res = iuwt.iuwt_decomposition(cube[i], coeff, store_smoothed=False)
cube_coeff[i] = res
for j in range(rel_coeff):
cubeout[i] += cube_coeff[i][j]
bar.update()
if full_output:
return cubeout, cube_coeff
else:
return cubeout
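# Illustrative usage sketch (not part of the original module): filtering a small
# synthetic cube and keeping the first two wavelet scales. Random data is only a
# stand-in for real frames.
def _demo_cube_filter_iuwt():
    cube = np.random.randn(4, 64, 64)
    filtered, decomposition = cube_filter_iuwt(cube, coeff=5, rel_coeff=2,
                                               full_output=True)
    print(filtered.shape, decomposition.shape)  # (4, 64, 64) (4, 5, 64, 64)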
def cube_filter_highpass(array, mode, median_size=5, kernel_size=5,
fwhm_size=5, btw_cutoff=0.2, btw_order=2):
""" Wrapper of *frame_filter_highpass* for cubes or 3d arrays.
Parameters
----------
array : array_like
Input 3d array.
mode : {'kernel-conv', 'median-subt', 'gauss-subt', 'fourier-butter'}
Type of High-pass filtering.
median_size : int
Size of the median box for the low-pass median filter.
kernel_size : 3, 5 or 7
Size of the Laplacian kernel for convolution.
fwhm_size : int
Size of the Gaussian kernel for the low-pass Gaussian filter.
btw_cutoff : float
Frequency cutoff for low-pass 2d Butterworth filter.
btw_order : int
Order of low-pass 2d Butterworth filter.
Returns
-------
filtered : array_like
High-pass filtered cube.
"""
if not array.ndim==3:
raise TypeError('Input array is not a cube or 3d array')
n_frames = array.shape[0]
array_out = np.zeros_like(array)
msg = 'Applying the High-Pass filter on cube frames'
bar = pyprind.ProgBar(n_frames, stream=1, title=msg)
for i in range(n_frames):
array_out[i] = frame_filter_highpass(array[i], mode, median_size,
kernel_size, fwhm_size, btw_cutoff,
btw_order)
bar.update()
return array_out
def fft(array):
""" Performs the 2d discrete Fourier transform (using numpy's fft2 function)
on the data from the original image. This produces a new representation of
the image in which each pixel represents a spatial frequency and
orientation, rather than an xy coordinate. When Fourier-transformed images
are plotted graphically, the low frequencies are found at the centre; this
is not what fft2 actually produces, so we need to also apply numpy's
fftshift (centering low frequencies).
"""
fft_array = np.fft.fftshift(np.fft.fft2(array))
return fft_array
def ifft(array):
""" Gets the inverse Fourier transform on the image. This produces an array
of complex numbers whose absolute values correspond to the image in the
original space (decentering).
"""
new_array = np.abs(np.fft.ifft2(np.fft.ifftshift(array)))
return new_array
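# Illustrative sketch (not part of the original module): up to the final abs(),
# the fft/ifft helpers above are a round trip, which is what the
# 'fourier-butter' mode below relies on when masking frequencies.
def _demo_fft_roundtrip():
    frame = np.random.randn(32, 32)
    restored = ifft(fft(frame))
    print(np.allclose(np.abs(frame), restored))  # True: |frame| is recovered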
def frame_filter_highpass(array, mode, median_size=5, kernel_size=5,
fwhm_size=5, btw_cutoff=0.2, btw_order=2):
""" High-pass filtering of input frame depending on parameter *mode*. The
results are very different with different *mode* and varying the rest of
parameters.
Parameters
----------
array : array_like
Input array, 2d frame.
mode : {'kernel-conv', 'median-subt', 'gauss-subt', 'fourier-butter'}
Type of High-pass filtering.
median_size : int
Size of the median box for the low-pass median filter.
kernel_size : 3, 5 or 7
Size of the Laplacian kernel for convolution.
fwhm_size : int
Size of the Gaussian kernel for the low-pass Gaussian filter.
btw_cutoff : float
Frequency cutoff for low-pass 2d Butterworth filter.
btw_order : int
Order of low-pass 2d Butterworth filter.
Returns
-------
filtered : array_like
High-pass filtered image.
"""
def butter2d_lp(size, cutoff, n=3):
""" Create low-pass 2D Butterworth filter.
Function from PsychoPy library, credits to Jonathan Peirce, 2010
Parameters
----------
size : tuple
size of the filter
cutoff : float
relative cutoff frequency of the filter (0 - 1.0)
n : int, optional
order of the filter, the higher n is the sharper
the transition is.
Returns
-------
numpy.ndarray
filter kernel in 2D centered
"""
if not 0 < cutoff <= 1.0:
raise ValueError('Cutoff frequency must be between 0 and 1.0')
if not isinstance(n, int):
raise ValueError('n must be an integer >= 1')
rows, cols = size
x = np.linspace(-0.5, 0.5, cols) * cols
y = np.linspace(-0.5, 0.5, rows) * rows
# An array with every pixel = radius relative to center
radius = np.sqrt((x**2)[np.newaxis] + (y**2)[:, np.newaxis])
# The filter
f = 1 / (1.0 + (radius / cutoff)**(2*n))
return f
#---------------------------------------------------------------------------
if not array.ndim==2:
raise TypeError('Input array is not a frame or 2d array')
if mode=='kernel-conv':
# Performs convolution with Laplacian high-pass kernels.
# a simple and very narrow hp filter
# Kernel "Laplacian" of size 3x3+1+1 with values from -1 to 8
# Forming an output range from -8 to 8 (Zero-Summing)
kernel3 = np.array([[-1, -1, -1],
[-1, 8, -1],
[-1, -1, -1]])
#kernel3 = np.array([[0, -1, 0],
# [-1, 4, -1],
# [0, -1, 0]])
#kernel3 = np.array([[-0.17, -0.67, -0.17],
# [-0.67, 3.33, -0.67],
# [-0.17, -0.67, -0.17]])
#kernel5 = np.array([[-1, -1, -1, -1, -1],
# [-1, 1, 2, 1, -1],
# [-1, 2, 4, 2, -1],
# [-1, 1, 2, 1, -1],
# [-1, -1, -1, -1, -1]])
# Kernel "Laplacian" of size 5x5+2+2 with values from -4 to 4
# Forming an output range from -24 to 24 (Zero-Summing)
kernel5 = np.array([[-4, -1, 0, -1, -4],
[-1, 2, 3, 2, -1],
[ 0, 3, 4, 3, 0],
[-1, 2, 3, 2, -1],
[-4, -1, 0, -1, -4]])
# above /4. +1 in central px
#kernel5 = np.array([[-0.25, -0.25, -0.25, -0.25, -0.25],
# [-0.25, 0.25, 0.5 , 0.25, -0.25],
# [-0.25, 0.5 , 2. , 0.5 , -0.25],
# [-0.25, 0.25, 0.5 , 0.25, -0.25],
# [-0.25, -0.25, -0.25, -0.25, -0.25]])
# Kernel "Laplacian" of size 7x7+3+3 with values from -10 to 8
# Forming an output range from -1e+02 to 1e+02 (Zero-Summing)
kernel7 = np.array([[-10, -5, -2, -1, -2, -5, -10],
[-5, 0, 3, 4, 3, 0, -5],
[-2, 3, 6, 7, 6, 3, -2],
[-1, 4, 7, 8, 7, 4, -1],
[-2, 3, 6, 7, 6, 3, -2],
[-5, 0, 3, 4, 3, 0, -5],
[-10, -5, -2, -1, -2, -5, -10]])
if kernel_size == 3: kernel = kernel3
elif kernel_size == 5: kernel = kernel5
elif kernel_size == 7: kernel = kernel7
else: raise ValueError('kernel_size must be 3, 5 or 7')
filtered = convolve_fft(array, kernel)
elif mode=='median-subt':
# Subtracting the low_pass filtered (median) image from the image itself
medianed = median_filter(array, median_size, mode='nearest')
filtered = array - medianed
elif mode=='gauss-subt':
# Subtracting the low_pass filtered (Gaussian) image from the image itself
gaussed = frame_filter_gaussian2d(array, fwhm_size, mode='conv')
filtered = array - gaussed
elif mode=='fourier-butter':
# Designs an n-th order high-pass 2D Butterworth filter
filt = butter2d_lp(array.shape, cutoff=btw_cutoff, n=btw_order)
filt = 1. - filt
array_fft = fft(array)
fft_new = array_fft * filt
filtered = ifft(fft_new)
else:
raise TypeError('Mode not recognized')
return filtered
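# Illustrative usage sketch (not part of the original module): the four
# high-pass modes applied to the same synthetic frame. The parameter values are
# just the defaults, not tuned recommendations.
def _demo_frame_filter_highpass():
    frame = np.random.randn(128, 128)
    hp_kernel = frame_filter_highpass(frame, 'kernel-conv', kernel_size=5)
    hp_median = frame_filter_highpass(frame, 'median-subt', median_size=5)
    hp_gauss = frame_filter_highpass(frame, 'gauss-subt', fwhm_size=5)
    hp_butter = frame_filter_highpass(frame, 'fourier-butter', btw_cutoff=0.2,
                                      btw_order=2)
    print(hp_kernel.shape, hp_median.shape, hp_gauss.shape, hp_butter.shape)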
def frame_filter_lowpass(array, mode, median_size=5, fwhm_size=5):
""" Low-pass filtering of input frame depending on parameter *mode*.
Parameters
----------
array : array_like
Input array, 2d frame.
mode : {'median', 'gauss'}
Type of low-pass filtering.
median_size : int
Size of the median box for the low-pass median filter.
fwhm_size : int
Size of the Gaussian kernel for the low-pass Gaussian filter.
Returns
-------
filtered : array_like
Low-pass filtered image.
"""
if not array.ndim==2:
raise TypeError('Input array is not a frame or 2d array')
if mode=='median':
# creating the low_pass filtered (median) image
filtered = median_filter(array, int(median_size), mode='nearest')
elif mode=='gauss':
# creating the low_pass filtered (Gaussian) image
filtered = frame_filter_gaussian2d(array, fwhm_size, mode='conv')
else:
raise TypeError('Mode not recognized')
return filtered
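# Illustrative usage sketch (not part of the original module): the two low-pass
# modes on a synthetic frame; together with frame_filter_highpass above they
# split an image into smooth and residual components.
def _demo_frame_filter_lowpass():
    frame = np.random.randn(128, 128)
    smooth_median = frame_filter_lowpass(frame, 'median', median_size=5)
    smooth_gauss = frame_filter_lowpass(frame, 'gauss', fwhm_size=5)
    print(smooth_median.shape, smooth_gauss.shape)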
def frame_filter_gaussian2d(array, size_fwhm, mode='conv'):
""" 2d Gaussian filter.
Parameters
----------
array : array_like
Input array, 2d frame.
size_fwhm : float
Size in pixels of the FWHM of the gaussian kernel.
mode : {'conv', 'convfft'}
'conv' uses the multidimensional gaussian filter from scipy.ndimage and
'convfft' uses the fft convolution with a 2d Gaussian kernel.
Returns
-------
filtered : array_like
Convolved image.
"""
if not array.ndim==2:
raise TypeError('Input array is not a frame or 2d array')
if mode=='conv':
filtered = gaussian_filter(array, sigma=size_fwhm*gaussian_fwhm_to_sigma,
order=0, mode='nearest')
elif mode=='convfft':
# FFT Convolution with a 2d gaussian kernel created with Astropy.
gaus = Gaussian2DKernel(stddev=size_fwhm*gaussian_fwhm_to_sigma)
filtered = convolve_fft(array, gaus)
else:
raise TypeError('Mode not recognized')
return filtered
def gaussian_kernel(size, size_y=None):
""" Gaussian kernel.
"""
size = int(size)
if not size_y:
size_y = size
else:
size_y = int(size_y)
x, y = np.mgrid[-size:size+1, -size_y:size_y+1]
g = np.exp(-(x**2/float(size)+y**2/float(size_y)))
fwhm = size
fwhm_aper = photutils.CircularAperture((frame_center(g)), fwhm/2.)
fwhm_aper_phot = photutils.aperture_photometry(g, fwhm_aper)
g_norm = g/np.array(fwhm_aper_phot['aperture_sum'])
return g_norm/g_norm.max()
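# Illustrative usage sketch (not part of the original module): building a small
# kernel with gaussian_kernel above. The helper normalizes by the flux inside a
# FWHM-sized circular aperture and then rescales the kernel to a peak of one.
def _demo_gaussian_kernel():
    kernel = gaussian_kernel(5)
    print(kernel.shape)  # (11, 11): the grid spans -size..+size in both axes
    print(kernel.max())  # 1.0 by construction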
# -----------------------------------------------------------------------------
from django.test import override_settings
from django.test import TestCase
from mock import call
from mock import MagicMock
from mock import Mock
from mock import patch
from sqlalchemy.engine import Engine
from kolibri.core.content.utils.sqlalchemybridge import Bridge
from kolibri.core.content.utils.sqlalchemybridge import ClassNotFoundError
from kolibri.core.content.utils.sqlalchemybridge import get_class
from kolibri.core.content.utils.sqlalchemybridge import get_default_db_string
from kolibri.core.content.utils.sqlalchemybridge import get_engine
from kolibri.core.content.utils.sqlalchemybridge import make_session
from kolibri.core.content.utils.sqlalchemybridge import set_all_class_defaults
from kolibri.core.content.utils.sqlalchemybridge import sqlite_connection_string
@patch("kolibri.core.content.utils.sqlalchemybridge.db_matches_schema")
@patch("kolibri.core.content.utils.sqlalchemybridge.make_session", return_value=(0, 0))
@patch(
"kolibri.core.content.utils.sqlalchemybridge.sqlite_connection_string",
return_value="test",
)
class SQLAlchemyBridgeClassTestCase(TestCase):
"""
Testcase for the bridge to SQL Alchemy for Django models
"""
def test_constructor_sqlite_file_path(
self, connection_string_mock, make_session_mock, db_matches_schema_mock
):
Bridge(sqlite_file_path="test")
connection_string_mock.assert_called_once_with("test")
@patch(
"kolibri.core.content.utils.sqlalchemybridge.get_default_db_string",
return_value="test",
)
def test_constructor_default_db_path(
self,
default_db_string_mock,
connection_string_mock,
make_session_mock,
db_matches_schema_mock,
):
Bridge()
default_db_string_mock.assert_called_once_with()
def test_constructor_make_session(
self, connection_string_mock, make_session_mock, db_matches_schema_mock
):
Bridge(sqlite_file_path="test")
make_session_mock.assert_has_calls([call("test"), call("test")])
@patch("kolibri.core.content.utils.sqlalchemybridge.get_class")
def test_instance_get_class(
self,
get_class_mock,
connection_string_mock,
make_session_mock,
db_matches_schema_mock,
):
bridge = Bridge(sqlite_file_path="test")
model = MagicMock()
bridge.get_class(model)
get_class_mock.assert_called_once_with(model, bridge.Base)
@patch("kolibri.core.content.utils.sqlalchemybridge.get_class")
def test_instance_get_table(
self,
get_class_mock,
connection_string_mock,
make_session_mock,
db_matches_schema_mock,
):
bridge = Bridge(sqlite_file_path="test")
model = MagicMock()
class_mock = MagicMock()
table = "test_table"
class_mock.__table__ = table
get_class_mock.return_value = class_mock
self.assertEqual(bridge.get_table(model), table)
def test_instance_get_connection(
self, connection_string_mock, make_session_mock, db_matches_schema_mock
):
engine_mock = MagicMock()
make_session_mock.return_value = (0, engine_mock)
connection = "connection"
engine_mock.connect.return_value = connection
bridge = Bridge(sqlite_file_path="test")
bridge.get_connection()
engine_mock.connect.assert_called_once_with()
self.assertEqual(connection, bridge.connection)
def test_instance_end(
self, connection_string_mock, make_session_mock, db_matches_schema_mock
):
session_mock = MagicMock()
engine_mock = MagicMock()
make_session_mock.return_value = (session_mock, engine_mock)
connection = MagicMock()
bridge = Bridge(sqlite_file_path="test")
bridge.connection = connection
bridge.end()
session_mock.close.assert_called_once_with()
connection.close.assert_called_once_with()
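# Illustrative sketch (not part of the original tests): the un-mocked Bridge
# usage pattern that the test case above exercises. The sqlite file name is a
# placeholder, and the body assumes a content database with a matching schema.
def _example_bridge_usage():
    bridge = Bridge(sqlite_file_path="content.sqlite3")
    bridge.get_connection()  # opens and stores bridge.connection
    try:
        pass  # run raw SQLAlchemy statements against bridge.connection here
    finally:
        bridge.end()  # closes both the session and the connection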
class SQLAlchemyBridgeSQLAlchemyFunctionsTestCase(TestCase):
def test_sqlite_string(self):
self.assertEqual("sqlite:///test", sqlite_connection_string("test"))
def test_get_engine(self):
self.assertEqual(type(get_engine("sqlite:///")), Engine)
@patch(
"kolibri.core.content.utils.sqlalchemybridge.sessionmaker",
return_value=lambda: "test_session",
)
@patch(
"kolibri.core.content.utils.sqlalchemybridge.get_engine",
return_value="test_engine",
)
def test_make_session_get_engine(self, get_engine_mock, sessionmaker_mock):
make_session("test")
get_engine_mock.assert_called_once_with("test")
@patch(
"kolibri.core.content.utils.sqlalchemybridge.sessionmaker",
return_value=lambda: "test_session",
)
@patch(
"kolibri.core.content.utils.sqlalchemybridge.get_engine",
return_value="test_engine",
)
def test_make_session_sessionmaker(self, get_engine_mock, sessionmaker_mock):
make_session("test")
sessionmaker_mock.assert_called_once_with(bind="test_engine", autoflush=False)
@patch(
"kolibri.core.content.utils.sqlalchemybridge.sessionmaker",
return_value=lambda: "test_session",
)
@patch(
"kolibri.core.content.utils.sqlalchemybridge.get_engine",
return_value="test_engine",
)
def test_make_session_session_return(self, get_engine_mock, sessionmaker_mock):
test_session, test_engine = make_session("test")
self.assertEqual(test_session, "test_session")
@patch(
"kolibri.core.content.utils.sqlalchemybridge.sessionmaker",
return_value=lambda: "test_session",
)
@patch(
"kolibri.core.content.utils.sqlalchemybridge.get_engine",
return_value="test_engine",
)
def test_make_session_engine_return(self, get_engine_mock, sessionmaker_mock):
test_session, test_engine = make_session("test")
self.assertEqual(test_engine, "test_engine")
def test_get_class_exists(self):
DjangoModel = MagicMock()
DjangoModel._meta.db_table = "test"
Base = MagicMock(classes={"test": "test"})
self.assertEqual(get_class(DjangoModel, Base), "test")
def test_get_class_does_not_exist(self):
DjangoModel = MagicMock()
DjangoModel._meta.db_table = "test"
Base = MagicMock(classes={})
with self.assertRaises(ClassNotFoundError):
get_class(DjangoModel, Base)
def setUp(self, apps_mock, get_class_mock):
self.BaseClassMock = MagicMock()
get_class_mock.return_value = self.BaseClassMock
self.DjangoModelMock = MagicMock()
self.DjangoModelMock._meta.fields = []
apps_mock.get_models.return_value = [self.DjangoModelMock]
@patch("kolibri.core.content.utils.sqlalchemybridge.get_class")
@patch("kolibri.core.content.utils.sqlalchemybridge.apps")
class SQLAlchemyBridgeSetDefaultsTestCase(TestCase):
def test_set_defaults_calls_get_models(self, apps_mock, get_class_mock):
# Patched modules don't get passed into the TestCase setUp method
setUp(self, apps_mock, get_class_mock)
base = {}
set_all_class_defaults(base)
apps_mock.get_models.assert_called_once_with()
def test_set_defaults_calls_get_class(self, apps_mock, get_class_mock):
# Patched modules don't get passed into the TestCase setUp method
setUp(self, apps_mock, get_class_mock)
base = {}
set_all_class_defaults(base)
get_class_mock.assert_called_once_with(self.DjangoModelMock, base)
def test_field_has_no_default(self, apps_mock, get_class_mock):
# Patched modules don't get passed into the TestCase setUp method
setUp(self, apps_mock, get_class_mock)
base = {}
field_mock = MagicMock()
self.DjangoModelMock._meta.fields = [field_mock]
has_default_mock = Mock(return_value=False)
field_mock.attach_mock(has_default_mock, "has_default")
set_all_class_defaults(base)
has_default_mock.assert_called_once_with()
@patch("kolibri.core.content.utils.sqlalchemybridge.ColumnDefault")
def test_field_has_default_no_column(
self, ColumnDefaultMock, apps_mock, get_class_mock
):
# Patched modules don't get passed into the TestCase setUp method
setUp(self, apps_mock, get_class_mock)
baseclass = MagicMock()
baseclass.attach_mock(MagicMock(), "__table__")
baseclass.__table__.columns = {}
get_class_mock.return_value = baseclass
field_mock = MagicMock()
self.DjangoModelMock._meta.fields = [field_mock]
has_default_mock = Mock(return_value=True)
field_mock.attach_mock(has_default_mock, "has_default")
field_mock.attname = "test"
set_all_class_defaults({})
ColumnDefaultMock.assert_not_called()
@patch("kolibri.core.content.utils.sqlalchemybridge.ColumnDefault")
def test_field_has_default_with_column(
self, ColumnDefaultMock, apps_mock, get_class_mock
):
# Patched modules don't get passed into the TestCase setUp method
setUp(self, apps_mock, get_class_mock)
baseclass = MagicMock()
column = MagicMock()
baseclass.attach_mock(MagicMock(), "__table__")
baseclass.__table__.columns = {"test": column}
get_class_mock.return_value = baseclass
field_mock = MagicMock()
self.DjangoModelMock._meta.fields = [field_mock]
has_default_mock = Mock(return_value=True)
field_mock.attach_mock(has_default_mock, "has_default")
field_mock.attname = "test"
field_mock.default = "test_default"
set_all_class_defaults({})
ColumnDefaultMock.method()
ColumnDefaultMock.method.assert_called()
ColumnDefaultMock.assert_has_calls([call()._set_parent_with_dispatch(column)])
@patch("kolibri.core.content.utils.sqlalchemybridge.ColumnDefault")
def test_field_no_class(self, ColumnDefaultMock, apps_mock, get_class_mock):
# Patched modules don't get passed into the TestCase setUp method
setUp(self, apps_mock, get_class_mock)
baseclass = MagicMock()
baseclass.attach_mock(MagicMock(), "__table__")
baseclass.__table__.columns = {}
get_class_mock.side_effect = ClassNotFoundError()
set_all_class_defaults({})
ColumnDefaultMock.assert_not_called()
class SQLAlchemyBridgeDefaultDBStringTestCase(TestCase):
@override_settings(
DATABASES={
"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "test.sqlite3"}
}
)
def test_sqlite(self):
self.assertEqual(get_default_db_string(), "sqlite:///test.sqlite3")
@override_settings(
DATABASES={
"default": {
"ENGINE": "django.db.backends.postgresql",
"USER": "postgres",
"PASSWORD": "password",
"NAME": "test",
}
}
)
def test_no_port_no_host(self):
self.assertEqual(
get_default_db_string(), "postgresql://postgres:password@localhost/test"
)
@override_settings(
DATABASES={
"default": {
"ENGINE": "django.db.backends.postgresql",
"USER": "postgres",
"PASSWORD": "password",
"NAME": "test",
"HOST": "localhost",
}
}
)
def test_no_port(self):
self.assertEqual(
get_default_db_string(), "postgresql://postgres:password@localhost/test"
)
@override_settings(
DATABASES={
"default": {
"ENGINE": "django.db.backends.postgresql",
"USER": "postgres",
"PASSWORD": "password",
"NAME": "test",
"HOST": "localhost",
"PORT": "1234",
}
}
)
def test_postgres(self):
self.assertEqual(
get_default_db_string(),
"postgresql://postgres:password@localhost:1234/test",
)
@override_settings(
DATABASES={
"default": {
"ENGINE": "django.db.backends.mysql",
"USER": "mysql",
"PASSWORD": "password",
"NAME": "test",
"HOST": "localhost",
"PORT": "1234",
}
}
)
def test_mysql(self):
self.assertEqual(
get_default_db_string(), "mysql://mysql:password@localhost:1234/test"
)
@override_settings(
DATABASES={
"default": {
"ENGINE": "django.db.backends.oracle",
"USER": "oracle",
"PASSWORD": "password",
"NAME": "test",
"HOST": "localhost",
"PORT": "1234",
}
}
)
def test_oracle(self):
self.assertEqual(
get_default_db_string(), "oracle://oracle:password@localhost:1234/test"
)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains Google Kubernetes Engine operators."""
import os
import tempfile
from typing import Dict, Optional, Sequence, Union
from google.cloud.container_v1.types import Cluster
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.utils.process_utils import execute_in_subprocess, patch_environ
class GKEDeleteClusterOperator(BaseOperator):
"""
Deletes the cluster, including the Kubernetes endpoint and all worker nodes.
To delete a certain cluster, you must specify the ``project_id``, the ``name``
of the cluster, the ``location`` that the cluster is in, and the ``task_id``.
**Operator Creation**: ::
operator = GKEDeleteClusterOperator(
task_id='cluster_delete',
project_id='my-project',
location='cluster-location',
name='cluster-name')
.. seealso::
For more detail about deleting clusters have a look at the reference:
https://google-cloud-python.readthedocs.io/en/latest/container/gapic/v1/api.html#google.cloud.container_v1.ClusterManagerClient.delete_cluster
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKEDeleteClusterOperator`
:param project_id: The Google Developers Console [project ID or project number]
:type project_id: str
:param name: The name of the resource to delete, in this case cluster name
:type name: str
:param location: The name of the Google Compute Engine zone in which the cluster
resides.
:type location: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param api_version: The api version to use
:type api_version: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = [
'project_id',
'gcp_conn_id',
'name',
'location',
'api_version',
'impersonation_chain',
]
def __init__(
self,
*,
name: str,
location: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v2',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.location = location
self.api_version = api_version
self.name = name
self.impersonation_chain = impersonation_chain
self._check_input()
def _check_input(self) -> None:
if not all([self.project_id, self.name, self.location]):
self.log.error('One of (project_id, name, location) is missing or incorrect')
raise AirflowException('Operator has incorrect or missing input.')
def execute(self, context) -> Optional[str]:
hook = GKEHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
delete_result = hook.delete_cluster(name=self.name, project_id=self.project_id)
return delete_result
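# Illustrative sketch (not part of the original module): instantiating the operator
# with placeholder project/zone/cluster values; in a real pipeline this would live
# inside a DAG definition.
delete_cluster = GKEDeleteClusterOperator(
    task_id="delete_cluster",
    project_id="my-project",      # placeholder
    location="us-central1-a",     # placeholder zone
    name="my-cluster",            # placeholder cluster name
)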
class GKECreateClusterOperator(BaseOperator):
"""
Create a Google Kubernetes Engine Cluster of specified dimensions
The operator will wait until the cluster is created.
The **minimum** required to define a cluster to create is:
``dict()`` ::
cluster_def = {'name': 'my-cluster-name',
'initial_node_count': 1}
or
``Cluster`` proto ::
from google.cloud.container_v1.types import Cluster
cluster_def = Cluster(name='my-cluster-name', initial_node_count=1)
**Operator Creation**: ::
operator = GKECreateClusterOperator(
task_id='cluster_create',
project_id='my-project',
location='my-location',
body=cluster_def)
.. seealso::
For more detail about creating clusters, have a look at the reference:
:class:`google.cloud.container_v1.types.Cluster`
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKECreateClusterOperator`
:param project_id: The Google Developers Console [project ID or project number]
:type project_id: str
:param location: The name of the Google Compute Engine zone in which the cluster
resides.
:type location: str
:param body: The Cluster definition to create, can be protobuf or python dict, if
dict it must match protobuf message Cluster
:type body: dict or google.cloud.container_v1.types.Cluster
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param api_version: The api version to use
:type api_version: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = [
'project_id',
'gcp_conn_id',
'location',
'api_version',
'body',
'impersonation_chain',
]
def __init__(
self,
*,
location: str,
body: Optional[Union[Dict, Cluster]],
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v2',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.location = location
self.api_version = api_version
self.body = body
self.impersonation_chain = impersonation_chain
self._check_input()
def _check_input(self) -> None:
if not all([self.project_id, self.location, self.body]) or not (
(isinstance(self.body, dict) and "name" in self.body and "initial_node_count" in self.body)
or (getattr(self.body, "name", None) and getattr(self.body, "initial_node_count", None))
):
self.log.error(
"One of (project_id, location, body, body['name'], "
"body['initial_node_count']) is missing or incorrect"
)
raise AirflowException("Operator has incorrect or missing input.")
def execute(self, context) -> str:
hook = GKEHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
create_op = hook.create_cluster(cluster=self.body, project_id=self.project_id)
return create_op
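# Illustrative sketch (not part of the original module): the smallest dict body the
# operator accepts, with placeholder project/zone values; a Cluster proto carrying the
# same two fields would work equally well.
create_cluster = GKECreateClusterOperator(
    task_id="create_cluster",
    project_id="my-project",      # placeholder
    location="us-central1-a",     # placeholder zone
    body={"name": "my-cluster", "initial_node_count": 1},
)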
KUBE_CONFIG_ENV_VAR = "KUBECONFIG"
class GKEStartPodOperator(KubernetesPodOperator):
"""
Executes a task in a Kubernetes pod in the specified Google Kubernetes
Engine cluster
This Operator assumes that the system has gcloud installed and has configured a
connection id with a service account.
The **minimum** required to launch a pod in the given cluster are the variables
``task_id``, ``project_id``, ``location``, ``cluster_name``, ``name``,
``namespace``, and ``image``.
.. seealso::
For more detail about Kubernetes Engine authentication have a look at the reference:
https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#internal_ip
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKEStartPodOperator`
:param location: The name of the Google Kubernetes Engine zone in which the
cluster resides, e.g. 'us-central1-a'
:type location: str
:param cluster_name: The name of the Google Kubernetes Engine cluster the pod
should be spawned in
:type cluster_name: str
:param use_internal_ip: Use the internal IP address as the endpoint.
:type use_internal_ip: bool
:param project_id: The Google Developers Console project id
:type project_id: str
:param gcp_conn_id: The Google Cloud connection ID to use. This allows for
users to specify a service account.
:type gcp_conn_id: str
"""
template_fields = {'project_id', 'location', 'cluster_name'} | set(KubernetesPodOperator.template_fields)
def __init__(
self,
*,
location: str,
cluster_name: str,
use_internal_ip: bool = False,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.cluster_name = cluster_name
self.gcp_conn_id = gcp_conn_id
self.use_internal_ip = use_internal_ip
if self.gcp_conn_id is None:
raise AirflowException(
"The gcp_conn_id parameter has become required. If you want to use Application Default "
"Credentials (ADC) strategy for authorization, create an empty connection "
"called `google_cloud_default`.",
)
def execute(self, context) -> Optional[str]:
hook = GoogleBaseHook(gcp_conn_id=self.gcp_conn_id)
self.project_id = self.project_id or hook.project_id
if not self.project_id:
raise AirflowException(
"The project id must be passed either as "
"keyword project_id parameter or as project_id extra "
"in Google Cloud connection definition. Both are not set!"
)
# Write config to a temp file and set the environment variable to point to it.
# This is to avoid race conditions of reading/writing a single file
with tempfile.NamedTemporaryFile() as conf_file, patch_environ(
{KUBE_CONFIG_ENV_VAR: conf_file.name}
), hook.provide_authorized_gcloud():
# Attempt to get/update credentials
# We call gcloud directly instead of using google-cloud-python api
# because there is no way to write kubernetes config to a file, which is
# required by KubernetesPodOperator.
# The gcloud command looks at the env variable `KUBECONFIG` for where to save
# the kubernetes config file.
cmd = [
"gcloud",
"container",
"clusters",
"get-credentials",
self.cluster_name,
"--zone",
self.location,
"--project",
self.project_id,
]
if self.use_internal_ip:
cmd.append('--internal-ip')
execute_in_subprocess(cmd)
# Tell `KubernetesPodOperator` where the config file is located
self.config_file = os.environ[KUBE_CONFIG_ENV_VAR]
return super().execute(context)
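# Illustrative sketch (not part of the original module): the minimum set of arguments
# described in the docstring above, using placeholder project/cluster/image values.
run_pod = GKEStartPodOperator(
    task_id="run_pod",
    project_id="my-project",      # placeholder
    location="us-central1-a",     # placeholder zone
    cluster_name="my-cluster",    # placeholder cluster name
    name="echo-pod",
    namespace="default",
    image="busybox",
    cmds=["echo", "hello"],
)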
|
|
import discord
from discord.ext import commands
import asyncio
import time
import clashroyale
lastTag = '0'
creditIcon = "https://i.imgur.com/TP8GXZb.png"
credits = "Bot by GR8 | Titan"
BOTCOMMANDER_ROLES = ["Family Representative", "Clan Manager", "Clan Deputy",
"Co-Leader", "Hub Officer", "admin", "Member", "guest"]
class tournament:
"""tournament!"""
def __init__(self, bot):
self.bot = bot
self.auth = self.bot.get_cog('crtools').auth
self.clash = clashroyale.RoyaleAPI(self.auth.getToken(), is_async=True)
self.clashAPI = clashroyale.OfficialAPI(self.auth.getOfficialToken(), is_async=True)
def getCards(self, maxPlayers):
"""Converts maxPlayers to Cards"""
cards = {
"50": 25,
"100": 100,
"200": 400,
"1000": 2000
}
return cards[str(maxPlayers)]
def getCoins(self, maxPlayers):
"""Converts maxPlayers to Coins"""
coins = {
"50": 175,
"100": 700,
"200": 2800,
"1000": 14000
}
return coins[str(maxPlayers)]
def sec2tme(self, sec):
"""Converts seconds to readable time"""
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
if h == 0:
if m == 0:
return "{} seconds".format(s)
else:
return "{} minutes, {} secs".format(m, s)
else:
return "{} hour, {} mins".format(h, m)
def emoji(self, name):
"""Emoji by name."""
for emoji in self.bot.get_all_emojis():
if emoji.name == name:
return '<:{}:{}>'.format(emoji.name, emoji.id)
return ''
# Returns a list with tournaments
async def getTopTourney(self):
global lastTag
try:
openTourney = await self.clash.get_joinable_tournaments()
except clashroyale.RequestError:
return None
for tourney in openTourney:
tag = tourney.tag
joined = tourney.current_players
maxplayers = tourney.max_players
createTime = tourney.create_time
if (((int(time.time()) - createTime) < 10800) and (maxplayers > 50) and ((joined + 4) < maxplayers) and (tag != lastTag)):
try:
tourneyAPI = await self.clashAPI.get_tournament(tag)
joined = tourneyAPI.capacity
maxplayers = tourneyAPI.max_capacity
tourneyAPI.open = tourneyAPI.type == "open"
except clashroyale.RequestError:
return None
if ((maxplayers > 50) and ((joined + 4) < maxplayers) and (tourneyAPI.status != "ended") and (tourneyAPI.open) and (tourneyAPI.first_place_card_prize > 0)):
lastTag = tag
return tourneyAPI
return None
# Returns a list with tournaments
async def getRandomTourney(self):
try:
openTourney = await self.clash.get_joinable_tournaments()
except clashroyale.RequestError:
return None
for tourney in openTourney:
tag = tourney.tag
joined = tourney.current_players
maxplayers = tourney.max_players
createTime = tourney.create_time
if (((int(time.time()) - createTime) < 10800) and ((joined + 1) < maxplayers)):
try:
tourneyAPI = await self.clashAPI.get_tournament(tag)
joined = tourneyAPI.capacity
maxplayers = tourneyAPI.max_capacity
tourneyAPI.open = tourneyAPI.type == "open"
except clashroyale.RequestError:
return None
if ((joined < maxplayers) and (tourneyAPI.status != "ended") and (tourneyAPI.open) and (tourneyAPI.first_place_card_prize > 0)):
return tourneyAPI
return None
# checks for a new tourney every 2 minutes (waits ~15 minutes after posting one)
async def checkTourney(self):
server = [x for x in self.bot.servers if x.id == "374596069989810176"][0]
role_name = "Tournaments"
if role_name is not None:
tour_role = discord.utils.get(server.roles, name=role_name)
if tour_role is None:
await self.bot.create_role(server, name=role_name)
tour_role = discord.utils.get(server.roles, name=role_name)
while self is self.bot.get_cog("tournament"):
tourneydata = await self.getTopTourney()
if tourneydata is not None:
maxPlayers = tourneydata.max_players
cards = self.getCards(maxPlayers)
coins = self.getCoins(maxPlayers)
embed = discord.Embed(title="Click this link to join the Tournament in Clash Royale!",
url="https://legendclans.com/tournaments?id={}".format(tourneydata.tag),
color=0xFAA61A)
embed.set_thumbnail(url='https://statsroyale.com/images/tournament.png')
embed.set_author(name="{} (#{})".format(tourneydata.name, tourneydata.tag),
url="https://royaleapi.com/tournament/" + tourneydata.tag)
embed.add_field(name="Players", value="{} {}/{}".format(self.emoji("members"),
tourneydata.current_players, maxPlayers), inline=True)
embed.add_field(name="Status", value=tourneydata.status.title(), inline=True)
if tourneydata.status != "inProgress":
startTime = self.sec2tme((tourneydata.create_time + tourneydata.prep_time) - int(time.time()))
embed.add_field(name="Starts In", value=startTime, inline=True)
endTime = self.sec2tme((tourneydata.create_time + tourneydata.prep_time + tourneydata.duration) - int(time.time()))
embed.add_field(name="Ends In", value=endTime, inline=True)
embed.add_field(name="Top prize", value="{} {} {} {}".format(self.emoji("tournamentcards"),
cards,
self.emoji("coin"),
coins), inline=True)
embed.set_footer(text=credits, icon_url=creditIcon)
await self.bot.edit_role(server, tour_role, mentionable=True)
await self.bot.send_message(discord.Object(id='374597050530136064'),
content="{}. Type ``!r tournaments`` to turn on tournament notifications.".format(tour_role.mention),
embed=embed)
await self.bot.edit_role(server, tour_role, mentionable=False)
await asyncio.sleep(900)
await asyncio.sleep(120)
@commands.command()
@commands.cooldown(3, 60, commands.BucketType.server)
@commands.has_any_role(*BOTCOMMANDER_ROLES)
async def tourney(self):
""" Get a open tournament"""
await self.bot.type()
tourneydata = await self.getRandomTourney()
if tourneydata is not None:
maxPlayers = tourneydata.max_players
cards = self.getCards(maxPlayers)
coins = self.getCoins(maxPlayers)
embed = discord.Embed(title="Click this link to join the Tournament in Clash Royale!", url="https://legendclans.com/tournaments?id={}".format(tourneydata.tag), color=0xFAA61A)
embed.set_thumbnail(url='https://statsroyale.com/images/tournament.png')
embed.set_author(name="{} (#{})".format(tourneydata.name, tourneydata.tag), url="https://royaleapi.com/tournament/" + tourneydata.tag)
embed.add_field(name="Players", value="{} {}/{}".format(self.emoji("members"), tourneydata.current_players, maxPlayers), inline=True)
embed.add_field(name="Status", value=tourneydata.status.title(), inline=True)
if tourneydata.status != "inProgress":
startTime = self.sec2tme((tourneydata.create_time + tourneydata.prep_time) - int(time.time()))
embed.add_field(name="Starts In", value=startTime, inline=True)
endTime = self.sec2tme((tourneydata.create_time + tourneydata.prep_time + tourneydata.duration) - int(time.time()))
embed.add_field(name="Ends In", value=endTime, inline=True)
embed.add_field(name="Top prize", value="{} {} {} {}".format(self.emoji("tournamentcards"), cards, self.emoji("coin"), coins), inline=True)
embed.set_footer(text=credits, icon_url=creditIcon)
await self.bot.say(embed=embed)
else:
return await self.bot.say("Found nothing, please try again after a few minutes!")
def setup(bot):
n = tournament(bot)
loop = asyncio.get_event_loop()
loop.create_task(n.checkTourney())
bot.add_cog(n)
|
|
import unittest
from test import support
import contextlib
import socket
import urllib.request
import sys
import os
import email.message
import time
support.requires('network')
class URLTimeoutTest(unittest.TestCase):
# XXX this test doesn't seem to test anything useful.
TIMEOUT = 30.0
def setUp(self):
socket.setdefaulttimeout(self.TIMEOUT)
def tearDown(self):
socket.setdefaulttimeout(None)
def testURLread(self):
with support.transient_internet("www.example.com"):
f = urllib.request.urlopen("http://www.example.com/")
x = f.read()
class urlopenNetworkTests(unittest.TestCase):
"""Tests urllib.reqest.urlopen using the network.
These tests are not exhaustive. It is assumed that testing against local
files already covers most of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
setUp is not used for always constructing a connection to
http://www.example.com/ since there are a few tests that don't use that address
and making a connection is expensive enough to warrant minimizing unneeded
connections.
"""
@contextlib.contextmanager
def urlopen(self, *args, **kwargs):
resource = args[0]
with support.transient_internet(resource):
r = urllib.request.urlopen(*args, **kwargs)
try:
yield r
finally:
r.close()
def test_basic(self):
# Simple test expected to pass.
with self.urlopen("http://www.example.com/") as open_url:
for attr in ("read", "readline", "readlines", "fileno", "close",
"info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
self.assertTrue(open_url.read(), "calling 'read' failed")
def test_readlines(self):
# Test both readline and readlines.
with self.urlopen("http://www.example.com/") as open_url:
self.assertIsInstance(open_url.readline(), bytes,
"readline did not return a string")
self.assertIsInstance(open_url.readlines(), list,
"readlines did not return a list")
def test_info(self):
# Test 'info'.
with self.urlopen("http://www.example.com/") as open_url:
info_obj = open_url.info()
self.assertIsInstance(info_obj, email.message.Message,
"object returned by 'info' is not an "
"instance of email.message.Message")
self.assertEqual(info_obj.get_content_subtype(), "html")
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
URL = "http://www.example.com/"
with self.urlopen(URL) as open_url:
gotten_url = open_url.geturl()
self.assertEqual(gotten_url, URL)
def test_getcode(self):
# test getcode() with the fancy opener to get 404 error codes
URL = "http://www.pythontest.net/XXXinvalidXXX"
with support.transient_internet(URL):
with self.assertWarns(DeprecationWarning):
open_url = urllib.request.FancyURLopener().open(URL)
try:
code = open_url.getcode()
finally:
open_url.close()
self.assertEqual(code, 404)
# On Windows, socket handles are not file descriptors; this
# test can't pass on Windows.
@unittest.skipIf(sys.platform in ('win32',), 'not appropriate for Windows')
def test_fileno(self):
# Make sure fd returned by fileno is valid.
with self.urlopen("http://www.google.com/", timeout=None) as open_url:
fd = open_url.fileno()
with os.fdopen(fd, 'rb') as f:
self.assertTrue(f.read(), "reading from file created using fd "
"returned by fileno failed")
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
bogus_domain = "sadflkjsasf.i.nvali.d"
try:
socket.gethostbyname(bogus_domain)
except OSError:
# socket.gaierror is too narrow, since getaddrinfo() may also
# fail with EAI_SYSTEM and ETIMEDOUT (seen on Ubuntu 13.04),
# i.e. Python's TimeoutError.
pass
else:
# This happens with some overzealous DNS providers such as OpenDNS
self.skipTest("%r should not resolve for test to work" % bogus_domain)
failure_explanation = ('opening an invalid URL did not raise OSError; '
'can be caused by a broken DNS server '
'(e.g. returns 404 or hijacks page)')
with self.assertRaises(OSError, msg=failure_explanation):
# SF patch 809915: In Sep 2003, VeriSign started highjacking
# invalid .com and .net addresses to boost traffic to their own
# site. This test started failing then. One hopes the .invalid
# domain will be spared to serve its defined purpose.
urllib.request.urlopen("http://sadflkjsasf.i.nvali.d/")
class urlretrieveNetworkTests(unittest.TestCase):
"""Tests urllib.request.urlretrieve using the network."""
@contextlib.contextmanager
def urlretrieve(self, *args, **kwargs):
resource = args[0]
with support.transient_internet(resource):
file_location, info = urllib.request.urlretrieve(*args, **kwargs)
try:
yield file_location, info
finally:
support.unlink(file_location)
def test_basic(self):
# Test basic functionality.
with self.urlretrieve("http://www.example.com/") as (file_location, info):
self.assertTrue(os.path.exists(file_location), "file location returned by"
" urlretrieve is not a valid path")
with open(file_location, 'rb') as f:
self.assertTrue(f.read(), "reading from the file location returned"
" by urlretrieve failed")
def test_specified_path(self):
# Make sure that specifying the location of the file to write to works.
with self.urlretrieve("http://www.example.com/",
support.TESTFN) as (file_location, info):
self.assertEqual(file_location, support.TESTFN)
self.assertTrue(os.path.exists(file_location))
with open(file_location, 'rb') as f:
self.assertTrue(f.read(), "reading from temporary file failed")
def test_header(self):
# Make sure header returned as 2nd value from urlretrieve is good.
with self.urlretrieve("http://www.example.com/") as (file_location, info):
self.assertIsInstance(info, email.message.Message,
"info is not an instance of email.message.Message")
logo = "http://www.example.com/"
def test_data_header(self):
with self.urlretrieve(self.logo) as (file_location, fileheaders):
datevalue = fileheaders.get('Date')
dateformat = '%a, %d %b %Y %H:%M:%S GMT'
try:
time.strptime(datevalue, dateformat)
except ValueError:
self.fail('Date value not in %r format' % dateformat)
def test_reporthook(self):
records = []
def recording_reporthook(blocks, block_size, total_size):
records.append((blocks, block_size, total_size))
with self.urlretrieve(self.logo, reporthook=recording_reporthook) as (
file_location, fileheaders):
expected_size = int(fileheaders['Content-Length'])
records_repr = repr(records) # For use in error messages.
self.assertGreater(len(records), 1, msg="There should always be two "
"calls; the first one before the transfer starts.")
self.assertEqual(records[0][0], 0)
self.assertGreater(records[0][1], 0,
msg="block size can't be 0 in %s" % records_repr)
self.assertEqual(records[0][2], expected_size)
self.assertEqual(records[-1][2], expected_size)
block_sizes = {block_size for _, block_size, _ in records}
self.assertEqual({records[0][1]}, block_sizes,
msg="block sizes in %s must be equal" % records_repr)
self.assertGreaterEqual(records[-1][0]*records[0][1], expected_size,
msg="number of blocks * block size must be"
" >= total size in %s" % records_repr)
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from keystoneauth1.exceptions import catalog as key_ex
from novaclient import client
from novaclient import exceptions
from novaclient.i18n import _LE
from novaclient.v2 import agents
from novaclient.v2 import aggregates
from novaclient.v2 import availability_zones
from novaclient.v2 import certs
from novaclient.v2 import cloudpipe
from novaclient.v2 import fixed_ips
from novaclient.v2 import flavor_access
from novaclient.v2 import flavors
from novaclient.v2 import floating_ip_dns
from novaclient.v2 import floating_ip_pools
from novaclient.v2 import floating_ips
from novaclient.v2 import floating_ips_bulk
from novaclient.v2 import fping
from novaclient.v2 import hosts
from novaclient.v2 import hypervisors
from novaclient.v2 import images
from novaclient.v2 import keypairs
from novaclient.v2 import limits
from novaclient.v2 import networks
from novaclient.v2 import quota_classes
from novaclient.v2 import quotas
from novaclient.v2 import security_group_default_rules
from novaclient.v2 import security_group_rules
from novaclient.v2 import security_groups
from novaclient.v2 import server_groups
from novaclient.v2 import server_migrations
from novaclient.v2 import servers
from novaclient.v2 import services
from novaclient.v2 import usage
from novaclient.v2 import versions
from novaclient.v2 import virtual_interfaces
from novaclient.v2 import volumes
from novaclient.v2 import licence
from novaclient.v2 import snapshot
from novaclient.v2 import systemlogs
from novaclient.v2 import cdrom
class Client(object):
"""Top-level object to access the OpenStack Compute API.
.. warning:: Scripts and projects should not initialize this class
directly. It should be done via the `novaclient.client.Client` interface.
"""
def __init__(self, username=None, api_key=None, project_id=None,
auth_url=None, insecure=False, timeout=None,
proxy_tenant_id=None, proxy_token=None, region_name=None,
endpoint_type='publicURL', extensions=None,
service_type='compute', service_name=None,
volume_service_name=None, timings=False, bypass_url=None,
os_cache=False, no_cache=True, http_log_debug=False,
auth_system='keystone', auth_plugin=None, auth_token=None,
cacert=None, tenant_id=None, user_id=None,
connection_pool=False, session=None, auth=None,
api_version=None, direct_use=True, logger=None, **kwargs):
"""Initialization of Client object.
:param str username: Username
:param str api_key: API Key
:param str project_id: Project ID
:param str auth_url: Auth URL
:param bool insecure: Allow insecure
:param float timeout: API timeout, None or 0 disables
:param str proxy_tenant_id: Tenant ID
:param str proxy_token: Proxy Token
:param str region_name: Region Name
:param str endpoint_type: Endpoint Type
:param str extensions: Extensions
:param str service_type: Service Type
:param str service_name: Service Name
:param str volume_service_name: Volume Service Name
:param bool timings: Timings
:param str bypass_url: Bypass URL
:param bool os_cache: OS cache
:param bool no_cache: No cache
:param bool http_log_debug: Enable debugging for HTTP connections
:param str auth_system: Auth system
:param str auth_plugin: Auth plugin
:param str auth_token: Auth token
:param str cacert: cacert
:param str tenant_id: Tenant ID
:param str user_id: User ID
:param bool connection_pool: Use a connection pool
:param str session: Session
:param str auth: Auth
:param api_version: Compute API version
:param direct_use: Inner variable of novaclient. Do not use it outside
novaclient. It's restricted.
:param logger: Logger
:type api_version: novaclient.api_versions.APIVersion
"""
if direct_use:
raise exceptions.Forbidden(
403, _LE("'novaclient.v2.client.Client' is not designed to be "
"initialized directly. It is inner class of "
"novaclient. You should use "
"'novaclient.client.Client' instead. Related lp "
"bug-report: 1493576"))
# FIXME(comstud): Rename the api_key argument above when we
# know it's not being used as keyword argument
# NOTE(cyeoh): In the novaclient context (unlike Nova) the
# project_id is not the same as the tenant_id. Here project_id
# is a name (what the Nova API often refers to as a project or
# tenant name) and tenant_id is a UUID (what the Nova API
# often refers to as a project_id or tenant_id).
password = kwargs.pop('password', api_key)
self.projectid = project_id
self.tenant_id = tenant_id
self.user_id = user_id
self.flavors = flavors.FlavorManager(self)
self.flavor_access = flavor_access.FlavorAccessManager(self)
self.images = images.ImageManager(self)
self.glance = images.GlanceManager(self)
self.limits = limits.LimitsManager(self)
self.servers = servers.ServerManager(self)
self.versions = versions.VersionManager(self)
# extensions
self.agents = agents.AgentsManager(self)
self.dns_domains = floating_ip_dns.FloatingIPDNSDomainManager(self)
self.dns_entries = floating_ip_dns.FloatingIPDNSEntryManager(self)
self.cloudpipe = cloudpipe.CloudpipeManager(self)
self.certs = certs.CertificateManager(self)
self.floating_ips = floating_ips.FloatingIPManager(self)
self.floating_ip_pools = floating_ip_pools.FloatingIPPoolManager(self)
self.fping = fping.FpingManager(self)
self.volumes = volumes.VolumeManager(self)
self.keypairs = keypairs.KeypairManager(self)
self.networks = networks.NetworkManager(self)
self.neutron = networks.NeutronManager(self)
self.quota_classes = quota_classes.QuotaClassSetManager(self)
self.quotas = quotas.QuotaSetManager(self)
self.security_groups = security_groups.SecurityGroupManager(self)
self.security_group_rules = \
security_group_rules.SecurityGroupRuleManager(self)
self.security_group_default_rules = \
security_group_default_rules.SecurityGroupDefaultRuleManager(self)
self.usage = usage.UsageManager(self)
self.virtual_interfaces = \
virtual_interfaces.VirtualInterfaceManager(self)
self.aggregates = aggregates.AggregateManager(self)
self.hosts = hosts.HostManager(self)
self.hypervisors = hypervisors.HypervisorManager(self)
self.hypervisor_stats = hypervisors.HypervisorStatsManager(self)
self.services = services.ServiceManager(self)
self.fixed_ips = fixed_ips.FixedIPsManager(self)
self.floating_ips_bulk = floating_ips_bulk.FloatingIPBulkManager(self)
self.os_cache = os_cache or not no_cache
self.availability_zones = \
availability_zones.AvailabilityZoneManager(self)
self.server_groups = server_groups.ServerGroupsManager(self)
self.server_migrations = \
server_migrations.ServerMigrationsManager(self)
self.licence = licence.LicenceManager(self)
self.snapshot = snapshot.SnapshotManager(self)
self.systemlogs = systemlogs.SystemlogManager(self)
self.cdrom = cdrom.CDromManager(self)
# Add in any extensions...
if extensions:
for extension in extensions:
if extension.manager_class:
setattr(self, extension.name,
extension.manager_class(self))
if not logger:
logger = logging.getLogger(__name__)
self.client = client._construct_http_client(
username=username,
password=password,
user_id=user_id,
project_id=project_id,
tenant_id=tenant_id,
auth_url=auth_url,
auth_token=auth_token,
insecure=insecure,
timeout=timeout,
auth_system=auth_system,
auth_plugin=auth_plugin,
proxy_token=proxy_token,
proxy_tenant_id=proxy_tenant_id,
region_name=region_name,
endpoint_type=endpoint_type,
service_type=service_type,
service_name=service_name,
volume_service_name=volume_service_name,
timings=timings,
bypass_url=bypass_url,
os_cache=self.os_cache,
http_log_debug=http_log_debug,
cacert=cacert,
connection_pool=connection_pool,
session=session,
auth=auth,
api_version=api_version,
logger=logger,
**kwargs)
@property
def api_version(self):
return self.client.api_version
@api_version.setter
def api_version(self, value):
self.client.api_version = value
@client._original_only
def __enter__(self):
self.client.open_session()
return self
@client._original_only
def __exit__(self, t, v, tb):
self.client.close_session()
@client._original_only
def set_management_url(self, url):
self.client.set_management_url(url)
def get_timings(self):
return self.client.get_timings()
def reset_timings(self):
self.client.reset_timings()
def has_neutron(self):
"""Check the service catalog to figure out if we have neutron.
This is an intermediary solution for the window of time where
we still have nova-network support in the client, but we
expect most people have neutron. This ensures that if they
have neutron, we talk to it; if they don't, we fall back to the
nova proxies.
"""
try:
endpoint = self.client.get_endpoint(service_type='network')
if endpoint:
return True
return False
except key_ex.EndpointNotFound:
return False
@client._original_only
def authenticate(self):
"""Authenticate against the server.
Normally this is called automatically when you first access the API,
but you can call this method to force authentication right now.
Returns on success; raises :exc:`exceptions.Unauthorized` if the
credentials are wrong.
"""
self.client.authenticate()
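# Illustrative sketch (not part of the original module): the supported way to obtain
# this client is through novaclient.client.Client with a keystoneauth1 session; all
# credential values below are placeholders.
from keystoneauth1 import loading
from keystoneauth1 import session as ks_session
from novaclient import client as nova_client

def _example_client():
    loader = loading.get_plugin_loader('password')
    auth = loader.load_from_options(
        auth_url='http://keystone.example.com:5000/v3',  # placeholder endpoint
        username='demo',
        password='secret',
        project_name='demo',
        user_domain_name='Default',
        project_domain_name='Default',
    )
    sess = ks_session.Session(auth=auth)
    # novaclient.client.Client constructs the v2 Client defined above internally.
    return nova_client.Client('2.1', session=sess)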
|
|
"""
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_lsh_forest_deprecation():
assert_warns_message(DeprecationWarning,
"LSHForest has poor performance and has been "
"deprecated in 0.19. It will be removed "
"in version 0.21.", LSHForest)
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
print('accuracies:', accuracies)
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether returned distances are less than `radius`.
# At least one point should be returned when the `radius` is set
# to the mean distance from the query point to the other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=0, n_candidates=n_points, random_state=42).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal to the query vector, hence at a distance of
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost collinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
# size of _input_array = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = ignore_warnings(LSHForest, category=DeprecationWarning)(
radius=1, random_state=0).fit(X1)
forest_dense = ignore_warnings(LSHForest, category=DeprecationWarning)(
radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
|
|
# -*- coding: utf-8 -*-
import os
import re
import glob
import base64
import codecs
import mimetypes
import jinja2
import markdown
import pygments
import tempfile
import sys
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
from subprocess import *
BASE_DIR = os.path.dirname(__file__)
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
class Generator:
def __init__(self, source, destination_file='presentation.html',
template_file=None, direct=False, debug=False, verbose=True,
embed=False, encoding='utf8', logger=None):
"""Configures this generator from its properties. "args" are not used
(yet?)
"""
self.debug = debug
self.direct = direct
self.encoding = encoding
self.logger = None
if logger:
if callable(logger):
self.logger = logger
else:
raise ValueError(u"Invalid logger set, must be a callable")
self.verbose = False if direct else verbose and self.logger
if source and os.path.exists(source):
self.source = source
self.source_base_dir = os.path.split(os.path.abspath(source))[0]
else:
raise IOError(u"Source file/directory %s does not exist"
% source)
if (os.path.exists(destination_file)
and not os.path.isfile(destination_file)):
raise IOError(u"Destination %s exists and is not a file"
% destination_file)
else:
self.destination_file = destination_file
if self.destination_file.endswith('.html'):
self.file_type = 'html'
elif self.destination_file.endswith('.pdf'):
self.file_type = 'pdf'
else:
raise IOError(u"This program can only write html or pdf files, "
"please use one of these file extensions in the "
"destination")
self.embed = True if self.file_type == 'pdf' else embed
if not template_file:
template_file = os.path.join(TEMPLATE_DIR, 'base.html')
if os.path.exists(template_file):
self.template_file = template_file
else:
raise IOError(u"Template file %s does not exist" % template_file)
def embed_images(self, html_contents, from_source):
"""Extracts images url and embed them using the base64 algorithm
"""
images = re.findall(r'<img\s.*?src="(.+?)"\s?.*?/?>', html_contents,
re.DOTALL | re.UNICODE)
if not images:
return html_contents
for image_url in images:
if not image_url or image_url.startswith('data:'):
continue
if image_url.startswith('file:///'):
self.log(u"Warning: file:/// image urls are not supported: "
"skipped", 'warning')
continue
if (image_url.startswith('http://')
or image_url.startswith('https://')):
continue
elif os.path.isabs(image_url):
image_real_path = image_url
else:
image_real_path = os.path.join(os.path.dirname(from_source), image_url)
if not os.path.exists(image_real_path):
self.log(u"Warning: image file %s not found: skipped"
% image_real_path, 'warning')
continue
mime_type, encoding = mimetypes.guess_type(image_real_path)
if not mime_type:
self.log(u"Warning: unknown image mime-type (%s): skipped"
% image_real_path, 'warning')
continue
try:
                image_contents = open(image_real_path, 'rb').read()
encoded_image = base64.b64encode(image_contents)
except IOError:
self.log(u"Warning: unable to read image contents %s: skipping"
% image_real_path, 'warning')
continue
except Exception:
self.log(u"Warning: unable to base64-encode image %s: skipping"
% image_real_path, 'warning')
continue
encoded_url = u"data:%s;base64,%s" % (mime_type, encoded_image)
html_contents = html_contents.replace(image_url, encoded_url, 1)
self.log(u"Embedded image %s" % image_real_path)
return html_contents
def execute(self):
"""Execute this generator regarding its current configuration
"""
if self.direct:
if self.file_type == 'pdf':
raise IOError(u"Direct output mode is not available for PDF "
"export")
else:
print self.render().encode(self.encoding)
else:
self.write()
self.log(u"Generated file: %s" % self.destination_file)
def fetch_contents(self, source):
"""Recursively fetches Markdown contents from a single file or
directory containing itself Markdown files
"""
contents = ""
if os.path.isdir(source):
self.log(u"Entering %s/" % source)
for entry in os.listdir(source):
current = os.path.join(source, entry)
if (os.path.isdir(current) or current.endswith('.md')
or current.endswith('.markdown')):
contents = contents + self.fetch_contents(current)
else:
self.log(u"Adding %s" % source)
md_contents = codecs.open(source, encoding=self.encoding).read()
contents = markdown.markdown(md_contents)
if self.embed:
contents = self.embed_images(contents, source)
if not contents.strip():
self.log(u"No contents found in %s" % source, 'warning')
return contents
def get_slide_vars(self, slide_src):
"""Computes a single slide template vars from its html source code
"""
vars = {'header': None, 'content': None}
find = re.search(r'^\s?(<h\d?>.+?</h\d>)\s?(.+)?', slide_src,
re.DOTALL | re.UNICODE)
if not find:
header = None
content = slide_src
else:
header = find.group(1)
content = find.group(2)
if content:
content = self.highlight_code(content.strip())
return {'header': header, 'content': content}
def get_template_vars(self, slides_src):
"""Computes template vars from slides html source code
"""
try:
head_title = slides_src[0].split('>')[1].split('<')[0]
except IndexError:
head_title = "Untitled Presentation"
slides = []
for slide_src in slides_src:
slide_vars = self.get_slide_vars(slide_src.strip())
if not slide_vars['header'] and not slide_vars['content']:
self.log(u"empty slide contents, skipping")
continue
slides.append(slide_vars)
return {'head_title': head_title, 'slides': slides}
def highlight_code(self, content):
"""Performs syntax coloration in slide code blocks
"""
while u'<code>!' in content:
code_match = re.search('<code>!(.+?)\n(.+?)</code>', content,
re.DOTALL)
if code_match:
lang, code = code_match.groups()
                code = code.replace('<', '<').replace('>', '>')
                code = code.replace('&', '&')
try:
lexer = get_lexer_by_name(lang)
except Exception:
                    self.log(u"Unknown pygment lexer \"%s\", code highlighting "
                             "skipped" % lang)
                    # Strip the "!lang" marker so the while loop above can
                    # terminate instead of re-matching this block forever.
                    content = content.replace(u'<code>!%s' % lang, u'<code>', 1)
                    continue
formatter = HtmlFormatter(linenos='inline', noclasses=True,
nobackground=True)
pretty_code = pygments.highlight(code, lexer, formatter)
before_code = content.split(u'<code>', 1)[0]
after_code = content.split(u'</code>', 1)[1]
content = before_code + pretty_code + after_code
return content
def log(self, message, type='notice'):
"""Log a message (eventually, override to do something more clever)
"""
if self.verbose and self.logger:
self.logger(message, type)
def render(self):
"""Returns generated html code
"""
slides_src = self.fetch_contents(self.source).split(u'<hr />')
template_src = codecs.open(self.template_file, encoding=self.encoding)
template = jinja2.Template(template_src.read())
template_vars = self.get_template_vars(slides_src)
return template.render(template_vars)
def write(self):
"""Writes generated presentation code into the destination file
"""
html = self.render()
if self.file_type == 'pdf':
self.write_pdf(html)
else:
outfile = codecs.open(self.destination_file, 'w',
encoding=self.encoding)
outfile.write(html)
def write_pdf(self, html):
"""Tries to write a PDF export from the command line using PrinceXML if
available
"""
try:
f = tempfile.NamedTemporaryFile(delete=False, suffix='.html')
f.write(html.encode(self.encoding))
f.close()
except Exception:
raise IOError(u"Unable to create temporary file, aborting")
dummy_fh = open(os.path.devnull, 'w')
try:
command = ["prince", f.name, self.destination_file]
process = Popen(command, stderr=dummy_fh).communicate()
except Exception:
            raise EnvironmentError(u"Unable to generate PDF file using prince. "
                                   "Is it installed and available?")
finally:
dummy_fh.close()
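# Hypothetical command-line wiring for the class above, for illustration only
# (the argument handling below is an assumption, not the project's real entry
# point; any existing Markdown source file passed on the command line will do):
if __name__ == '__main__' and len(sys.argv) > 1:
    Generator(sys.argv[1], destination_file='presentation.html',
              logger=lambda msg, kind: sys.stdout.write('[%s] %s\n' % (kind, msg))
              ).execute()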
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType
import cpp_util
import schema_util
import util_cc_helper
from cpp_namespace_environment import CppNamespaceEnvironment
class CCGenerator(object):
def __init__(self, type_generator):
self._type_generator = type_generator
def Generate(self, namespace):
return _Generator(namespace, self._type_generator).Generate()
class _Generator(object):
"""A .cc generator for a namespace.
"""
def __init__(self, namespace, cpp_type_generator):
assert type(namespace.environment) is CppNamespaceEnvironment
self._namespace = namespace
self._type_helper = cpp_type_generator
self._util_cc_helper = (
util_cc_helper.UtilCCHelper(self._type_helper))
self._generate_error_messages = namespace.compiler_options.get(
'generate_error_messages', False)
def Generate(self):
"""Generates a Code object with the .cc for a single namespace.
"""
cpp_namespace = cpp_util.GetCppNamespace(
self._namespace.environment.namespace_pattern,
self._namespace.unix_name)
c = Code()
(c.Append(cpp_util.CHROMIUM_LICENSE)
.Append()
.Append(cpp_util.GENERATED_FILE_MESSAGE % self._namespace.source_file)
.Append()
.Append(self._util_cc_helper.GetIncludePath())
.Append('#include "base/logging.h"')
.Append('#include "base/strings/string_number_conversions.h"')
.Append('#include "base/strings/utf_string_conversions.h"')
.Append('#include "base/values.h"')
.Append('#include "%s/%s.h"' %
(self._namespace.source_file_dir, self._namespace.short_filename))
.Append('#include <set>')
.Append('#include <utility>')
.Cblock(self._type_helper.GenerateIncludes(include_soft=True))
.Append()
.Append('using base::UTF8ToUTF16;')
.Append()
.Concat(cpp_util.OpenNamespace(cpp_namespace))
)
if self._namespace.properties:
(c.Append('//')
.Append('// Properties')
.Append('//')
.Append()
)
for prop in self._namespace.properties.values():
property_code = self._type_helper.GeneratePropertyValues(
prop,
'const %(type)s %(name)s = %(value)s;',
nodoc=True)
if property_code:
c.Cblock(property_code)
if self._namespace.types:
(c.Append('//')
.Append('// Types')
.Append('//')
.Append()
.Cblock(self._GenerateTypes(None, self._namespace.types.values()))
)
if self._namespace.functions:
(c.Append('//')
.Append('// Functions')
.Append('//')
.Append()
)
for function in self._namespace.functions.values():
c.Cblock(self._GenerateFunction(function))
if self._namespace.events:
(c.Append('//')
.Append('// Events')
.Append('//')
.Append()
)
for event in self._namespace.events.values():
c.Cblock(self._GenerateEvent(event))
c.Cblock(cpp_util.CloseNamespace(cpp_namespace))
c.Append()
return c
def _GenerateType(self, cpp_namespace, type_):
"""Generates the function definitions for a type.
"""
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
if type_.functions:
# Wrap functions within types in the type's namespace.
(c.Append('namespace %s {' % classname)
.Append())
for function in type_.functions.values():
c.Cblock(self._GenerateFunction(function))
c.Append('} // namespace %s' % classname)
elif type_.property_type == PropertyType.ARRAY:
c.Cblock(self._GenerateType(cpp_namespace, type_.item_type))
elif type_.property_type in (PropertyType.CHOICES,
PropertyType.OBJECT):
if cpp_namespace is None:
classname_in_namespace = classname
else:
classname_in_namespace = '%s::%s' % (cpp_namespace, classname)
if type_.property_type == PropertyType.OBJECT:
c.Cblock(self._GeneratePropertyFunctions(classname_in_namespace,
type_.properties.values()))
else:
c.Cblock(self._GenerateTypes(classname_in_namespace, type_.choices))
(c.Append('%s::%s()' % (classname_in_namespace, classname))
.Cblock(self._GenerateInitializersAndBody(type_))
.Append('%s::~%s() {}' % (classname_in_namespace, classname))
)
# Note: we use 'rhs' because some API objects have a member 'other'.
(c.Append('%s::%s(%s&& rhs)' %
(classname_in_namespace, classname, classname))
.Cblock(self._GenerateMoveCtor(type_))
.Append('%s& %s::operator=(%s&& rhs)' %
(classname_in_namespace, classname_in_namespace,
classname))
.Cblock(self._GenerateMoveAssignOperator(type_))
)
if type_.origin.from_json:
c.Cblock(self._GenerateTypePopulate(classname_in_namespace, type_))
if cpp_namespace is None: # only generate for top-level types
c.Cblock(self._GenerateTypeFromValue(classname_in_namespace, type_))
if type_.origin.from_client:
c.Cblock(self._GenerateTypeToValue(classname_in_namespace, type_))
elif type_.property_type == PropertyType.ENUM:
(c.Cblock(self._GenerateEnumToString(cpp_namespace, type_))
.Cblock(self._GenerateEnumFromString(cpp_namespace, type_))
)
return c
def _GenerateInitializersAndBody(self, type_):
items = []
for prop in type_.properties.values():
t = prop.type_
real_t = self._type_helper.FollowRef(t)
if real_t.property_type == PropertyType.ENUM:
namespace_prefix = ('%s::' % real_t.namespace.unix_name
if real_t.namespace != self._namespace
else '')
items.append('%s(%s%s)' % (prop.unix_name,
namespace_prefix,
self._type_helper.GetEnumNoneValue(t)))
elif prop.optional:
continue
elif t.property_type == PropertyType.INTEGER:
items.append('%s(0)' % prop.unix_name)
elif t.property_type == PropertyType.DOUBLE:
items.append('%s(0.0)' % prop.unix_name)
elif t.property_type == PropertyType.BOOLEAN:
items.append('%s(false)' % prop.unix_name)
elif (t.property_type == PropertyType.ANY or
t.property_type == PropertyType.ARRAY or
t.property_type == PropertyType.BINARY or
t.property_type == PropertyType.CHOICES or
t.property_type == PropertyType.OBJECT or
t.property_type == PropertyType.FUNCTION or
t.property_type == PropertyType.REF or
t.property_type == PropertyType.STRING):
# TODO(miket): It would be nice to initialize CHOICES, but we
# don't presently have the semantics to indicate which one of a set
# should be the default.
continue
else:
raise TypeError(t)
if items:
s = ': %s' % (',\n'.join(items))
else:
s = ''
s = s + ' {}'
return Code().Append(s)
def _GetMoveProps(self, type_, copy_str, move_str):
"""Returns a tuple of (props, dicts) for the type.
|props| is a list of all the copyable or movable properties generated using
the copy_str and move_str, and |dicts| is a list of all the dictionary
properties by name.
Properties:
- |type_| the Type to get the properties from
- |copy_str| the string to use when copying a value; should have two
placeholders to take the property name.
- |move_str| the string to use when moving a value; should have two
placeholders to take the property name.
"""
props = []
dicts = []
for prop in type_.properties.values():
t = prop.type_
real_t = self._type_helper.FollowRef(t)
if (real_t.property_type != PropertyType.ENUM and
(prop.optional or
t.property_type == PropertyType.ANY or
t.property_type == PropertyType.ARRAY or
t.property_type == PropertyType.BINARY or
t.property_type == PropertyType.CHOICES or
t.property_type == PropertyType.OBJECT or
t.property_type == PropertyType.REF or
t.property_type == PropertyType.STRING)):
props.append(move_str % (prop.unix_name, prop.unix_name))
elif t.property_type == PropertyType.FUNCTION:
dicts.append(prop.unix_name)
elif (real_t.property_type == PropertyType.ENUM or
t.property_type == PropertyType.INTEGER or
t.property_type == PropertyType.DOUBLE or
t.property_type == PropertyType.BOOLEAN):
props.append(copy_str % (prop.unix_name, prop.unix_name))
else:
raise TypeError(t)
if type_.property_type == PropertyType.CHOICES:
for choice in type_.choices:
prop_name = 'as_%s' % choice.unix_name
props.append(move_str % (prop_name, prop_name))
if (type_.property_type == PropertyType.OBJECT and
type_.additional_properties is not None):
if type_.additional_properties.property_type == PropertyType.ANY:
dicts.append('additional_properties')
else:
props.append(move_str % ('additional_properties',
'additional_properties'))
return (props, dicts)
def _GenerateMoveCtor(self, type_):
props, dicts = self._GetMoveProps(type_, '%s(rhs.%s)',
'%s(std::move(rhs.%s))')
s = ''
if props:
s = s + ': %s' % (',\n'.join(props))
s = s + '{'
for item in dicts:
s = s + ('\n%s.Swap(&rhs.%s);' % (item, item))
s = s + '\n}'
return Code().Append(s)
def _GenerateMoveAssignOperator(self, type_):
props, dicts = self._GetMoveProps(type_, '%s = rhs.%s;',
'%s = std::move(rhs.%s);')
s = '{\n'
if props:
s = s + '\n'.join(props)
for item in dicts:
s = s + ('%s.Swap(&rhs.%s);' % (item, item))
s = s + '\nreturn *this;\n}'
return Code().Append(s)
def _GenerateTypePopulate(self, cpp_namespace, type_):
"""Generates the function for populating a type given a pointer to it.
E.g for type "Foo", generates Foo::Populate()
"""
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
(c.Append('// static')
.Append('bool %(namespace)s::Populate(')
.Sblock(' %s) {' % self._GenerateParams(
('const base::Value& value', '%(name)s* out'))))
if self._generate_error_messages:
c.Append('DCHECK(error);')
if type_.property_type == PropertyType.CHOICES:
for choice in type_.choices:
(c.Sblock('if (%s) {' % self._GenerateValueIsTypeExpression('value',
choice))
.Concat(self._GeneratePopulateVariableFromValue(
choice,
'(&value)',
'out->as_%s' % choice.unix_name,
'false',
is_ptr=True))
.Append('return true;')
.Eblock('}')
)
(c.Concat(self._GenerateError(
'"expected %s, got " + %s' %
(" or ".join(choice.name for choice in type_.choices),
self._util_cc_helper.GetValueTypeString('value'))))
.Append('return false;'))
elif type_.property_type == PropertyType.OBJECT:
(c.Sblock('if (!value.is_dict()) {')
.Concat(self._GenerateError(
'"expected dictionary, got " + ' +
self._util_cc_helper.GetValueTypeString('value')))
.Append('return false;')
.Eblock('}'))
if type_.properties or type_.additional_properties is not None:
c.Append('const base::DictionaryValue* dict = '
'static_cast<const base::DictionaryValue*>(&value);')
if self._generate_error_messages:
c.Append('std::set<std::string> keys;')
for prop in type_.properties.values():
c.Concat(self._InitializePropertyToDefault(prop, 'out'))
for prop in type_.properties.values():
if self._generate_error_messages:
c.Append('keys.insert("%s");' % (prop.name))
c.Concat(self._GenerateTypePopulateProperty(prop, 'dict', 'out'))
# Check for extra values.
if self._generate_error_messages:
(c.Sblock('for (base::DictionaryValue::Iterator it(*dict); '
'!it.IsAtEnd(); it.Advance()) {')
.Sblock('if (!keys.count(it.key())) {')
.Concat(self._GenerateError('"found unexpected key \'" + '
'it.key() + "\'"'))
.Eblock('}')
.Eblock('}')
)
if type_.additional_properties is not None:
if type_.additional_properties.property_type == PropertyType.ANY:
c.Append('out->additional_properties.MergeDictionary(dict);')
else:
cpp_type = self._type_helper.GetCppType(type_.additional_properties,
is_in_container=True)
(c.Append('for (base::DictionaryValue::Iterator it(*dict);')
.Sblock(' !it.IsAtEnd(); it.Advance()) {')
.Append('%s tmp;' % cpp_type)
.Concat(self._GeneratePopulateVariableFromValue(
type_.additional_properties,
'(&it.value())',
'tmp',
'false'))
.Append('out->additional_properties[it.key()] = tmp;')
.Eblock('}')
)
c.Append('return true;')
(c.Eblock('}')
.Substitute({'namespace': cpp_namespace, 'name': classname}))
return c
def _GenerateValueIsTypeExpression(self, var, type_):
real_type = self._type_helper.FollowRef(type_)
if real_type.property_type is PropertyType.CHOICES:
return '(%s)' % ' || '.join(self._GenerateValueIsTypeExpression(var,
choice)
for choice in real_type.choices)
return '%s.type() == %s' % (var, cpp_util.GetValueType(real_type))
def _GenerateTypePopulateProperty(self, prop, src, dst):
"""Generate the code to populate a single property in a type.
src: base::DictionaryValue*
dst: Type*
"""
c = Code()
value_var = prop.unix_name + '_value'
c.Append('const base::Value* %(value_var)s = NULL;')
if prop.optional:
(c.Sblock(
'if (%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s)) {')
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false')))
underlying_type = self._type_helper.FollowRef(prop.type_)
if underlying_type.property_type == PropertyType.ENUM:
namespace_prefix = ('%s::' % underlying_type.namespace.unix_name
if underlying_type.namespace != self._namespace
else '')
(c.Append('} else {')
.Append('%%(dst)s->%%(name)s = %s%s;' %
(namespace_prefix,
self._type_helper.GetEnumNoneValue(prop.type_))))
c.Eblock('}')
else:
(c.Sblock(
'if (!%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s)) {')
.Concat(self._GenerateError('"\'%%(key)s\' is required"'))
.Append('return false;')
.Eblock('}')
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false'))
)
c.Append()
c.Substitute({
'value_var': value_var,
'key': prop.name,
'src': src,
'dst': dst,
'name': prop.unix_name
})
return c
def _GenerateTypeFromValue(self, cpp_namespace, type_):
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
(c.Append('// static')
.Append('std::unique_ptr<%s> %s::FromValue(%s) {' % (classname,
cpp_namespace, self._GenerateParams(('const base::Value& value',))))
)
if self._generate_error_messages:
c.Append('DCHECK(error);')
(c.Append(' std::unique_ptr<%s> out(new %s());' % (classname, classname))
.Append(' if (!Populate(%s))' % self._GenerateArgs(
('value', 'out.get()')))
.Append(' return nullptr;')
.Append(' return out;')
.Append('}')
)
return c
def _GenerateTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes the type into a base::Value.
E.g. for type "Foo" generates Foo::ToValue()
"""
if type_.property_type == PropertyType.OBJECT:
return self._GenerateObjectTypeToValue(cpp_namespace, type_)
elif type_.property_type == PropertyType.CHOICES:
return self._GenerateChoiceTypeToValue(cpp_namespace, type_)
else:
raise ValueError("Unsupported property type %s" % type_.type_)
def _GenerateObjectTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes an object-representing type
into a base::DictionaryValue.
"""
c = Code()
(c.Sblock('std::unique_ptr<base::DictionaryValue> %s::ToValue() const {' %
cpp_namespace)
.Append('std::unique_ptr<base::DictionaryValue> to_value_result('
'new base::DictionaryValue());')
.Append()
)
for prop in type_.properties.values():
prop_var = 'this->%s' % prop.unix_name
if prop.optional:
underlying_type = self._type_helper.FollowRef(prop.type_)
if underlying_type.property_type == PropertyType.ENUM:
# Optional enum values are generated with a NONE enum value,
# potentially from another namespace.
maybe_namespace = ''
if underlying_type.namespace != self._namespace:
maybe_namespace = '%s::' % underlying_type.namespace.unix_name
c.Sblock('if (%s != %s%s) {' %
(prop_var,
maybe_namespace,
self._type_helper.GetEnumNoneValue(prop.type_)))
else:
c.Sblock('if (%s.get()) {' % prop_var)
# ANY is a base::Value which is abstract and cannot be a direct member, so
# it will always be a pointer.
is_ptr = prop.optional or prop.type_.property_type == PropertyType.ANY
c.Cblock(self._CreateValueFromType(
'to_value_result->SetWithoutPathExpansion("%s", %%s);' % prop.name,
prop.name,
prop.type_,
prop_var,
is_ptr=is_ptr))
if prop.optional:
c.Eblock('}')
if type_.additional_properties is not None:
if type_.additional_properties.property_type == PropertyType.ANY:
c.Append('to_value_result->MergeDictionary(&additional_properties);')
else:
(c.Sblock('for (const auto& it : additional_properties) {')
.Cblock(self._CreateValueFromType(
'to_value_result->SetWithoutPathExpansion(it.first, %s);',
type_.additional_properties.name,
type_.additional_properties,
'it.second'))
.Eblock('}')
)
return (c.Append()
.Append('return to_value_result;')
.Eblock('}'))
def _GenerateChoiceTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes a choice-representing type
into a base::Value.
"""
c = Code()
c.Sblock('std::unique_ptr<base::Value> %s::ToValue() const {' %
cpp_namespace)
c.Append('std::unique_ptr<base::Value> result;')
for choice in type_.choices:
choice_var = 'as_%s' % choice.unix_name
# Enums cannot be wrapped with scoped_ptr, but the XXX_NONE enum value
# is equal to 0.
(c.Sblock('if (%s) {' % choice_var)
.Append('DCHECK(!result) << "Cannot set multiple choices for %s";' %
type_.unix_name).Cblock(self._CreateValueFromType(
'result = %s;', choice.name, choice, choice_var, True))
.Eblock('}'))
(c.Append('DCHECK(result) << "Must set at least one choice for %s";' %
type_.unix_name).Append('return result;').Eblock('}'))
return c
def _GenerateFunction(self, function):
"""Generates the definitions for function structs.
"""
c = Code()
# TODO(kalman): use function.unix_name not Classname.
function_namespace = cpp_util.Classname(function.name)
# Windows has a #define for SendMessage, so to avoid any issues, we need
# to not use the name.
if function_namespace == 'SendMessage':
function_namespace = 'PassMessage'
(c.Append('namespace %s {' % function_namespace)
.Append()
)
# Params::Populate function
if function.params:
c.Concat(self._GeneratePropertyFunctions('Params', function.params))
(c.Append('Params::Params() {}')
.Append('Params::~Params() {}')
.Append()
.Cblock(self._GenerateFunctionParamsCreate(function))
)
# Results::Create function
if function.callback:
c.Concat(self._GenerateCreateCallbackArguments('Results',
function.callback))
c.Append('} // namespace %s' % function_namespace)
return c
def _GenerateEvent(self, event):
# TODO(kalman): use event.unix_name not Classname.
c = Code()
event_namespace = cpp_util.Classname(event.name)
(c.Append('namespace %s {' % event_namespace)
.Append()
.Cblock(self._GenerateEventNameConstant(event))
.Cblock(self._GenerateCreateCallbackArguments(None, event))
.Append('} // namespace %s' % event_namespace)
)
return c
def _CreateValueFromType(self, code, prop_name, type_, var, is_ptr=False):
"""Creates a base::Value given a type. Generated code passes ownership
to caller via std::unique_ptr.
var: variable or variable*
E.g for std::string, generate new base::Value(var)
"""
c = Code()
underlying_type = self._type_helper.FollowRef(type_)
if underlying_type.property_type == PropertyType.ARRAY:
# Enums are treated specially because C++ templating thinks that they're
# ints, but really they're strings. So we create a vector of strings and
# populate it with the names of the enum in the array. The |ToString|
# function of the enum can be in another namespace when the enum is
# referenced. Templates can not be used here because C++ templating does
# not support passing a namespace as an argument.
item_type = self._type_helper.FollowRef(underlying_type.item_type)
if item_type.property_type == PropertyType.ENUM:
varname = ('*' if is_ptr else '') + '(%s)' % var
maybe_namespace = ''
if type_.item_type.property_type == PropertyType.REF:
maybe_namespace = '%s::' % item_type.namespace.unix_name
enum_list_var = '%s_list' % prop_name
# Scope the std::vector variable declaration inside braces.
(c.Sblock('{')
.Append('std::vector<std::string> %s;' % enum_list_var)
.Append('for (const auto& it : %s) {' % varname)
.Append('%s.push_back(%sToString(it));' % (enum_list_var,
maybe_namespace))
.Eblock('}'))
# Because the std::vector above is always created for both required and
# optional enum arrays, |is_ptr| is set to false and uses the
# std::vector to create the values.
(c.Append(code %
self._GenerateCreateValueFromType(type_, enum_list_var, False))
.Append('}'))
return c
c.Append(code % self._GenerateCreateValueFromType(type_, var, is_ptr))
return c
def _GenerateCreateValueFromType(self, type_, var, is_ptr):
"""Generates the statement to create a base::Value given a type.
type_: The type of the values being converted.
var: The name of the variable.
is_ptr: Whether |type_| is optional.
"""
underlying_type = self._type_helper.FollowRef(type_)
if (underlying_type.property_type == PropertyType.CHOICES or
underlying_type.property_type == PropertyType.OBJECT):
if is_ptr:
return '(%s)->ToValue()' % var
else:
return '(%s).ToValue()' % var
elif (underlying_type.property_type == PropertyType.ANY or
underlying_type.property_type == PropertyType.FUNCTION):
if is_ptr:
vardot = '(%s)->' % var
else:
vardot = '(%s).' % var
return '%sCreateDeepCopy()' % vardot
elif underlying_type.property_type == PropertyType.ENUM:
maybe_namespace = ''
if type_.property_type == PropertyType.REF:
maybe_namespace = '%s::' % underlying_type.namespace.unix_name
return 'std::make_unique<base::Value>(%sToString(%s))' % (
maybe_namespace, var)
elif underlying_type.property_type == PropertyType.BINARY:
if is_ptr:
var = '*%s' % var
return 'std::make_unique<base::Value>(%s)' % var
elif underlying_type.property_type == PropertyType.ARRAY:
return '%s' % self._util_cc_helper.CreateValueFromArray(
var,
is_ptr)
elif underlying_type.property_type.is_fundamental:
if is_ptr:
var = '*%s' % var
if underlying_type.property_type == PropertyType.STRING:
return 'std::make_unique<base::Value>(%s)' % var
else:
return 'std::make_unique<base::Value>(%s)' % var
else:
raise NotImplementedError('Conversion of %s to base::Value not '
'implemented' % repr(type_.type_))
def _GenerateParamsCheck(self, function, var):
"""Generates a check for the correct number of arguments when creating
Params.
"""
c = Code()
num_required = 0
for param in function.params:
if not param.optional:
num_required += 1
if num_required == len(function.params):
c.Sblock('if (%(var)s.GetSize() != %(total)d) {')
elif not num_required:
c.Sblock('if (%(var)s.GetSize() > %(total)d) {')
else:
c.Sblock('if (%(var)s.GetSize() < %(required)d'
' || %(var)s.GetSize() > %(total)d) {')
(c.Concat(self._GenerateError(
'"expected %%(total)d arguments, got " '
'+ base::NumberToString(%%(var)s.GetSize())'))
.Append('return nullptr;')
.Eblock('}')
.Substitute({
'var': var,
'required': num_required,
'total': len(function.params),
}))
return c
def _GenerateFunctionParamsCreate(self, function):
"""Generate function to create an instance of Params. The generated
function takes a base::ListValue of arguments.
E.g for function "Bar", generate Bar::Params::Create()
"""
c = Code()
(c.Append('// static')
.Sblock('std::unique_ptr<Params> Params::Create(%s) {' %
self._GenerateParams(['const base::ListValue& args']))
)
if self._generate_error_messages:
c.Append('DCHECK(error);')
(c.Concat(self._GenerateParamsCheck(function, 'args'))
.Append('std::unique_ptr<Params> params(new Params());')
)
for param in function.params:
c.Concat(self._InitializePropertyToDefault(param, 'params'))
for i, param in enumerate(function.params):
# Any failure will cause this function to return. If any argument is
# incorrect or missing, those following it are not processed. Note that
# for optional arguments, we allow missing arguments and proceed because
# there may be other arguments following it.
failure_value = 'std::unique_ptr<Params>()'
c.Append()
value_var = param.unix_name + '_value'
(c.Append('const base::Value* %(value_var)s = NULL;')
.Append('if (args.Get(%(i)s, &%(value_var)s) &&')
.Sblock(' !%(value_var)s->is_none()) {')
.Concat(self._GeneratePopulatePropertyFromValue(
param, value_var, 'params', failure_value))
.Eblock('}')
)
if not param.optional:
(c.Sblock('else {')
.Concat(self._GenerateError('"\'%%(key)s\' is required"'))
.Append('return %s;' % failure_value)
.Eblock('}'))
c.Substitute({'value_var': value_var, 'i': i, 'key': param.name})
(c.Append()
.Append('return params;')
.Eblock('}')
.Append()
)
return c
def _GeneratePopulatePropertyFromValue(self,
prop,
src_var,
dst_class_var,
failure_value):
"""Generates code to populate property |prop| of |dst_class_var| (a
pointer) from a Value*. See |_GeneratePopulateVariableFromValue| for
semantics.
"""
return self._GeneratePopulateVariableFromValue(prop.type_,
src_var,
'%s->%s' % (dst_class_var,
prop.unix_name),
failure_value,
is_ptr=prop.optional)
def _GeneratePopulateVariableFromValue(self,
type_,
src_var,
dst_var,
failure_value,
is_ptr=False):
"""Generates code to populate a variable |dst_var| of type |type_| from a
Value* at |src_var|. The Value* is assumed to be non-NULL. In the generated
code, if |dst_var| fails to be populated then Populate will return
|failure_value|.
"""
c = Code()
underlying_type = self._type_helper.FollowRef(type_)
if underlying_type.property_type.is_fundamental:
if is_ptr:
(c.Append('%(cpp_type)s temp;')
.Sblock('if (!%s) {' % cpp_util.GetAsFundamentalValue(
self._type_helper.FollowRef(type_), src_var, '&temp'))
.Concat(self._GenerateError(
'"\'%%(key)s\': expected ' + '%s, got " + %s' % (
type_.name,
self._util_cc_helper.GetValueTypeString(
'%%(src_var)s', True)))))
c.Append('%(dst_var)s.reset();')
if not self._generate_error_messages:
c.Append('return %(failure_value)s;')
(c.Eblock('}')
.Append('else')
.Append(' %(dst_var)s.reset(new %(cpp_type)s(temp));')
)
else:
(c.Sblock('if (!%s) {' % cpp_util.GetAsFundamentalValue(
self._type_helper.FollowRef(type_),
src_var,
'&%s' % dst_var))
.Concat(self._GenerateError(
'"\'%%(key)s\': expected ' + '%s, got " + %s' % (
type_.name,
self._util_cc_helper.GetValueTypeString(
'%%(src_var)s', True))))
.Append('return %(failure_value)s;')
.Eblock('}')
)
elif underlying_type.property_type == PropertyType.OBJECT:
if is_ptr:
(c.Append('const base::DictionaryValue* dictionary = NULL;')
.Sblock('if (!%(src_var)s->GetAsDictionary(&dictionary)) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected dictionary, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True))))
# If an optional property fails to populate, the population can still
# succeed with a warning. If no error messages are generated, this
# warning is not set and we fail out instead.
if not self._generate_error_messages:
c.Append('return %(failure_value)s;')
(c.Eblock('}')
.Sblock('else {')
.Append('std::unique_ptr<%(cpp_type)s> temp(new %(cpp_type)s());')
.Append('if (!%%(cpp_type)s::Populate(%s)) {' % self._GenerateArgs(
('*dictionary', 'temp.get()')))
.Append(' return %(failure_value)s;')
)
(c.Append('}')
.Append('else')
.Append(' %(dst_var)s = std::move(temp);')
.Eblock('}')
)
else:
(c.Append('const base::DictionaryValue* dictionary = NULL;')
.Sblock('if (!%(src_var)s->GetAsDictionary(&dictionary)) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected dictionary, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
.Append('return %(failure_value)s;')
.Eblock('}')
.Append('if (!%%(cpp_type)s::Populate(%s)) {' % self._GenerateArgs(
('*dictionary', '&%(dst_var)s')))
.Append(' return %(failure_value)s;')
.Append('}')
)
elif underlying_type.property_type == PropertyType.FUNCTION:
if is_ptr:
c.Append('%(dst_var)s.reset(new base::DictionaryValue());')
elif underlying_type.property_type == PropertyType.ANY:
c.Append('%(dst_var)s = %(src_var)s->CreateDeepCopy();')
elif underlying_type.property_type == PropertyType.ARRAY:
# util_cc_helper deals with optional and required arrays
(c.Append('const base::ListValue* list = NULL;')
.Sblock('if (!%(src_var)s->GetAsList(&list)) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected list, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
)
if is_ptr and self._generate_error_messages:
c.Append('%(dst_var)s.reset();')
else:
c.Append('return %(failure_value)s;')
c.Eblock('}')
c.Sblock('else {')
item_type = self._type_helper.FollowRef(underlying_type.item_type)
if item_type.property_type == PropertyType.ENUM:
c.Concat(self._GenerateListValueToEnumArrayConversion(
item_type,
'list',
dst_var,
failure_value,
is_ptr=is_ptr))
else:
c.Sblock('if (!%s(%s)) {' % (
self._util_cc_helper.PopulateArrayFromListFunction(is_ptr),
self._GenerateArgs(('*list', '&%(dst_var)s'))))
c.Concat(self._GenerateError(
'"unable to populate array \'%%(parent_key)s\'"'))
if is_ptr and self._generate_error_messages:
c.Append('%(dst_var)s.reset();')
else:
c.Append('return %(failure_value)s;')
c.Eblock('}')
c.Eblock('}')
elif underlying_type.property_type == PropertyType.CHOICES:
if is_ptr:
(c.Append('std::unique_ptr<%(cpp_type)s> temp(new %(cpp_type)s());')
.Append('if (!%%(cpp_type)s::Populate(%s))' % self._GenerateArgs(
('*%(src_var)s', 'temp.get()')))
.Append(' return %(failure_value)s;')
.Append('%(dst_var)s = std::move(temp);')
)
else:
(c.Append('if (!%%(cpp_type)s::Populate(%s))' % self._GenerateArgs(
('*%(src_var)s', '&%(dst_var)s')))
.Append(' return %(failure_value)s;'))
elif underlying_type.property_type == PropertyType.ENUM:
c.Concat(self._GenerateStringToEnumConversion(underlying_type,
src_var,
dst_var,
failure_value))
elif underlying_type.property_type == PropertyType.BINARY:
(c.Sblock('if (!%(src_var)s->is_blob()) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected binary, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
)
if not self._generate_error_messages:
c.Append('return %(failure_value)s;')
(c.Eblock('}')
.Sblock('else {')
)
if is_ptr:
c.Append('%(dst_var)s.reset(new std::vector<uint8_t>('
'%(src_var)s->GetBlob()));')
else:
c.Append('%(dst_var)s = %(src_var)s->GetBlob();')
c.Eblock('}')
else:
raise NotImplementedError(type_)
if c.IsEmpty():
return c
return Code().Sblock('{').Concat(c.Substitute({
'cpp_type': self._type_helper.GetCppType(type_),
'src_var': src_var,
'dst_var': dst_var,
'failure_value': failure_value,
'key': type_.name,
'parent_key': type_.parent.name,
})).Eblock('}')
def _GenerateListValueToEnumArrayConversion(self,
item_type,
src_var,
dst_var,
failure_value,
is_ptr=False):
"""Returns Code that converts a ListValue of string constants from
|src_var| into an array of enums of |type_| in |dst_var|. On failure,
returns |failure_value|.
"""
c = Code()
accessor = '.'
if is_ptr:
accessor = '->'
cpp_type = self._type_helper.GetCppType(item_type, is_in_container=True)
c.Append('%s.reset(new std::vector<%s>);' %
(dst_var, cpp_type))
(c.Sblock('for (const auto& it : *(%s)) {' % src_var)
.Append('%s tmp;' % self._type_helper.GetCppType(item_type))
.Concat(self._GenerateStringToEnumConversion(item_type,
'(it)',
'tmp',
failure_value,
is_ptr=False))
.Append('%s%spush_back(tmp);' % (dst_var, accessor))
.Eblock('}')
)
return c
def _GenerateStringToEnumConversion(self,
type_,
src_var,
dst_var,
failure_value,
is_ptr=True):
"""Returns Code that converts a string type in |src_var| to an enum with
type |type_| in |dst_var|. In the generated code, if |src_var| is not
a valid enum name then the function will return |failure_value|.
"""
if type_.property_type != PropertyType.ENUM:
raise TypeError(type_)
c = Code()
enum_as_string = '%s_as_string' % type_.unix_name
cpp_type_namespace = ''
if type_.namespace != self._namespace:
cpp_type_namespace = '%s::' % type_.namespace.unix_name
accessor = '->' if is_ptr else '.'
(c.Append('std::string %s;' % enum_as_string)
.Sblock('if (!%s%sGetAsString(&%s)) {' % (src_var,
accessor,
enum_as_string))
.Concat(self._GenerateError(
'"\'%%(key)s\': expected string, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', is_ptr)))
.Append('return %s;' % failure_value)
.Eblock('}')
.Append('%s = %sParse%s(%s);' % (dst_var,
cpp_type_namespace,
cpp_util.Classname(type_.name),
enum_as_string))
.Sblock('if (%s == %s%s) {' % (dst_var,
cpp_type_namespace,
self._type_helper.GetEnumNoneValue(type_)))
.Concat(self._GenerateError(
'\"\'%%(key)s\': expected \\"' +
'\\" or \\"'.join(
enum_value.name
for enum_value in self._type_helper.FollowRef(type_).enum_values) +
'\\", got \\"" + %s + "\\""' % enum_as_string))
.Append('return %s;' % failure_value)
.Eblock('}')
.Substitute({'src_var': src_var, 'key': type_.name})
)
return c
def _GeneratePropertyFunctions(self, namespace, params):
"""Generates the member functions for a list of parameters.
"""
return self._GenerateTypes(namespace, (param.type_ for param in params))
def _GenerateTypes(self, namespace, types):
"""Generates the member functions for a list of types.
"""
c = Code()
for type_ in types:
c.Cblock(self._GenerateType(namespace, type_))
return c
def _GenerateEnumToString(self, cpp_namespace, type_):
"""Generates ToString() which gets the string representation of an enum.
"""
c = Code()
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
if cpp_namespace is not None:
c.Append('// static')
maybe_namespace = '' if cpp_namespace is None else '%s::' % cpp_namespace
c.Sblock('const char* %sToString(%s enum_param) {' %
(maybe_namespace, classname))
c.Sblock('switch (enum_param) {')
for enum_value in self._type_helper.FollowRef(type_).enum_values:
name = enum_value.name
if 'camel_case_enum_to_string' in self._namespace.compiler_options:
name = enum_value.CamelName()
(c.Append('case %s: ' % self._type_helper.GetEnumValue(type_, enum_value))
.Append(' return "%s";' % name))
(c.Append('case %s:' % self._type_helper.GetEnumNoneValue(type_))
.Append(' return "";')
.Eblock('}')
.Append('NOTREACHED();')
.Append('return "";')
.Eblock('}')
)
return c
def _GenerateEnumFromString(self, cpp_namespace, type_):
"""Generates FromClassNameString() which gets an enum from its string
representation.
"""
c = Code()
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
if cpp_namespace is not None:
c.Append('// static')
maybe_namespace = '' if cpp_namespace is None else '%s::' % cpp_namespace
c.Sblock('%s%s %sParse%s(const std::string& enum_string) {' %
(maybe_namespace, classname, maybe_namespace, classname))
for _, enum_value in enumerate(
self._type_helper.FollowRef(type_).enum_values):
# This is broken up into all ifs with no else ifs because we get
# "fatal error C1061: compiler limit : blocks nested too deeply"
# on Windows.
name = enum_value.name
if 'camel_case_enum_to_string' in self._namespace.compiler_options:
name = enum_value.CamelName()
(c.Append('if (enum_string == "%s")' % name)
.Append(' return %s;' %
self._type_helper.GetEnumValue(type_, enum_value)))
(c.Append('return %s;' % self._type_helper.GetEnumNoneValue(type_))
.Eblock('}')
)
return c
def _GenerateCreateCallbackArguments(self,
function_scope,
callback):
"""Generate all functions to create Value parameters for a callback.
E.g for function "Bar", generate Bar::Results::Create
E.g for event "Baz", generate Baz::Create
function_scope: the function scope path, e.g. Foo::Bar for the function
Foo::Bar::Baz(). May be None if there is no function scope.
callback: the Function object we are creating callback arguments for.
"""
c = Code()
params = callback.params
c.Concat(self._GeneratePropertyFunctions(function_scope, params))
(c.Sblock('std::unique_ptr<base::ListValue> %(function_scope)s'
'Create(%(declaration_list)s) {')
.Append('std::unique_ptr<base::ListValue> create_results('
'new base::ListValue());')
)
declaration_list = []
for param in params:
declaration_list.append(cpp_util.GetParameterDeclaration(
param, self._type_helper.GetCppType(param.type_)))
c.Cblock(self._CreateValueFromType('create_results->Append(%s);',
param.name,
param.type_,
param.unix_name))
c.Append('return create_results;')
c.Eblock('}')
c.Substitute({
'function_scope': ('%s::' % function_scope) if function_scope else '',
'declaration_list': ', '.join(declaration_list),
'param_names': ', '.join(param.unix_name for param in params)
})
return c
def _GenerateEventNameConstant(self, event):
"""Generates a constant string array for the event name.
"""
c = Code()
c.Append('const char kEventName[] = "%s.%s";' % (
self._namespace.name, event.name))
return c
def _InitializePropertyToDefault(self, prop, dst):
"""Initialize a model.Property to its default value inside an object.
E.g for optional enum "state", generate dst->state = STATE_NONE;
dst: Type*
"""
c = Code()
underlying_type = self._type_helper.FollowRef(prop.type_)
if (underlying_type.property_type == PropertyType.ENUM and
prop.optional):
namespace_prefix = ('%s::' % underlying_type.namespace.unix_name
if underlying_type.namespace != self._namespace
else '')
c.Append('%s->%s = %s%s;' % (
dst,
prop.unix_name,
namespace_prefix,
self._type_helper.GetEnumNoneValue(prop.type_)))
return c
def _GenerateError(self, body):
"""Generates an error message pertaining to population failure.
E.g 'expected bool, got int'
"""
c = Code()
if not self._generate_error_messages:
return c
(c.Append('if (error->length())')
.Append(' error->append(UTF8ToUTF16("; "));')
.Append('error->append(UTF8ToUTF16(%s));' % body))
return c
def _GenerateParams(self, params):
"""Builds the parameter list for a function, given an array of parameters.
"""
if self._generate_error_messages:
params = list(params) + ['base::string16* error']
return ', '.join(str(p) for p in params)
def _GenerateArgs(self, args):
"""Builds the argument list for a function, given an array of arguments.
"""
if self._generate_error_messages:
args = list(args) + ['error']
return ', '.join(str(a) for a in args)
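# The generator above is written against a fluent "Code" builder imported from
# the accompanying `code` module (not shown here). The minimal stand-in below is
# an illustrative assumption: it mimics only the Append/Sblock/Eblock
# indentation pattern and is not the real Code API (no Cblock/Concat/Substitute).
class CodeBuilderSketch(object):
  def __init__(self, indent='  '):
    self._lines = []
    self._depth = 0
    self._indent = indent
  def Append(self, line=''):
    # Record a line at the current indentation depth; blank lines stay blank.
    self._lines.append((self._indent * self._depth + line) if line else '')
    return self
  def Sblock(self, line):
    # Emit the opening line, then indent everything that follows it.
    self.Append(line)
    self._depth += 1
    return self
  def Eblock(self, line):
    # Dedent, then emit the closing line.
    self._depth -= 1
    return self.Append(line)
  def Render(self):
    return '\n'.join(self._lines)
# Example:
#   CodeBuilderSketch().Sblock('if (ok) {').Append('return true;').Eblock('}').Render()
# yields three lines with the middle one indented.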
|
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume-related Utilities and helpers."""
import math
from Crypto.Random import random
from oslo.config import cfg
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import strutils
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
from cinder import rpc
from cinder import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def null_safe_str(s):
return str(s) if s else ''
def _usage_from_volume(context, volume_ref, **kw):
usage_info = dict(tenant_id=volume_ref['project_id'],
host=volume_ref['host'],
user_id=volume_ref['user_id'],
instance_uuid=volume_ref['instance_uuid'],
availability_zone=volume_ref['availability_zone'],
volume_id=volume_ref['id'],
volume_type=volume_ref['volume_type_id'],
display_name=volume_ref['display_name'],
launched_at=null_safe_str(volume_ref['launched_at']),
created_at=null_safe_str(volume_ref['created_at']),
status=volume_ref['status'],
snapshot_id=volume_ref['snapshot_id'],
size=volume_ref['size'],
replication_status=volume_ref['replication_status'],
replication_extended_status=
volume_ref['replication_extended_status'],
replication_driver_data=
volume_ref['replication_driver_data'],
)
usage_info.update(kw)
return usage_info
def notify_about_volume_usage(context, volume, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume, **extra_usage_info)
rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix,
usage_info)
def _usage_from_snapshot(context, snapshot_ref, **extra_usage_info):
usage_info = {
'tenant_id': snapshot_ref['project_id'],
'user_id': snapshot_ref['user_id'],
'availability_zone': snapshot_ref.volume['availability_zone'],
'volume_id': snapshot_ref['volume_id'],
'volume_size': snapshot_ref['volume_size'],
'snapshot_id': snapshot_ref['id'],
'display_name': snapshot_ref['display_name'],
'created_at': str(snapshot_ref['created_at']),
'status': snapshot_ref['status'],
'deleted': null_safe_str(snapshot_ref['deleted'])
}
usage_info.update(extra_usage_info)
return usage_info
def notify_about_snapshot_usage(context, snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_snapshot(context, snapshot, **extra_usage_info)
rpc.get_notifier('snapshot', host).info(context,
'snapshot.%s' % event_suffix,
usage_info)
def notify_about_replication_usage(context, volume, suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context,
volume,
**extra_usage_info)
rpc.get_notifier('replication', host).info(context,
'replication.%s' % suffix,
usage_info)
def notify_about_replication_error(context, volume, suffix,
extra_error_info=None, host=None):
if not host:
host = CONF.host
if not extra_error_info:
extra_error_info = {}
usage_info = _usage_from_volume(context,
volume,
**extra_error_info)
rpc.get_notifier('replication', host).error(context,
'replication.%s' % suffix,
usage_info)
def _usage_from_consistencygroup(context, group_ref, **kw):
usage_info = dict(tenant_id=group_ref['project_id'],
user_id=group_ref['user_id'],
availability_zone=group_ref['availability_zone'],
consistencygroup_id=group_ref['id'],
name=group_ref['name'],
created_at=null_safe_str(group_ref['created_at']),
status=group_ref['status'])
usage_info.update(kw)
return usage_info
def notify_about_consistencygroup_usage(context, group, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_consistencygroup(context,
group,
**extra_usage_info)
rpc.get_notifier("consistencygroup", host).info(
context,
'consistencygroup.%s' % event_suffix,
usage_info)
def _usage_from_cgsnapshot(context, cgsnapshot_ref, **kw):
usage_info = dict(
tenant_id=cgsnapshot_ref['project_id'],
user_id=cgsnapshot_ref['user_id'],
cgsnapshot_id=cgsnapshot_ref['id'],
name=cgsnapshot_ref['name'],
consistencygroup_id=cgsnapshot_ref['consistencygroup_id'],
created_at=null_safe_str(cgsnapshot_ref['created_at']),
status=cgsnapshot_ref['status'])
usage_info.update(kw)
return usage_info
def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_cgsnapshot(context,
cgsnapshot,
**extra_usage_info)
rpc.get_notifier("cgsnapshot", host).info(
context,
'cgsnapshot.%s' % event_suffix,
usage_info)
def setup_blkio_cgroup(srcpath, dstpath, bps_limit, execute=utils.execute):
if not bps_limit:
LOG.debug('Not using bps rate limiting on volume copy')
return None
try:
srcdev = utils.get_blkdev_major_minor(srcpath)
except exception.Error as e:
msg = (_('Failed to get device number for read throttling: %(error)s')
% {'error': e})
LOG.error(msg)
srcdev = None
try:
dstdev = utils.get_blkdev_major_minor(dstpath)
except exception.Error as e:
msg = (_('Failed to get device number for write throttling: %(error)s')
% {'error': e})
LOG.error(msg)
dstdev = None
if not srcdev and not dstdev:
return None
group_name = CONF.volume_copy_blkio_cgroup_name
LOG.debug('Setting rate limit to %s bps for blkio '
'group: %s' % (bps_limit, group_name))
try:
execute('cgcreate', '-g', 'blkio:%s' % group_name, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.warn(_('Failed to create blkio cgroup'))
return None
try:
if srcdev:
execute('cgset', '-r', 'blkio.throttle.read_bps_device=%s %d'
% (srcdev, bps_limit), group_name, run_as_root=True)
if dstdev:
execute('cgset', '-r', 'blkio.throttle.write_bps_device=%s %d'
% (dstdev, bps_limit), group_name, run_as_root=True)
except processutils.ProcessExecutionError:
msg = (_('Failed to setup blkio cgroup to throttle the devices: '
'\'%(src)s\',\'%(dst)s\'')
% {'src': srcdev, 'dst': dstdev})
LOG.warn(msg)
return None
return ['cgexec', '-g', 'blkio:%s' % group_name]
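def _example_throttled_copy_command():
    # Illustration only: shows how the prefix returned by setup_blkio_cgroup()
    # is prepended to the dd command in copy_volume() below. The cgroup name and
    # device paths here are made-up examples, not configuration values.
    prefix = ['cgexec', '-g', 'blkio:cinder-volume-copy']
    dd_cmd = ['dd', 'if=/dev/zero', 'of=/dev/mapper/example-volume',
              'count=100', 'bs=1M']
    return prefix + dd_cmd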
def _calculate_count(size_in_m, blocksize):
# Check if volume_dd_blocksize is valid
try:
# Rule out zero-sized/negative/float dd blocksize which
# cannot be caught by strutils
if blocksize.startswith(('-', '0')) or '.' in blocksize:
raise ValueError
bs = strutils.string_to_bytes('%sB' % blocksize)
except ValueError:
msg = (_("Incorrect value error: %(blocksize)s, "
"it may indicate that \'volume_dd_blocksize\' "
"was configured incorrectly. Fall back to default.")
% {'blocksize': blocksize})
LOG.warn(msg)
# Fall back to default blocksize
CONF.clear_override('volume_dd_blocksize')
blocksize = CONF.volume_dd_blocksize
bs = strutils.string_to_bytes('%sB' % blocksize)
count = math.ceil(size_in_m * units.Mi / bs)
return blocksize, int(count)
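def _example_dd_count():
    # Worked example of the arithmetic in _calculate_count() above, recomputed
    # standalone for illustration (no strutils/units): copying 100 MiB with the
    # default '1M' blocksize gives bs = 1048576 bytes and count = 100 blocks.
    import math
    size_in_m = 100
    bs = 1024 * 1024  # '1M' in bytes, i.e. units.Mi
    return int(math.ceil(size_in_m * 1024 * 1024 / float(bs)))  # -> 100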
def copy_volume(srcstr, deststr, size_in_m, blocksize, sync=False,
execute=utils.execute, ionice=None):
# Use O_DIRECT to avoid thrashing the system buffer cache
extra_flags = []
# Check whether O_DIRECT is supported to iflag and oflag separately
for flag in ['iflag=direct', 'oflag=direct']:
try:
execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr,
flag, run_as_root=True)
extra_flags.append(flag)
except processutils.ProcessExecutionError:
pass
# If the volume is being unprovisioned then
# request the data is persisted before returning,
# so that it's not discarded from the cache.
if sync and not extra_flags:
extra_flags.append('conv=fdatasync')
blocksize, count = _calculate_count(size_in_m, blocksize)
cmd = ['dd', 'if=%s' % srcstr, 'of=%s' % deststr,
'count=%d' % count, 'bs=%s' % blocksize]
cmd.extend(extra_flags)
if ionice is not None:
cmd = ['ionice', ionice] + cmd
cgcmd = setup_blkio_cgroup(srcstr, deststr, CONF.volume_copy_bps_limit)
if cgcmd:
cmd = cgcmd + cmd
# Perform the copy
start_time = timeutils.utcnow()
execute(*cmd, run_as_root=True)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
mbps = (size_in_m / duration)
mesg = ("Volume copy details: src %(src)s, dest %(dest)s, "
"size %(sz).2f MB, duration %(duration).2f sec")
LOG.debug(mesg % {"src": srcstr,
"dest": deststr,
"sz": size_in_m,
"duration": duration})
mesg = _("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s")
LOG.info(mesg % {'size_in_m': size_in_m, 'mbps': mbps})
def clear_volume(volume_size, volume_path, volume_clear=None,
volume_clear_size=None, volume_clear_ionice=None):
"""Unprovision old volumes to prevent data leaking between users."""
if volume_clear is None:
volume_clear = CONF.volume_clear
if volume_clear_size is None:
volume_clear_size = CONF.volume_clear_size
if volume_clear_size == 0:
volume_clear_size = volume_size
if volume_clear_ionice is None:
volume_clear_ionice = CONF.volume_clear_ionice
LOG.info(_("Performing secure delete on volume: %s") % volume_path)
if volume_clear == 'zero':
return copy_volume('/dev/zero', volume_path, volume_clear_size,
CONF.volume_dd_blocksize,
sync=True, execute=utils.execute,
ionice=volume_clear_ionice)
elif volume_clear == 'shred':
clear_cmd = ['shred', '-n3']
if volume_clear_size:
clear_cmd.append('-s%dMiB' % volume_clear_size)
else:
raise exception.InvalidConfigurationValue(
option='volume_clear',
value=volume_clear)
clear_cmd.append(volume_path)
start_time = timeutils.utcnow()
utils.execute(*clear_cmd, run_as_root=True)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
LOG.info(_('Elapsed time for clear volume: %.2f sec') % duration)
def supports_thin_provisioning():
return brick_lvm.LVM.supports_thin_provisioning(
utils.get_root_helper())
def get_all_volumes(vg_name=None):
return brick_lvm.LVM.get_all_volumes(
utils.get_root_helper(),
vg_name)
def get_all_physical_volumes(vg_name=None):
return brick_lvm.LVM.get_all_physical_volumes(
utils.get_root_helper(),
vg_name)
def get_all_volume_groups(vg_name=None):
return brick_lvm.LVM.get_all_volume_groups(
utils.get_root_helper(),
vg_name)
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [random.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
random.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([random.choice(symbols) for _i in xrange(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
random.shuffle(password)
return ''.join(password)
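def _example_password_sketch(length=8):
    # Self-contained sketch of the "one symbol per group, shuffle, truncate,
    # then pad" scheme used by generate_password() above. It deliberately uses
    # the stdlib random module, so it is NOT a cryptographically strong
    # replacement for the Crypto.Random-based implementation.
    import random as _random
    groups = DEFAULT_PASSWORD_SYMBOLS
    chars = [_random.choice(g) for g in groups]   # at least one per group
    _random.shuffle(chars)
    chars = chars[:length]                        # truncate if length < len(groups)
    pool = ''.join(groups)
    chars += [_random.choice(pool) for _ in range(length - len(chars))]
    _random.shuffle(chars)                        # hide the leading group order
    return ''.join(chars)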
def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
# Use the same implementation as the password generation.
return generate_password(length, symbolgroups)
DEFAULT_POOL_NAME = '_pool0'
def extract_host(host, level='backend', default_pool_name=False):
"""Extract Host, Backend or Pool information from host string.
:param host: String for host, which could include host@backend#pool info
:param level: Indicate which level of information should be extracted
from host string. Level can be 'host', 'backend' or 'pool',
default value is 'backend'
:param default_pool_name: this flag specify what to do if level == 'pool'
and there is no 'pool' info encoded in host
string. default_pool_name=True will return
DEFAULT_POOL_NAME, otherwise we return None.
Default value of this parameter is False.
:return: expected level of information
For example:
host = 'HostA@BackendB#PoolC'
ret = extract_host(host, 'host')
# ret is 'HostA'
ret = extract_host(host, 'backend')
# ret is 'HostA@BackendB'
ret = extract_host(host, 'pool')
# ret is 'PoolC'
host = 'HostX@BackendY'
ret = extract_host(host, 'pool')
# ret is None
ret = extract_host(host, 'pool', True)
# ret is '_pool0'
"""
if level == 'host':
# make sure pool is not included
hst = host.split('#')[0]
return hst.split('@')[0]
elif level == 'backend':
return host.split('#')[0]
elif level == 'pool':
lst = host.split('#')
if len(lst) == 2:
return lst[1]
elif default_pool_name is True:
return DEFAULT_POOL_NAME
else:
return None
def append_host(host, pool):
"""Encode pool into host info."""
if not host or not pool:
return host
new_host = "#".join([host, pool])
return new_host
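# Illustrative usage sketch (not part of the original module): append_host and
# extract_host are meant to round-trip the host@backend#pool encoding.
def _example_extract_host():
    full = append_host('HostA@BackendB', 'PoolC')  # 'HostA@BackendB#PoolC'
    assert extract_host(full, 'host') == 'HostA'
    assert extract_host(full, 'backend') == 'HostA@BackendB'
    assert extract_host(full, 'pool') == 'PoolC'
    assert extract_host('HostX@BackendY', 'pool', True) == DEFAULT_POOL_NAME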
|
|
#!/usr/bin/env python
from sys import argv
from string import strip
from os import listdir,path
from optparse import OptionParser
from datetime import datetime
import tarfile
_author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Jesse Zaneveld", "Rob Knight"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Jesse Zaneveld"
__email__ = "[email protected]"
__status__ = "Development"
"""A parser for the KEGG 'ko' file
containing information on KEGG orthology groups and their associated pathways.
"""
def parse_ko_file(filepath,dir_prefix=None,debug = True):
"""Parse the NCBI KO file lines, and output several tab-delimited files
filepath - the full filepath to the input KO file from KEGG
dir_prefix - the directory to which tab-delimited output files will be saved.
debug - if set to True, print debugging output to the screen
"""
lines = open(filepath,"U")
ko_gene_fname = 'ko_to_gene.tab'
ko_fname = 'ko.tab'
ko_pathway_fname = 'ko_to_pathway.tab'
pathway_fname = 'pathway.tab'
ko_cog_fname = 'ko_to_cog.tab'
ko_cazy_fname = 'ko_to_cazy.tab'
ko_go_fname = 'ko_to_go.tab'
fnames = [ko_gene_fname, ko_fname, ko_pathway_fname,\
pathway_fname, ko_cog_fname, ko_cazy_fname,\
ko_go_fname]
if dir_prefix:
fnames = [dir_prefix + '/' + f for f in fnames]
if debug:
for res_fp in fnames:
print "Outputting parsed info to: %s" %(res_fp)
ko_gene, ko, ko_pathway, pathway, ko_cog, ko_cazy, ko_go = \
[open(i, 'w') for i in fnames]
#figure out what fields we want (and get them), and get pathway data
fields = ['ENTRY', 'NAME', 'DEFINITION']
ko_to_pathway = {}
for rec in parse_ko(lines):
ko.write('\t'.join([rec.get(f,'') for f in fields]))
ko.write('\n')
entry = rec['ENTRY']
if 'GENES' not in rec:
continue #apparently, some records don't have genes...
genes = rec['GENES']
for species, gene_list in genes.items():
for g in gene_list:
ko_gene.write('%s\t%s:%s\n' % (entry, species.lower(), g))
if 'CLASS' not in rec:
continue #apparently they also lack classes...
ko_to_pathway[entry] = rec['CLASS']
dblinks = rec.get('DBLINKS', None)
if dblinks:
cogs = dblinks.get('COG', None)
cazy = dblinks.get('CAZy', None)
go = dblinks.get('GO', None)
if cogs:
for c in cogs:
ko_cog.write("%s\t%s\n" % (entry, c))
if go:
for g in go:
ko_go.write("%s\t%s\n" % (entry, g))
if cazy:
for c in cazy:
ko_cazy.write("%s\t%s\n" % (entry,c))
#postprocess the ko_to_pathway data to find out what the pathway terms
#are and to write them out into a join file
max_terms = 10
unique_recs = {} #will hold tuple(fields) -> unique_id
curr_uid = 0
for ko, classes in ko_to_pathway.items():
for (id_, fields) in classes:
if fields not in unique_recs:
unique_recs[fields] = curr_uid
fields_for_output = fields[:]
if len(fields_for_output) > max_terms:
fields_for_output = fields_for_output[:max_terms]
elif len(fields_for_output) < max_terms:
fields_for_output += \
('',)*(max_terms - len(fields_for_output))
pathway.write('\t'.join((str(curr_uid),str(id_)) +\
fields_for_output)+'\n')
curr_uid += 1
uid = unique_recs[fields]
ko_pathway.write(str(ko)+ '\t'+ str(uid) + '\n')
def make_tab_delimited_line_parser(columns_to_convert):
"""Generates a function that parses a tab-delimited line
columns_to_convert: a list of column indexes to convert into integers
by splitting on ':' and taking the second entry (e.g. to convert listings
like GO:0008150 to 0008150 or ncbi-gi:14589889 to 14589889)"""
def parse_tab_delimited_line(line):
"""Parse a tab-delimited line taking only the second item of cols %s""" %\
str(columns_to_convert)
fields = line.split("\t")
for index in columns_to_convert:
fields[index] = fields[index].split(":")[1]
return "\t".join(fields)
return parse_tab_delimited_line
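# Illustrative usage sketch (not part of the original parser): the generated
# function keeps only the part after ':' for the requested columns; the input
# line below is made up.
def _example_tab_parser():
    parse = make_tab_delimited_line_parser([1])
    assert parse("K00001\tGO:0008150\tfoo") == "K00001\t0008150\tfoo"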
def ko_default_parser(lines):
"""Handle default KEGG KO entry lines
lines -- default format of space separated lines.
Examples include the NAME and DEFINITION
entries
Strips out newlines and joins lines together."""
return ' '.join(map(strip, lines)).split(None, 1)[1]
def ko_first_field_parser(lines):
"""Handles KEGG KO entries where only the first field is of interest
For example, ENTRY fields like:
'ENTRY K01559 KO\n'
Strips out newlines and joins lines together for the first field only."""
return ' '.join(map(strip, lines)).split()[1]
def delete_comments(line):
"""Deletes comments in parentheses from a line."""
fields = line.split(')')
result = []
for f in fields:
if '(' in f:
result.append(f.split('(',1)[0])
else:
result.append(f)
return ''.join(result)
def ko_colon_fields(lines, without_comments=True):
"""Converts line to (key, [list of values])
lines -- colon fields such as DBLINKS or GENES
in the KEGG KO file.
Example:
' BXE: Bxe_B0037 Bxe_C0683 Bxe_C1002 Bxe_C1023\n'
"""
merged = ' '.join(map(strip, lines))
if without_comments:
merged = delete_comments(merged)
key, remainder = merged.split(':',1)
vals = remainder.split()
return key, vals
def ko_colon_delimited_parser(lines, without_comments=True):
"""For lines of the form LABEL: id: values.
Returns dict of id:values.
"""
first_line = lines[0]
without_first_field = first_line.split(None, 1)[1]
data_start = len(first_line) - len(without_first_field)
result = {}
curr = []
for line in lines:
line = line[data_start:]
if line[0] != ' ': #start of new block
if curr:
key, vals = ko_colon_fields(curr, without_comments)
result[key] = vals
curr = []
curr.append(line)
if curr:
key, vals = ko_colon_fields(curr, without_comments)
result[key] = vals
return result
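# Illustrative usage sketch (not part of the original parser): colon-delimited
# blocks such as DBLINKS map each database label to its identifiers. The record
# content below is made up for illustration.
def _example_colon_delimited():
    lines = ['DBLINKS     COG: COG1012\n',
             '            GO: 0008152\n']
    assert ko_colon_delimited_parser(lines) == {'COG': ['COG1012'],
                                                'GO': ['0008152']}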
def _is_new_kegg_rec_group(prev, curr):
"""Check for irregular record group terminators"""
return curr[0].isupper() and not prev.endswith(';') and \
not curr.startswith('CoA biosynthesis') and not prev.endswith(' and') and \
not prev.endswith('-') and not prev.endswith(' in') and not \
prev.endswith(' type') and not prev.endswith('Bindng') and not \
prev.endswith('Binding')
def group_by_end_char(lines, end_char = ']', \
is_new_rec=_is_new_kegg_rec_group):
"""Yields successive groups of lines that end with the specified char.
Note: also returns the last group of lines whether or not the end char
is present.
"""
curr_lines = []
prev_line = ''
for line in lines:
stripped = line.strip()
#unfortunately, not all records in kegg actually end with the
#terminator, so need to check for termination condition
if is_new_rec(prev_line, stripped):
if curr_lines:
yield curr_lines
curr_lines = []
#if the line ends with the character we're looking for, assume we've
#found a new record
if stripped.endswith(end_char):
yield curr_lines + [line]
curr_lines = []
else:
curr_lines.append(line)
prev_line = stripped
if curr_lines:
yield curr_lines
def class_lines_to_fields(lines):
"""Converts a list of lines in a single pathway within one KO class definition.
"""
rec = ' '.join(map(strip, lines))
#need to split off the class declaration if it is present
if rec.startswith('CLASS'):
rec = rec.split(None,1)[1]
#figure out if it has an id and process accordingly
if rec.endswith(']'):
rec, class_id = rec.rsplit('[', 1)
class_id = class_id[:-1]
else:
class_id = None
rec_fields = map(strip, rec.split(';'))
return class_id, tuple(rec_fields)
def ko_class_parser(lines, without_comments='ignored'):
"""For the CLASS declaration lines.
These take the form of multi-line semicolon-delimited fields (where
each field is a successive entry in the KEGG pathway hierarchy), ending
in a field of the form [PATH:ko00071].
Strategy:
- iterate over groups of lines that end in ] (each represents one pathway)
- for each line:
- split off and extract the pathway id
- split the rest of the terms on semicolon
- return a tuple of (pathway_id, [terms_in_order])
Don't consolidate the terms in this parser because each KO group has
its own class declaration so we would have to merge them for each class:
instead, merge at higher level.
"""
for group in group_by_end_char(lines):
yield class_lines_to_fields(group)
def parse_ko(lines):
"""Parses a KO record into fields."""
# Here we define records by their category
# to allow parsers to be reused on
# similar entries.
default_fields = ['NAME', 'DEFINITION']
colon_fields = ['DBLINKS', 'GENES']
first_field_only = ['ENTRY']
class_fields = ['CLASS']
for rec in ko_record_iterator(lines):
split_fields = ko_record_splitter(rec)
result = {}
for k, v in split_fields.items():
if k in default_fields:
result[k] = ko_default_parser(v)
elif k in colon_fields:
result[k] = ko_colon_delimited_parser(v)
elif k in first_field_only:
result[k] = ko_first_field_parser(v)
elif k in class_fields:
result[k] = list(ko_class_parser(v))
yield result
#parse_ko: lightweight standalone ko parser
def ko_record_iterator(lines):
"""Iterates over KO records, delimited by '///'"""
curr = []
for line in lines:
if line.startswith('///') and curr:
yield curr
curr = []
else:
curr.append(line)
if curr:
yield curr
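# Illustrative usage sketch (not part of the original parser): records are
# separated by '///' lines, so a two-record stream yields two groups of lines.
def _example_record_iterator():
    lines = ['ENTRY       K00001\n', '///\n', 'ENTRY       K00002\n']
    recs = list(ko_record_iterator(lines))
    assert len(recs) == 2
    assert recs[0][0].startswith('ENTRY')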
def ko_record_splitter(lines):
"""Splits KO lines into dict of groups keyed by type."""
result = {}
curr_label = None
curr = []
i = 0
for line in lines:
i+= 1
if line[0] != ' ':
if curr_label is not None:
result[curr_label] = curr
fields = line.split(None, 1)
# Annoyingly, can have blank REFERENCE lines
# Lacking PMID, these still have auth/title info, however...
if len(fields) == 1:
curr_label = fields[0]
curr_line = ''
else:
curr_label, curr_line = fields
curr = [line]
else:
curr.append(line)
if curr:
result[curr_label] = curr
return result
if __name__ == '__main__':
from sys import argv
filename = argv[1]
out_dir = argv[2]
parse_ko_file(filename, \
dir_prefix = out_dir, \
debug = True)
|
|
#!/usr/bin/env python
# Copyright (c) 2005 Trent Mick
# License: MIT License
"""Test specially formated cmdln_*.py files
Each cmdln_*.py implemented a cmdln.Cmdln subclass and its module
docstring is an 'expect' script to test running it.
Usage:
Run all cmdln_*.py tests:
python test_cmdln.py
As part of a large test suite:
import test_cmdln
test_cmdln.suite() # returns a unittest.TestSuite
Test just specified cmdln_* files:
python test_cmdln.py <file-pattern>...
"""
import sys
import os
import unittest
import difflib
import pprint
import subprocess
import shutil
import glob
PY3 = (sys.version_info[0] >= 3)
#---- support stuff
def banner(text, ch='=', length=78):
"""Return a banner line centering the given text.
"text" is the text to show in the banner. None can be given to have
no text.
"ch" (optional, default '=') is the banner line character (can
also be a short string to repeat).
"length" (optional, default 78) is the length of banner to make.
Examples:
>>> banner("Peggy Sue")
'================================= Peggy Sue =================================='
>>> banner("Peggy Sue", ch='-', length=50)
'------------------- Peggy Sue --------------------'
>>> banner("Pretty pretty pretty pretty Peggy Sue", length=40)
'Pretty pretty pretty pretty Peggy Sue'
"""
if text is None:
return ch * length
elif len(text) + 2 + len(ch)*2 > length:
# Not enough space for even one line char (plus space) around text.
return text
else:
remain = length - (len(text) + 2)
prefix_len = remain // 2
suffix_len = remain - prefix_len
if len(ch) == 1:
prefix = ch * prefix_len
suffix = ch * suffix_len
else:
prefix = ch * (prefix_len//len(ch)) + ch[:prefix_len%len(ch)]
suffix = ch * (suffix_len//len(ch)) + ch[:suffix_len%len(ch)]
return prefix + ' ' + text + ' ' + suffix
def indented(text, indent=' '*4):
lines = text.splitlines(1)
return indent + indent.join(lines)
#---- Expect shorthand to expect translation
SHELL_PROMPT = "$ "
class SpawnBlock:
def __init__(self, spawnline):
self._parse(spawnline) # sets self.cmd and self.options
self.lines = []
def _parse(self, line):
self.options = {}
parts = line[len(SHELL_PROMPT):].split("#", 1)
if len(parts) == 1:
self.cmd = parts[0]
else:
self.cmd, optstr = parts
landmark = "expecttest:"
if optstr.startswith(landmark):
for opt in optstr[len(landmark):].split(","):
opt = opt.strip()
if '=' in opt:
name, value = opt.split('=')
if value.startswith('"'):
value = value[1:-1]
else:
name, value = opt, True
self.options[name] = value
def addline(self, line):
self.lines.append(line)
def generate(self):
"""Return executable "expect" code for this spawn-block."""
# If the python we are running with isn't "python", then update `cmd`
# accordingly (they all call Python as "python").
pythonExeName = os.path.basename(sys.executable)
expect = ["spawn " + self.cmd.replace("python", pythonExeName, 1)]
interactive = self.options.get("INTERACTIVE", False)
if interactive:
prompt = self.options["PROMPT"]
if sys.platform == "win32":
eol_expect = r"\n"
eol_expect_repr = r"\\n"
eof_expect = r"\032\r" # Ctrl-Z + return
else:
eol_expect = r"\r\n"
eol_expect_repr = r"\\r\\n"
eof_expect = r"\004" # Ctrl-D
for line in self.lines:
if interactive and line.startswith(prompt):
expect.append(r"""expect {
-i $spawn_id
-re "^%s" {}
default {
puts stderr {ERROR: expected "%s"}
puts stderr " got \"$expect_out(buffer)\""
exit 1
}
}""" % (prompt, prompt))
input = line[len(prompt):]
if input in ("^D", "^Z"):
#XXX Around the post-10.4 (Tiger) OS X release
# updates for 10.3 this 'expect' started failing.
# Adding the "(....)?" helps. I don't know enough
# Tcl to figure out exactly what those friggin'
# chars are.
expect += [r'send "%s"' % eof_expect,
r'expect -re "^(....)?%s$"' % eol_expect]
else:
expect += [r'send "%s\r"' % input,
r'expect -re "^%s%s"' % (input, eol_expect)]
else:
expected = tcl_escape(line)
if line == "<BLANKLINE>":
expected = r"\s*" # a "blank line" can have whitespace
expect.append(r"""expect {
-i $spawn_id
-re {^%s%s} {}
default {
puts stderr {ERROR: expected "%s%s"}
puts stderr " got \"$expect_out(buffer)\""
exit 1
}
}""" % (expected,
eol_expect,
expected.replace('\\', ''),
eol_expect_repr))
# Trap EOF for current process and make sure there isn't
# unexpected trailing output.
expect.append(r"""expect {
-i $spawn_id
eof {
} -re "^.+$" {
puts stderr "error: unexpected trailing output: '$expect_out(buffer)'\n"
exit 1
} timeout {
puts stderr {ERROR: timed out waiting for EOF from '%s'}
exit 1
}
}""" % self.cmd)
return '\n'.join(expect)
def tcl_escape(s):
"""Escape the given string as appropriate for using in a Tcl string
and regex.
"""
return s.replace("[", "\\[").replace("]", "\\]") \
.replace("$", "\\$") \
.replace("?", "\\?") \
.replace("(", "\\(").replace(")", "\\)")
def strip_prefix(line, prefix):
junk, content = line[:len(prefix)], line[len(prefix):].rstrip()
if junk.strip(): # line in block with short indentation
raise ValueError("too-short indentation on line: '%s'"
% line)
assert '\t' not in junk, \
"error: tab in expect-line prefix: '%s'" % line
return content
def parse_expect_content(content):
"""Generate parsed "expect" lines.
"Expect" blocks begin with a "spawn" line -- one that is prefixed
with a shell prompt -- and end with a blank line or the end of the
content. A "parsed" line is one with the indentation removed, if
any.
Generates 2-tuples
(<line-type>, <parsed-line>)
where <line-type> is "spawn" for spawn-lines or "other" for other
lines.
"""
if not content:
return
prefix = None
for line in content.splitlines(0):
if not line.strip():
prefix = None # end of a block
elif line.lstrip().startswith(SHELL_PROMPT):
if prefix is None: # start of a new block
idx = line.index(SHELL_PROMPT)
prefix, content = line[:idx], line[idx:].rstrip()
assert '\t' not in prefix, \
"error: tab in expect-line prefix: '%s'" % line
else:
content = strip_prefix(line, prefix)
yield "spawn", content
elif prefix is not None:
yield "other", strip_prefix(line, prefix)
def generate_expect(content):
# Break into "spawn"-block. A new spawn block starts with what
# will become an expect "spawn" command. Specifically a block
# that begins with the '$ ' shell prompt.
blocks = []
block = None
for type, line in parse_expect_content(content):
assert type in ("spawn", "other"), \
"unexpected spawn line type: %r" % type
if type == "spawn":
block = SpawnBlock(line)
blocks.append(block)
else:
assert block is not None, \
"'other' spawn line without leading 'spawn' line: %r" % line
block.addline(line)
expect = ["#!/usr/bin/env tclsh",
"",
"package require Expect",
"set timeout 3",
"set send_slow {10 .001}",
""]
for block in blocks:
expect.append(block.generate())
return '\n'.join(expect) + '\n'
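# Illustrative usage sketch (not part of the original test driver): a minimal
# one-command shorthand block (the script name is made up) becomes an expect
# script that spawns the command and checks its output line.
def _example_generate_expect():
    shorthand = '$ python cmdln_example.py\nhello\n'
    script = generate_expect(shorthand)
    assert script.startswith("#!/usr/bin/env tclsh")
    assert "package require Expect" in script
    assert "spawn" in script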
#----- test cases
class CmdlnTestCase(unittest.TestCase):
pass
def _testOneCmdln(self, modname, fname):
_debug = False # Set to true to dump status info for each test run.
mod = __import__(modname)
doc = mod.__doc__
if not PY3 and isinstance(doc, unicode):
doc = doc.encode("utf-8")
expect = generate_expect(doc)
if False:
tmpfname = ".%s.exp.tmp" % modname
open(tmpfname, 'w').write(expect)
retval = os.system("tclsh "+tmpfname)
if hasattr(os, "WEXITSTATUS"):
retval = os.WEXITSTATUS(retval)
stdout = stderr = ""
else:
if _debug:
tmpfname = ".%s.exp.tmp" % modname
open(tmpfname, 'w').write(expect)
p = subprocess.Popen(["tclsh"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = p.communicate(expect)
retval = p.returncode
self.failIf(retval, """\
'%s' did not behave as expected:
%s
%s
%s
%s
%s
%s
%s""" % (fname,
indented(banner("expect shorthand", length=72)),
indented(doc or ""),
indented(banner("stdout", length=72)),
indented(stdout),
indented(banner("stderr", length=72)),
indented(stderr),
indented(banner(None, length=72))))
if __name__ == "__main__" and sys.argv[1:]:
testfiles = []
for arg in sys.argv[1:]:
testfiles += glob.glob(arg)
else:
testfiles = glob.glob("cmdln_*.py")
for fname in testfiles:
if not fname.endswith('.py'):
continue
base = os.path.basename(os.path.splitext(fname)[0])
testfunc = lambda self, base=base, fname=fname: _testOneCmdln(self, base, fname)
if base.startswith("cmdln_"):
base = base[len("cmdln_"):]
testname = 'test_'+base
setattr(CmdlnTestCase, testname, testfunc)
#---- mainline
def suite():
"""Return a unittest.TestSuite to be used by test.py."""
return unittest.makeSuite(CmdlnTestCase)
if __name__ == "__main__":
runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
s = suite()
result = runner.run(s)
|
|
"""SCons.Tool.mwcc
Tool-specific initialization for the Metrowerks CodeWarrior compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwcc.py 2013/03/03 09:48:35 garyo"
import os
import os.path
import SCons.Util
def set_vars(env):
"""Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars
MWCW_VERSIONS is set to a list of objects representing installed versions
MWCW_VERSION is set to the version object that will be used for building.
MWCW_VERSION can be set to a string during Environment
construction to influence which version is chosen, otherwise
the latest one from MWCW_VERSIONS is used.
Returns true if at least one version is found, false otherwise
"""
desired = env.get('MWCW_VERSION', '')
# return right away if the variables are already set
if isinstance(desired, MWVersion):
return 1
elif desired is None:
return 0
versions = find_versions()
version = None
if desired:
for v in versions:
if str(v) == desired:
version = v
elif versions:
version = versions[-1]
env['MWCW_VERSIONS'] = versions
env['MWCW_VERSION'] = version
if version is None:
return 0
env.PrependENVPath('PATH', version.clpath)
env.PrependENVPath('PATH', version.dllpath)
ENV = env['ENV']
ENV['CWFolder'] = version.path
ENV['LM_LICENSE_FILE'] = version.license
plus = lambda x: '+%s' % x
ENV['MWCIncludes'] = os.pathsep.join(map(plus, version.includes))
ENV['MWLibraries'] = os.pathsep.join(map(plus, version.libs))
return 1
def find_versions():
"""Return a list of MWVersion objects representing installed versions"""
versions = []
### This function finds CodeWarrior by reading from the registry on
### Windows. Some other method needs to be implemented for other
### platforms, maybe something that calls env.WhereIs('mwcc')
if SCons.Util.can_read_reg:
try:
HLM = SCons.Util.HKEY_LOCAL_MACHINE
product = 'SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions'
product_key = SCons.Util.RegOpenKeyEx(HLM, product)
i = 0
while True:
name = product + '\\' + SCons.Util.RegEnumKey(product_key, i)
name_key = SCons.Util.RegOpenKeyEx(HLM, name)
try:
version = SCons.Util.RegQueryValueEx(name_key, 'VERSION')
path = SCons.Util.RegQueryValueEx(name_key, 'PATH')
mwv = MWVersion(version[0], path[0], 'Win32-X86')
versions.append(mwv)
except SCons.Util.RegError:
pass
i = i + 1
except SCons.Util.RegError:
pass
return versions
class MWVersion(object):
def __init__(self, version, path, platform):
self.version = version
self.path = path
self.platform = platform
self.clpath = os.path.join(path, 'Other Metrowerks Tools',
'Command Line Tools')
self.dllpath = os.path.join(path, 'Bin')
# The Metrowerks tools don't store any configuration data so they
# are totally dumb when it comes to locating standard headers,
# libraries, and other files, expecting all the information
# to be handed to them in environment variables. The members set
# below control what information scons injects into the environment
### The paths below give a normal build environment in CodeWarrior for
### Windows, other versions of CodeWarrior might need different paths.
msl = os.path.join(path, 'MSL')
support = os.path.join(path, '%s Support' % platform)
self.license = os.path.join(path, 'license.dat')
self.includes = [msl, support]
self.libs = [msl, support]
def __str__(self):
return self.version
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def generate(env):
"""Add Builders and construction variables for the mwcc to an Environment."""
import SCons.Defaults
import SCons.Tool
set_vars(env)
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CXXAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'
env['CC'] = 'mwcc'
env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'
env['CXX'] = 'mwcc'
env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS'
env['SHCC'] = '$CC'
env['SHCCFLAGS'] = '$CCFLAGS'
env['SHCFLAGS'] = '$CFLAGS'
env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = '$CXXFLAGS'
env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS'
env['CFILESUFFIX'] = '.c'
env['CXXFILESUFFIX'] = '.cpp'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
#env['PCH'] = ?
#env['PCHSTOP'] = ?
def exists(env):
return set_vars(env)
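# Illustrative sketch (not part of the original tool module): in an SConstruct
# the tool is normally requested by name, optionally pinning a CodeWarrior
# version via MWCW_VERSION (the version string below is made up):
#
#   env = Environment(tools=['mwcc'], MWCW_VERSION='9.4')
#   env.Program('hello', ['hello.c'])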
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing
import time
import random
import os
import string
from flask import Flask
from flask import jsonify
from flask import request as flask_request
from flask import render_template
from flask import abort
from flask import Response
from flask import make_response
from core.alert import write_to_api_console
from core.alert import messages
from core._die import __die_success
from api.api_core import __structure
from api.api_core import __get_value
from api.api_core import root_dir
from api.api_core import get_file
from api.api_core import __mime_types
from api.api_core import __scan_methods
from api.api_core import __profiles
from api.api_core import __graphs
from api.api_core import __languages
from core.load_modules import load_all_method_args
from core.config import _core_config
from core.config_builder import _core_default_config
from core.config_builder import _builder
from api.api_core import __remove_non_api_keys
from api.api_core import __rules
from api.api_core import __api_key_check
from database.db import __select_results
from database.db import __get_result
from database.db import __last_host_logs
from database.db import __logs_to_report_json
from database.db import __search_logs
from database.db import __logs_to_report_html
from api.__start_scan import __scan
from core._time import now
template_dir = os.path.join(os.path.join(
os.path.dirname(os.path.dirname(__file__)), "web"), "static")
app = Flask(__name__, template_folder=template_dir)
app.config.from_object(__name__)
def __language(app=app):
"""
find the language in config
Args:
app: flask app
Returns:
the language in string
"""
return app.config["OWASP_NETTACKER_CONFIG"]["language"]
@app.errorhandler(400)
def error_400(error):
"""
handle the 400 HTTP error
Args:
error: the flask error
Returns:
400 JSON error
"""
return jsonify(__structure(status="error", msg=error.description)), 400
@app.errorhandler(401)
def error_401(error):
"""
handle the 401 HTTP error
Args:
error: the flask error
Returns:
401 JSON error
"""
return jsonify(__structure(status="error", msg=error.description)), 401
@app.errorhandler(403)
def error_403(error):
"""
handle the 403 HTTP error
Args:
error: the flask error
Returns:
403 JSON error
"""
return jsonify(__structure(status="error", msg=error.description)), 403
@app.errorhandler(404)
def error_404(error):
"""
handle the 404 HTTP error
Args:
error: the flask error
Returns:
404 JSON error
"""
return jsonify(__structure(status="error",
msg=messages(app.config["OWASP_NETTACKER_CONFIG"]["language"], "not_found"))), 404
@app.before_request
def limit_remote_addr():
"""
check if IP filtering is applied and the client IP address is in the whitelist
Returns:
None if it's in whitelist otherwise abort(403)
"""
# IP Limitation
if app.config["OWASP_NETTACKER_CONFIG"]["api_client_white_list"]:
if flask_request.remote_addr not in app.config["OWASP_NETTACKER_CONFIG"]["api_client_white_list_ips"]:
abort(403, messages(__language(), "unauthorized_IP"))
return
@app.after_request
def access_log(response):
"""
if the access log is enabled, write the request to the log file
Args:
response: the flask response
Returns:
the flask response
"""
if app.config["OWASP_NETTACKER_CONFIG"]["api_access_log"]:
r_log = open(app.config["OWASP_NETTACKER_CONFIG"][
"api_access_log_filename"], "ab")
# if you need to log POST data
# r_log.write(
# "{0} [{1}] {2} \"{3} {4}\" {5} {6} {7}\r\n".format(flask_request.remote_addr, now(), flask_request.host,
# flask_request.method, flask_request.full_path,
# flask_request.user_agent, response.status_code,
# json.dumps(flask_request.form)))
r_log.write("{0} [{1}] {2} \"{3} {4}\" {5} {6}\r\n".format(flask_request.remote_addr, now(), flask_request.host,
flask_request.method, flask_request.full_path,
flask_request.user_agent, response.status_code))
r_log.close()
return response
@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
def get_statics(path):
"""
serve static files and return their content with the matching mime type
Args:
path: path and filename
Returns:
file content and content type if file found otherwise abort(404)
"""
static_types = __mime_types()
return Response(get_file(os.path.join(root_dir(), path)),
mimetype=static_types.get(os.path.splitext(path)[1], "text/html"))
@app.route("/", methods=["GET", "POST"])
def index():
"""
index page for WebUI
Returns:
rendered HTML page
"""
filename = _builder(_core_config(), _core_default_config())["log_in_file"]
return render_template("index.html", scan_method=__scan_methods(), profile=__profiles(),
graphs=__graphs(), languages=__languages(), filename=filename,
method_args_list=load_all_method_args(__language(), API=True))
@app.route("/new/scan", methods=["GET", "POST"])
def new_scan():
"""
new scan through the API
Returns:
a JSON message with scan details if success otherwise a JSON error
"""
_start_scan_config = {}
__api_key_check(app, flask_request, __language())
for key in _core_default_config():
if __get_value(flask_request, key) is not None:
_start_scan_config[key] = __get_value(flask_request, key)
_start_scan_config["backup_ports"] = __get_value(flask_request, "ports")
_start_scan_config = __rules(__remove_non_api_keys(_builder(_start_scan_config,
_builder(_core_config(), _core_default_config()))),
_core_default_config(), __language())
p = multiprocessing.Process(target=__scan, args=[_start_scan_config])
p.start()
# Sometimes method_args is too big!
_start_scan_config["methods_args"] = {
"as_user_set": "set_successfully"
}
return jsonify(_start_scan_config), 200
@app.route("/session/check", methods=["GET"])
def __session_check():
"""
check if the session is valid
Returns:
a JSON message if it's valid otherwise abort(401)
"""
__api_key_check(app, flask_request, __language())
return jsonify(__structure(status="ok", msg=messages(__language(), "browser_session_valid"))), 200
@app.route("/session/set", methods=["GET"])
def __session_set():
"""
set session on the browser
Returns:
a 200 HTTP response with a set-cookie header if the key is valid, otherwise abort(403)
"""
__api_key_check(app, flask_request, __language())
res = make_response(
jsonify(__structure(status="ok", msg=messages(__language(), "browser_session_valid"))))
res.set_cookie("key", value=app.config[
"OWASP_NETTACKER_CONFIG"]["api_access_key"])
return res
@app.route("/session/kill", methods=["GET"])
def __session_kill():
"""
unset session on the browser
Returns:
a 200 HTTP response with set-cookie to "expired" to unset the cookie on the browser
"""
res = make_response(
jsonify(__structure(status="ok", msg=messages(__language(), "browser_session_killed"))))
res.set_cookie("key", "", expires=0)
return res
@app.route("/results/get_list", methods=["GET"])
def __get_results():
"""
get list of scan's results through the API
Returns:
an array of JSON scan's results if success otherwise abort(403)
"""
__api_key_check(app, flask_request, __language())
try:
page = int(__get_value(flask_request, "page"))
except:
page = 1
return jsonify(__select_results(__language(), page)), 200
@app.route("/results/get", methods=["GET"])
def __get_result_content():
"""
get a result HTML/TEXT/JSON content
Returns:
content of the scan result
"""
__api_key_check(app, flask_request, __language())
try:
id = int(__get_value(flask_request, "id"))
except:
return jsonify(__structure(status="error", msg="your scan id is not valid!")), 400
return __get_result(__language(), id)
@app.route("/logs/get_list", methods=["GET"])
def __get_last_host_logs():
"""
get list of logs through the API
Returns:
an array of JSON logs if success otherwise abort(403)
"""
__api_key_check(app, flask_request, __language())
try:
page = int(__get_value(flask_request, "page"))
except:
page = 1
return jsonify(__last_host_logs(__language(), page)), 200
@app.route("/logs/get_html", methods=["GET"])
def __get_logs_html():
"""
get host's logs through the API in HTML type
Returns:
HTML report
"""
__api_key_check(app, flask_request, __language())
try:
host = __get_value(flask_request, "host")
except:
host = ""
return make_response(__logs_to_report_html(host, __language()))
@app.route("/logs/get_json", methods=["GET"])
def __get_logs():
"""
get host's logs through the API in JSON type
Returns:
an array with JSON events
"""
__api_key_check(app, flask_request, __language())
try:
host = __get_value(flask_request, "host")
except:
host = ""
return jsonify(__logs_to_report_json(host, __language())), 200
@app.route("/logs/search", methods=["GET"])
def ___go_for_search_logs():
"""
search in all events
Returns:
an array with JSON events
"""
__api_key_check(app, flask_request, __language())
try:
page = int(__get_value(flask_request, "page"))
except:
page = 1
try:
query = __get_value(flask_request, "q")
except:
query = ""
return jsonify(__search_logs(__language(), page, query)), 200
def __process_it(api_host, api_port, api_debug_mode, api_access_key, api_client_white_list,
api_client_white_list_ips, api_access_log, api_access_log_filename, language):
"""
a function to run Flask in a subprocess so the kill signal can be handled in a better way!
Args:
api_host: host/IP to bind address
api_port: bind port
api_debug_mode: debug mode flag
api_access_key: API access key
api_client_white_list: clients white list flag
api_client_white_list_ips: clients white list IPs
api_access_log: access log flag
api_access_log_filename: access log filename
language: language
"""
app.config["OWASP_NETTACKER_CONFIG"] = {
"api_access_key": api_access_key,
"api_client_white_list": api_client_white_list,
"api_client_white_list_ips": api_client_white_list_ips,
"api_access_log": api_access_log,
"api_access_log_filename": api_access_log_filename,
"language": language
}
app.run(host=api_host, port=api_port, debug=api_debug_mode, threaded=True)
def _start_api(api_host, api_port, api_debug_mode, api_access_key, api_client_white_list,
api_client_white_list_ips, api_access_log, api_access_log_filename, language):
"""
entry point to run the API through the flask
Args:
api_host: host/IP to bind address
api_port: bind port
api_debug_mode: debug mode
api_access_key: API access key
api_client_white_list: clients white list flag
api_client_white_list_ips: clients white list IPs
api_access_log: access log flag
api_access_log_filename: access log filename
language: language
"""
# Starting the API
write_to_api_console(messages(language, "API_key").format(api_access_key))
p = multiprocessing.Process(target=__process_it,
args=(api_host, api_port, api_debug_mode, api_access_key, api_client_white_list,
api_client_white_list_ips, api_access_log, api_access_log_filename, language))
p.start()
# Sometimes it takes a long time to terminate Flask with CTRL+C,
# so it's better to use KeyboardInterrupt to terminate!
while 1:
try:
exitflag = True
if len(multiprocessing.active_children()) != 0:
exitflag = False
time.sleep(0.3)
if exitflag:
break
except KeyboardInterrupt:
for process in multiprocessing.active_children():
process.terminate()
break
__die_success()
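# Illustrative sketch (not part of the original module): the engine is expected
# to call _start_api with values taken from the built configuration; the
# literal values below are placeholders, not the project's real defaults.
#
#   _start_api("127.0.0.1", 5000, False, "generated-api-key",
#              False, [], False, "nettacker_api_access.log", "en")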
|
|
import re
from dataclasses import dataclass, fields
from typing import Callable, Optional, Union, cast
from mitmproxy.coretypes import serializable
from mitmproxy.net.http import encoding
from mitmproxy.net.http.headers import Headers, assemble_content_type, parse_content_type
from mitmproxy.utils import strutils, typecheck
@dataclass
class MessageData(serializable.Serializable):
http_version: bytes
headers: Headers
content: Optional[bytes]
trailers: Optional[Headers]
timestamp_start: float
timestamp_end: Optional[float]
# noinspection PyUnreachableCode
if __debug__:
def __post_init__(self):
for field in fields(self):
val = getattr(self, field.name)
typecheck.check_option_type(field.name, val, field.type)
def set_state(self, state):
for k, v in state.items():
if k in ("headers", "trailers") and v is not None:
v = Headers.from_state(v)
setattr(self, k, v)
def get_state(self):
state = vars(self).copy()
state["headers"] = state["headers"].get_state()
if state["trailers"] is not None:
state["trailers"] = state["trailers"].get_state()
return state
@classmethod
def from_state(cls, state):
state["headers"] = Headers.from_state(state["headers"])
if state["trailers"] is not None:
state["trailers"] = Headers.from_state(state["trailers"])
return cls(**state)
class Message(serializable.Serializable):
@classmethod
def from_state(cls, state):
return cls(**state)
def get_state(self):
return self.data.get_state()
def set_state(self, state):
self.data.set_state(state)
data: MessageData
stream: Union[Callable, bool] = False
@property
def http_version(self) -> str:
"""
Version string, e.g. "HTTP/1.1"
"""
return self.data.http_version.decode("utf-8", "surrogateescape")
@http_version.setter
def http_version(self, http_version: Union[str, bytes]) -> None:
self.data.http_version = strutils.always_bytes(http_version, "utf-8", "surrogateescape")
@property
def is_http2(self) -> bool:
return self.data.http_version == b"HTTP/2.0"
@property
def headers(self) -> Headers:
"""
The HTTP headers.
"""
return self.data.headers
@headers.setter
def headers(self, h: Headers) -> None:
self.data.headers = h
@property
def trailers(self) -> Optional[Headers]:
"""
The HTTP trailers.
"""
return self.data.trailers
@trailers.setter
def trailers(self, h: Optional[Headers]) -> None:
self.data.trailers = h
@property
def raw_content(self) -> Optional[bytes]:
"""
The raw (potentially compressed) HTTP message body as bytes.
See also: :py:attr:`content`, :py:class:`text`
"""
return self.data.content
@raw_content.setter
def raw_content(self, content: Optional[bytes]) -> None:
self.data.content = content
def get_content(self, strict: bool = True) -> Optional[bytes]:
"""
The uncompressed HTTP message body as bytes.
Raises:
ValueError, when the HTTP content-encoding is invalid and strict is True.
See also: :py:class:`raw_content`, :py:attr:`text`
"""
if self.raw_content is None:
return None
ce = self.headers.get("content-encoding")
if ce:
try:
content = encoding.decode(self.raw_content, ce)
# A client may illegally specify a byte -> str encoding here (e.g. utf8)
if isinstance(content, str):
raise ValueError("Invalid Content-Encoding: {}".format(ce))
return content
except ValueError:
if strict:
raise
return self.raw_content
else:
return self.raw_content
def set_content(self, value: Optional[bytes]) -> None:
if value is None:
self.raw_content = None
return
if not isinstance(value, bytes):
raise TypeError(
f"Message content must be bytes, not {type(value).__name__}. "
"Please use .text if you want to assign a str."
)
ce = self.headers.get("content-encoding")
try:
self.raw_content = encoding.encode(value, ce or "identity")
except ValueError:
# So we have an invalid content-encoding?
# Let's remove it!
del self.headers["content-encoding"]
self.raw_content = value
self.headers["content-length"] = str(len(self.raw_content))
content = property(get_content, set_content)
@property
def timestamp_start(self) -> float:
"""
First byte timestamp
"""
return self.data.timestamp_start
@timestamp_start.setter
def timestamp_start(self, timestamp_start: float) -> None:
self.data.timestamp_start = timestamp_start
@property
def timestamp_end(self) -> Optional[float]:
"""
Last byte timestamp
"""
return self.data.timestamp_end
@timestamp_end.setter
def timestamp_end(self, timestamp_end: Optional[float]):
self.data.timestamp_end = timestamp_end
def _get_content_type_charset(self) -> Optional[str]:
ct = parse_content_type(self.headers.get("content-type", ""))
if ct:
return ct[2].get("charset")
return None
def _guess_encoding(self, content: bytes = b"") -> str:
enc = self._get_content_type_charset()
if not enc:
if "json" in self.headers.get("content-type", ""):
enc = "utf8"
if not enc:
meta_charset = re.search(rb"""<meta[^>]+charset=['"]?([^'">]+)""", content)
if meta_charset:
enc = meta_charset.group(1).decode("ascii", "ignore")
if not enc:
enc = "latin-1"
# Use GB 18030 as the superset of GB2312 and GBK to fix common encoding problems on Chinese websites.
if enc.lower() in ("gb2312", "gbk"):
enc = "gb18030"
return enc
def get_text(self, strict: bool = True) -> Optional[str]:
"""
The uncompressed and decoded HTTP message body as text.
Raises:
ValueError, when either content-encoding or charset is invalid and strict is True.
See also: :py:attr:`content`, :py:class:`raw_content`
"""
content = self.get_content(strict)
if content is None:
return None
enc = self._guess_encoding(content)
try:
return cast(str, encoding.decode(content, enc))
except ValueError:
if strict:
raise
return content.decode("utf8", "surrogateescape")
def set_text(self, text: Optional[str]) -> None:
if text is None:
self.content = None
return
enc = self._guess_encoding()
try:
self.content = encoding.encode(text, enc)
except ValueError:
# Fall back to UTF-8 and update the content-type header.
ct = parse_content_type(self.headers.get("content-type", "")) or ("text", "plain", {})
ct[2]["charset"] = "utf-8"
self.headers["content-type"] = assemble_content_type(*ct)
enc = "utf8"
self.content = text.encode(enc, "surrogateescape")
text = property(get_text, set_text)
def decode(self, strict: bool = True) -> None:
"""
Decodes body based on the current Content-Encoding header, then
removes the header. If there is no Content-Encoding header, no
action is taken.
Raises:
ValueError, when the content-encoding is invalid and strict is True.
"""
decoded = self.get_content(strict)
self.headers.pop("content-encoding", None)
self.content = decoded
def encode(self, e: str) -> None:
"""
Encodes body with the encoding e, where e is "gzip", "deflate", "identity", "br", or "zstd".
Any existing content-encodings are overwritten,
the content is not decoded beforehand.
Raises:
ValueError, when the specified content-encoding is invalid.
"""
self.headers["content-encoding"] = e
self.content = self.raw_content
if "content-encoding" not in self.headers:
raise ValueError("Invalid content encoding {}".format(repr(e)))
|
|
from unittest import TestCase
from beautiful_date import Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sept, Oct, Dec, hours, days, Nov
from gcsa.attachment import Attachment
from gcsa.attendee import Attendee, ResponseStatus
from gcsa.conference import ConferenceSolution, EntryPoint, SolutionType, ConferenceSolutionCreateRequest
from gcsa.event import Event, Visibility
from gcsa.recurrence import Recurrence, DAILY, SU, SA, MONDAY, WEEKLY
from gcsa.reminders import PopupReminder, EmailReminder
from gcsa.serializers.event_serializer import EventSerializer
from gcsa.util.date_time_util import insure_localisation
TEST_TIMEZONE = 'Pacific/Fiji'
class TestEvent(TestCase):
def test_init(self):
event = Event(
'Breakfast',
event_id='123',
start=(1 / Feb / 2019)[9:00],
end=(31 / Dec / 2019)[23:59],
_created=insure_localisation((20 / Nov / 2020)[16:19], TEST_TIMEZONE),
_updated=insure_localisation((25 / Nov / 2020)[16:19], TEST_TIMEZONE),
timezone=TEST_TIMEZONE,
description='Everyday breakfast',
location='Home',
guests_can_invite_others=False,
guests_can_modify=True,
guests_can_see_other_guests=False,
recurrence=[
Recurrence.rule(freq=DAILY),
Recurrence.exclude_rule(by_week_day=[SU, SA]),
Recurrence.exclude_dates([
19 / Apr / 2019,
22 / Apr / 2019,
12 / May / 2019
])
],
visibility=Visibility.PRIVATE,
minutes_before_popup_reminder=15
)
self.assertEqual(event.summary, 'Breakfast')
self.assertEqual(event.id, '123')
self.assertEqual(event.start, insure_localisation((1 / Feb / 2019)[9:00], TEST_TIMEZONE))
self.assertEqual(event.end, insure_localisation((31 / Dec / 2019)[23:59], TEST_TIMEZONE))
self.assertEqual(event.created, insure_localisation((20 / Nov / 2020)[16:19], TEST_TIMEZONE))
self.assertEqual(event.updated, insure_localisation((25 / Nov / 2020)[16:19], TEST_TIMEZONE))
self.assertEqual(event.description, 'Everyday breakfast')
self.assertEqual(event.location, 'Home')
self.assertEqual(len(event.recurrence), 3)
self.assertEqual(event.visibility, Visibility.PRIVATE)
self.assertIsInstance(event.reminders[0], PopupReminder)
self.assertEqual(event.reminders[0].minutes_before_start, 15)
self.assertFalse(event.guests_can_invite_others)
self.assertTrue(event.guests_can_modify)
self.assertFalse(event.guests_can_see_other_guests)
def test_init_no_end(self):
start = 1 / Jun / 2019
event = Event('Good day', start, timezone=TEST_TIMEZONE)
self.assertEqual(event.end, start + 1 * days)
start = insure_localisation((1 / Jul / 2019)[12:00], TEST_TIMEZONE)
event = Event('Lunch', start, timezone=TEST_TIMEZONE)
self.assertEqual(event.end, start + 1 * hours)
def test_init_no_start_or_end(self):
event = Event('Good day', start=None, timezone=TEST_TIMEZONE)
self.assertIsNone(event.start)
self.assertIsNone(event.end)
def test_init_different_date_types(self):
with self.assertRaises(TypeError):
Event('Good day', start=(1 / Jan / 2019), end=(2 / Jan / 2019)[5:55], timezone=TEST_TIMEZONE)
def test_add_attachment(self):
e = Event('Good day', start=(1 / Aug / 2019), timezone=TEST_TIMEZONE)
e.add_attachment('https://file.url', 'My file', "application/vnd.google-apps.document")
self.assertIsInstance(e.attachments[0], Attachment)
self.assertEqual(e.attachments[0].title, 'My file')
def test_add_reminders(self):
e = Event('Good day', start=(28 / Mar / 2019), timezone=TEST_TIMEZONE)
self.assertEqual(len(e.reminders), 0)
e.add_email_reminder(35)
self.assertEqual(len(e.reminders), 1)
self.assertIsInstance(e.reminders[0], EmailReminder)
self.assertEqual(e.reminders[0].minutes_before_start, 35)
e.add_popup_reminder(41)
self.assertEqual(len(e.reminders), 2)
self.assertIsInstance(e.reminders[1], PopupReminder)
self.assertEqual(e.reminders[1].minutes_before_start, 41)
def test_add_attendees(self):
e = Event('Good day',
start=(17 / Jul / 2020),
timezone=TEST_TIMEZONE,
attendees=[
Attendee(email="[email protected]"),
"[email protected]",
])
self.assertEqual(len(e.attendees), 2)
e.add_attendee(Attendee("[email protected]"))
e.add_attendee(Attendee(email="[email protected]"))
self.assertEqual(len(e.attendees), 4)
self.assertEqual(e.attendees[0].email, "[email protected]")
self.assertEqual(e.attendees[1].email, "[email protected]")
self.assertEqual(e.attendees[2].email, "[email protected]")
self.assertEqual(e.attendees[3].email, "[email protected]")
def test_reminders_checks(self):
with self.assertRaises(ValueError):
Event('Too many reminders',
start=20 / Jul / 2020,
reminders=[EmailReminder()] * 6)
with self.assertRaises(ValueError):
Event('Default and overrides together',
start=20 / Jul / 2020,
reminders=EmailReminder(),
default_reminders=True)
e = Event('Almost too many reminders',
start=20 / Jul / 2020,
reminders=[EmailReminder()] * 5)
with self.assertRaises(ValueError):
e.add_email_reminder()
def test_repr_str(self):
e = Event('Good event',
start=20 / Jul / 2020)
self.assertEqual(str(e), '2020-07-20 - Good event')
self.assertEqual(repr(e), '<Event 2020-07-20 - Good event>')
def test_equal(self):
dp = {
'summary': 'Breakfast',
'start': (1 / Feb / 2019)[9:00]
}
attachments_dp = {
"file_url": 'https://file.com',
"mime_type": "application/vnd.google-apps.map"
}
event1 = Event(
**dp,
event_id='123',
end=(31 / Dec / 2019)[23:59],
timezone=TEST_TIMEZONE,
description='Everyday breakfast',
location='Home',
recurrence=Recurrence.rule(freq=DAILY),
color_id=1,
visibility=Visibility.PRIVATE,
attendees='[email protected]',
attachments=Attachment(title='My doc', **attachments_dp),
minutes_before_popup_reminder=15,
other={"key": "value"}
)
self.assertEqual(event1, event1)
self.assertNotEqual(Event(**dp), Event('Breakfast', start=(22 / Jun / 2020)[22:22]))
self.assertNotEqual(Event(**dp, event_id='123'),
Event(**dp, event_id='abc'))
self.assertNotEqual(Event(**dp, description='Desc1'),
Event(**dp, description='Desc2'))
self.assertNotEqual(Event(**dp, location='Home'),
Event(**dp, location='Work'))
self.assertNotEqual(Event(**dp, recurrence=Recurrence.rule(freq=DAILY)),
Event(**dp, recurrence=Recurrence.rule(freq=WEEKLY)))
self.assertNotEqual(Event(**dp, color_id=1),
Event(**dp, color_id=2))
self.assertNotEqual(Event(**dp, visibility=Visibility.PRIVATE),
Event(**dp, visibility=Visibility.PUBLIC))
self.assertNotEqual(Event(**dp, attendees='[email protected]'),
Event(**dp, attendees='[email protected]'))
self.assertNotEqual(Event(**dp, attachments=Attachment(title='Attachment1', **attachments_dp)),
Event(**dp, attachments=Attachment(title='Attachment2', **attachments_dp)))
self.assertNotEqual(Event(**dp, minutes_before_email_reminder=10),
Event(**dp, minutes_before_popup_reminder=10))
self.assertNotEqual(Event(**dp, other={"key1": "value1"}),
Event(**dp, other={"key2": "value2"}))
def test_ordering(self):
e1 = Event('Good day', start=(28 / Sept / 2020), end=(30 / Sept / 2020), timezone=TEST_TIMEZONE)
e2 = Event('Good day', start=(28 / Sept / 2020), end=(16 / Oct / 2020), timezone=TEST_TIMEZONE)
e3 = Event('Good day', start=(29 / Sept / 2020), end=(30 / Sept / 2020), timezone=TEST_TIMEZONE)
e4 = Event('Good day', start=(29 / Sept / 2020)[22:22], end=(30 / Sept / 2020)[15:15], timezone=TEST_TIMEZONE)
e5 = Event('Good day', start=(29 / Sept / 2020)[22:22], end=(30 / Sept / 2020)[18:15], timezone=TEST_TIMEZONE)
e6 = Event('Good day', start=(29 / Sept / 2020)[23:22], end=(30 / Sept / 2020)[18:15], timezone=TEST_TIMEZONE)
self.assertEqual(list(sorted([e5, e6, e1, e3, e2, e4])), [e1, e2, e3, e4, e5, e6])
self.assertTrue(e1 < e2)
self.assertTrue(e3 > e2)
self.assertTrue(e5 >= e2)
self.assertTrue(e2 >= e2)
self.assertTrue(e5 <= e5)
self.assertTrue(e5 <= e6)
class TestEventSerializer(TestCase):
def setUp(self):
self.maxDiff = None
def test_to_json(self):
e = Event('Good day', start=(28 / Sept / 2019), timezone=TEST_TIMEZONE)
expected_event_json = {
'summary': 'Good day',
'start': {'date': '2019-09-28'},
'end': {'date': '2019-09-29'},
'recurrence': [],
'visibility': 'default',
'attendees': [],
'reminders': {'useDefault': False},
'attachments': [],
'guestsCanInviteOthers': True,
'guestsCanModify': False,
'guestsCanSeeOtherGuests': True,
}
self.assertDictEqual(EventSerializer.to_json(e), expected_event_json)
e = Event('Good day', start=(28 / Oct / 2019)[11:22:33], timezone=TEST_TIMEZONE)
expected_event_json = {
'summary': 'Good day',
'start': {'dateTime': '2019-10-28T11:22:33+12:00', 'timeZone': TEST_TIMEZONE},
'end': {'dateTime': '2019-10-28T12:22:33+12:00', 'timeZone': TEST_TIMEZONE},
'recurrence': [],
'visibility': 'default',
'attendees': [],
'reminders': {'useDefault': False},
'attachments': [],
'guestsCanInviteOthers': True,
'guestsCanModify': False,
'guestsCanSeeOtherGuests': True,
}
self.assertDictEqual(EventSerializer.to_json(e), expected_event_json)
def test_to_json_recurrence(self):
e = Event('Good day',
start=(1 / Jan / 2019)[11:22:33],
end=(1 / Jan / 2020)[11:22:33],
timezone=TEST_TIMEZONE,
recurrence=[
Recurrence.rule(freq=DAILY),
Recurrence.exclude_rule(by_week_day=MONDAY),
Recurrence.exclude_dates([
19 / Apr / 2019,
22 / Apr / 2019,
12 / May / 2019
])
])
expected_event_json = {
'summary': 'Good day',
'start': {'dateTime': '2019-01-01T11:22:33+13:00', 'timeZone': TEST_TIMEZONE},
'end': {'dateTime': '2020-01-01T11:22:33+13:00', 'timeZone': TEST_TIMEZONE},
'recurrence': [
'RRULE:FREQ=DAILY;WKST=SU',
'EXRULE:FREQ=DAILY;BYDAY=MO;WKST=SU',
'EXDATE;VALUE=DATE:20190419,20190422,20190512'
],
'visibility': 'default',
'attendees': [],
'reminders': {'useDefault': False},
'attachments': [],
'guestsCanInviteOthers': True,
'guestsCanModify': False,
'guestsCanSeeOtherGuests': True,
}
self.assertDictEqual(EventSerializer.to_json(e), expected_event_json)
def test_to_json_attachments(self):
e = Event('Good day',
start=(1 / Jan / 2019)[11:22:33],
timezone=TEST_TIMEZONE,
attachments=[
Attachment('https://file.url1', 'My file1', "application/vnd.google-apps.document"),
Attachment('https://file.url2', 'My file2', "application/vnd.google-apps.document")
])
expected_event_json = {
'summary': 'Good day',
'start': {'dateTime': '2019-01-01T11:22:33+13:00', 'timeZone': TEST_TIMEZONE},
'end': {'dateTime': '2019-01-01T12:22:33+13:00', 'timeZone': TEST_TIMEZONE},
'recurrence': [],
'visibility': 'default',
'attendees': [],
'reminders': {'useDefault': False},
'attachments': [
{
'title': 'My file1',
'fileUrl': 'https://file.url1',
'mimeType': 'application/vnd.google-apps.document'
},
{
'title': 'My file2',
'fileUrl': 'https://file.url2',
'mimeType': 'application/vnd.google-apps.document'
}
],
'guestsCanInviteOthers': True,
'guestsCanModify': False,
'guestsCanSeeOtherGuests': True,
}
self.assertDictEqual(EventSerializer.to_json(e), expected_event_json)
def test_to_json_reminders(self):
e = Event('Good day',
start=(1 / Jan / 2019)[11:22:33],
timezone=TEST_TIMEZONE,
minutes_before_popup_reminder=30,
minutes_before_email_reminder=120)
expected_event_json = {
'summary': 'Good day',
'start': {'dateTime': '2019-01-01T11:22:33+13:00', 'timeZone': TEST_TIMEZONE},
'end': {'dateTime': '2019-01-01T12:22:33+13:00', 'timeZone': TEST_TIMEZONE},
'recurrence': [],
'visibility': 'default',
'attendees': [],
'reminders': {
'overrides': [
{'method': 'popup', 'minutes': 30},
{'method': 'email', 'minutes': 120}
],
'useDefault': False
},
'attachments': [],
'guestsCanInviteOthers': True,
'guestsCanModify': False,
'guestsCanSeeOtherGuests': True,
}
self.assertDictEqual(EventSerializer.to_json(e), expected_event_json)
def test_to_json_attendees(self):
e = Event('Good day',
start=(1 / Jul / 2020)[11:22:33],
timezone=TEST_TIMEZONE,
attendees=[
Attendee(email='[email protected]', _response_status=ResponseStatus.NEEDS_ACTION),
Attendee(email='[email protected]', _response_status=ResponseStatus.ACCEPTED),
])
expected_event_json = {
'summary': 'Good day',
'start': {'dateTime': '2020-07-01T11:22:33+12:00', 'timeZone': TEST_TIMEZONE},
'end': {'dateTime': '2020-07-01T12:22:33+12:00', 'timeZone': TEST_TIMEZONE},
'recurrence': [],
'visibility': 'default',
'attendees': [
{'email': '[email protected]', 'responseStatus': ResponseStatus.NEEDS_ACTION},
{'email': '[email protected]', 'responseStatus': ResponseStatus.ACCEPTED},
],
'reminders': {'useDefault': False},
'attachments': [],
'guestsCanInviteOthers': True,
'guestsCanModify': False,
'guestsCanSeeOtherGuests': True,
}
self.assertDictEqual(EventSerializer.to_json(e), expected_event_json)
e = Event('Good day2',
start=20 / Jul / 2020,
default_reminders=True)
expected_event_json = {
'summary': 'Good day2',
'start': {'date': '2020-07-20'},
'end': {'date': '2020-07-21'},
'recurrence': [],
'visibility': 'default',
'attendees': [],
'reminders': {'useDefault': True},
'attachments': [],
'guestsCanInviteOthers': True,
'guestsCanModify': False,
'guestsCanSeeOtherGuests': True,
}
self.assertDictEqual(EventSerializer.to_json(e), expected_event_json)
def test_to_json_conference_solution(self):
e = Event(
'Good day',
start=(1 / Jul / 2020)[11:22:33],
timezone=TEST_TIMEZONE,
conference_solution=ConferenceSolution(
entry_points=EntryPoint(EntryPoint.VIDEO, uri='https://video.com'),
solution_type=SolutionType.HANGOUTS_MEET,
name='Hangout',
icon_uri='https://icon.com',
conference_id='aaa-bbbb-ccc',
signature='abc4efg12345',
notes='important notes'
)
)
expected_event_json = {
'summary': 'Good day',
'start': {'dateTime': '2020-07-01T11:22:33+12:00', 'timeZone': TEST_TIMEZONE},
'end': {'dateTime': '2020-07-01T12:22:33+12:00', 'timeZone': TEST_TIMEZONE},
'recurrence': [],
'visibility': 'default',
'attendees': [],
'reminders': {'useDefault': False},
'attachments': [],
'conferenceData': {
'entryPoints': [
{
'entryPointType': 'video',
'uri': 'https://video.com',
}
],
'conferenceSolution': {
'key': {
'type': 'hangoutsMeet'
},
'name': 'Hangout',
'iconUri': 'https://icon.com'
},
'conferenceId': 'aaa-bbbb-ccc',
'signature': 'abc4efg12345',
'notes': 'important notes'
},
'guestsCanInviteOthers': True,
'guestsCanModify': False,
'guestsCanSeeOtherGuests': True,
}
self.assertDictEqual(EventSerializer.to_json(e), expected_event_json)
def test_to_json_conference_solution_create_request(self):
e = Event(
'Good day',
start=(1 / Jul / 2020)[11:22:33],
timezone=TEST_TIMEZONE,
conference_solution=ConferenceSolutionCreateRequest(
solution_type=SolutionType.HANGOUTS_MEET,
request_id='hello1234',
conference_id='conference-id',
signature='signature',
notes='important notes',
_status='pending'
)
)
expected_event_json = {
'summary': 'Good day',
'start': {'dateTime': '2020-07-01T11:22:33+12:00', 'timeZone': TEST_TIMEZONE},
'end': {'dateTime': '2020-07-01T12:22:33+12:00', 'timeZone': TEST_TIMEZONE},
'recurrence': [],
'visibility': 'default',
'attendees': [],
'reminders': {'useDefault': False},
'attachments': [],
'conferenceData': {
'createRequest': {
'requestId': 'hello1234',
'conferenceSolutionKey': {
'type': 'hangoutsMeet'
},
'status': {
'statusCode': 'pending'
}
},
'conferenceId': 'conference-id',
'signature': 'signature',
'notes': 'important notes'
},
'guestsCanInviteOthers': True,
'guestsCanModify': False,
'guestsCanSeeOtherGuests': True,
}
self.assertDictEqual(EventSerializer.to_json(e), expected_event_json)
def test_to_json_updated(self):
e = Event(
'Good day',
start=(1 / Jul / 2020)[11:22:33],
timezone=TEST_TIMEZONE,
_updated=insure_localisation((25 / Nov / 2020)[11:22:33], timezone=TEST_TIMEZONE)
)
expected_event_json = {
'summary': 'Good day',
'start': {'dateTime': '2020-07-01T11:22:33+12:00', 'timeZone': TEST_TIMEZONE},
'end': {'dateTime': '2020-07-01T12:22:33+12:00', 'timeZone': TEST_TIMEZONE},
'recurrence': [],
'visibility': 'default',
'attendees': [],
'reminders': {'useDefault': False},
'attachments': [],
'guestsCanInviteOthers': True,
'guestsCanModify': False,
'guestsCanSeeOtherGuests': True,
}
self.assertDictEqual(EventSerializer.to_json(e), expected_event_json)
def test_to_object(self):
event_json = {
'summary': 'Good day',
'description': 'Very good day indeed',
'location': 'Prague',
'start': {'dateTime': '2019-01-01T11:22:33', 'timeZone': TEST_TIMEZONE},
'end': {'dateTime': '2019-01-01T12:22:33', 'timeZone': TEST_TIMEZONE},
'updated': '2020-11-25T14:53:46.0Z',
'created': '2020-11-24T14:53:46.0Z',
'recurrence': [
'RRULE:FREQ=DAILY;WKST=SU',
'EXRULE:FREQ=DAILY;BYDAY=MO;WKST=SU',
'EXDATE:VALUE=DATE:20190419,20190422,20190512'
],
'visibility': 'public',
'attendees': [
{'email': '[email protected]', 'responseStatus': ResponseStatus.NEEDS_ACTION},
{'email': '[email protected]', 'responseStatus': ResponseStatus.ACCEPTED},
],
'reminders': {
'useDefault': False,
'overrides': [
{'method': 'popup', 'minutes': 30},
{'method': 'email', 'minutes': 120}
]
},
'attachments': [
{
'title': 'My file1',
'fileUrl': 'https://file.url1',
'mimeType': 'application/vnd.google-apps.document'
},
{
'title': 'My file2',
'fileUrl': 'https://file.url2',
'mimeType': 'application/vnd.google-apps.document'
}
],
'conferenceData': {
'entryPoints': [
{
'entryPointType': 'video',
'uri': 'https://video.com',
}
],
'conferenceSolution': {
'key': {
'type': 'hangoutsMeet'
},
'name': 'Hangout',
'iconUri': 'https://icon.com'
},
'conferenceId': 'aaa-bbbb-ccc',
'signature': 'abc4efg12345',
'notes': 'important notes'
},
'guestsCanInviteOthers': False,
'guestsCanModify': True,
'guestsCanSeeOtherGuests': False,
'transparency': 'transparent',
'creator': {
'id': '123123',
'email': '[email protected]',
'displayName': 'Creator',
'self': True
},
'organizer': {
'id': '456456',
'email': '[email protected]',
'displayName': 'Organizer',
'self': False
}
}
serializer = EventSerializer(event_json)
event = serializer.get_object()
self.assertEqual(event.summary, 'Good day')
self.assertEqual(event.start, insure_localisation((1 / Jan / 2019)[11:22:33], TEST_TIMEZONE))
self.assertEqual(event.end, insure_localisation((1 / Jan / 2019)[12:22:33], TEST_TIMEZONE))
self.assertEqual(event.updated, insure_localisation((25 / Nov / 2020)[14:53:46], 'UTC'))
self.assertEqual(event.created, insure_localisation((24 / Nov / 2020)[14:53:46], 'UTC'))
self.assertEqual(event.description, 'Very good day indeed')
self.assertEqual(event.location, 'Prague')
self.assertEqual(len(event.recurrence), 3)
self.assertEqual(event.visibility, Visibility.PUBLIC)
self.assertEqual(len(event.attendees), 2)
self.assertIsInstance(event.reminders[0], PopupReminder)
self.assertEqual(event.reminders[0].minutes_before_start, 30)
self.assertIsInstance(event.reminders[1], EmailReminder)
self.assertEqual(event.reminders[1].minutes_before_start, 120)
self.assertEqual(len(event.attachments), 2)
self.assertIsInstance(event.attachments[0], Attachment)
self.assertEqual(event.attachments[0].title, 'My file1')
self.assertIsInstance(event.conference_solution, ConferenceSolution)
self.assertEqual(event.conference_solution.solution_type, 'hangoutsMeet')
self.assertEqual(event.conference_solution.entry_points[0].uri, 'https://video.com')
self.assertFalse(event.guests_can_invite_others)
self.assertTrue(event.guests_can_modify)
self.assertFalse(event.guests_can_see_other_guests)
self.assertEqual(event.transparency, 'transparent')
self.assertEqual(event.creator.email, '[email protected]')
self.assertEqual(event.organizer.email, '[email protected]')
event_json_str = """{
"summary": "Good day",
"description": "Very good day indeed",
"location": "Prague",
"start": {"date": "2020-07-20"},
"end": {"date": "2020-07-22"}
}"""
event = EventSerializer.to_object(event_json_str)
self.assertEqual(event.summary, 'Good day')
self.assertEqual(event.description, 'Very good day indeed')
self.assertEqual(event.location, 'Prague')
self.assertEqual(event.start, 20 / Jul / 2020)
self.assertEqual(event.end, 22 / Jul / 2020)
def test_to_object_recurring_event(self):
        event_json = {
"id": 'recurring_event_id_20201107T070000Z',
"summary": "Good day",
"description": "Very good day indeed",
"location": "Prague",
"start": {"date": "2020-07-20"},
"end": {"date": "2020-07-22"},
"recurringEventId": 'recurring_event_id'
}
        event = EventSerializer.to_object(event_json)
self.assertEqual(event.id, 'recurring_event_id_20201107T070000Z')
self.assertTrue(event.is_recurring_instance)
self.assertEqual(event.recurring_event_id, 'recurring_event_id')
def test_to_object_conference_data(self):
event_json = {
'summary': 'Good day',
'description': 'Very good day indeed',
'location': 'Prague',
'start': {'dateTime': '2019-01-01T11:22:33', 'timeZone': TEST_TIMEZONE},
'end': {'dateTime': '2019-01-01T12:22:33', 'timeZone': TEST_TIMEZONE},
'conferenceData': {
'createRequest': {
'requestId': 'hello1234',
'conferenceSolutionKey': {
'type': 'hangoutsMeet'
},
'status': {
'statusCode': 'pending'
}
},
'conferenceId': 'conference-id',
'signature': 'signature',
'notes': 'important notes'
}
}
event = EventSerializer.to_object(event_json)
self.assertIsInstance(event.conference_solution, ConferenceSolutionCreateRequest)
self.assertEqual(event.conference_solution.solution_type, 'hangoutsMeet')
# with successful conference create request
event_json = {
'summary': 'Good day',
'description': 'Very good day indeed',
'location': 'Prague',
'start': {'dateTime': '2019-01-01T11:22:33', 'timeZone': TEST_TIMEZONE},
'end': {'dateTime': '2019-01-01T12:22:33', 'timeZone': TEST_TIMEZONE},
'conferenceData': {
'entryPoints': [
{
'entryPointType': 'video',
'uri': 'https://video.com',
}
],
'conferenceSolution': {
'key': {
'type': 'hangoutsMeet'
},
'name': 'Hangout',
'iconUri': 'https://icon.com'
},
'createRequest': {
'requestId': 'hello1234',
'conferenceSolutionKey': {
'type': 'hangoutsMeet'
},
'status': {
'statusCode': 'success'
}
},
'conferenceId': 'conference-id',
'signature': 'signature',
'notes': 'important notes'
}
}
event = EventSerializer.to_object(event_json)
self.assertIsInstance(event.conference_solution, ConferenceSolution)
self.assertEqual(event.conference_solution.solution_type, 'hangoutsMeet')
self.assertEqual(event.conference_solution.entry_points[0].uri, 'https://video.com')
|
|
# see https://blog.alexandruioan.me/2017/01/31/the-2017-university-of-bristol-arm-hackathon for more details
import math
from http.server import BaseHTTPRequestHandler, HTTPServer
import urllib.parse
from sys import argv
# import matplotlib.pyplot as plt
import serial
import threading
import queue
import numpy as np
import time
q = queue.Queue()
radius = 250
UP = b'c60'
DOWN = b'c120'
INIT_A = b'a180'
INIT_B = b'b180'
DET = b'd'
ATA = b'e'
# serial flag for debugging
SEND = True
if SEND:
ser = serial.Serial('/dev/ttyACM0', 115200)
def park_and_detach():
if SEND:
ser.write(INIT_A)
ser.write(INIT_B)
ser.write(UP)
ser.write(DET)
park_and_detach()
# separate thread for sending data to serial
def serial_send(q):
while True:
to_send = q.get()
if SEND:
ser.write(to_send)
ser.flush()
q.task_done()
t = threading.Thread(target=serial_send, args=(q,))
t.start()
class req_handler(BaseHTTPRequestHandler):
prevX = -1
prevY = -1
# this runs when points are POSTed to the server
# it triggers the calculation of the angles of the servos
# and puts them in a queue
    # the thread at the other end of the queue sends the data over serial
def do_POST(self):
length = int(self.headers['Content-Length'])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
count = int((post_data['count'])[0])
q.put_nowait(UP)
# to_plot = []
q.put_nowait(ATA)
for i in range(count):
pointX = float((post_data['p' + str(i) + 'x'])[0])
pointY = float((post_data['p' + str(i) + 'y'])[0])
# don't draw points that are too close
# if (req_handler.prevX, req_handler.prevY) != (-1, -1):
# # print(math.sqrt((req_handler.prevX - pointX)**2 + (req_handler.prevY - pointY)**2))
# if math.sqrt((req_handler.prevX - pointX)**2 + (req_handler.prevY - pointY)**2) < 2:
# continue
# timing
# t0 = time.time()
(theta1, theta2) = bruteforce(pointX, pointY)
# t1 = time.time()
# total = t1 - t0
# print(total)
lift = False
# lift the pen if it has to move more than __ pixels,
# in which case we probably don't want a continuous line
if (req_handler.prevX, req_handler.prevY) != (-1, -1):
if math.sqrt((req_handler.prevX - pointX)**2 + (req_handler.prevY - pointY)**2) > 30:
lift = True
q.put_nowait(UP if lift else DOWN)
# if the motors had a 360 degree range of motion you could get 2 solutions
# (t11, t12), (t21, t22) = bruteforce360(pointX, pointY)
# tx1 - main circle angle
# tx2 - second circle angle
# logic in case there are 2 solutions
# theta1r = int(round(theta1))
# theta2r = int(round(theta2))
# t21r = int(round(t22))
# t22r = int(round(t21))
# print(t11r, t12r)
# print(t21r, t22r)
# if (t11r, t12r) == (0, 0):
# if (t21r, t22r) == (0, 0):
# (sol1, sol2) = (0, 0)
# else:
# (sol1, sol2) = (t21r, t22r)
# else:
# if (t21r, t22r) == (0, 0):
# (sol1, sol2) = (t11r, t12r)
# else:
# # TODO: decision logic (find solution closest to the previous one?)
# (sol1, sol2) = (t11r, t12r)
(sol1, sol2) = (int(round(theta1)), int(round(theta2)))
a = str(sol1)
b = str(sol2)
to_send = ('a' + a + 'b' + b + '\n').encode('ascii')
q.put_nowait(to_send)
# save the current position
(req_handler.prevX, req_handler.prevY) = (pointX, pointY)
# save the points for plotting
# to_plot.append((sol1, sol2, pointX, pointY))
# plot point that needs to be drawn
# plt.plot(pointX, pointY, 'ro')
# draw the circles corresponding to the solution - this will take a long time
# for theta in range(0, 360):
# for (t11, t21, pointX, pointY) in to_plot:
# center2X = radius * math.cos(math.radians(t21)) + radius * math.cos(math.radians(theta))
# center2Y = radius * math.sin(math.radians(t21)) + radius * math.sin(math.radians(theta))
# plt.plot(center2X, center2Y, 'rx')
# center2X = radius * math.cos(math.radians(t21)) + radius * math.cos(math.radians(theta))
# center2Y = radius * math.sin(math.radians(t21)) + radius * math.sin(math.radians(theta))
# plt.plot(center2X, center2Y, 'rx')
park_and_detach()
# start the HTTP server, binding it to all addresses, port 1180
def run(server_class = HTTPServer, handler_class = req_handler, port = 1180):
server_address = ('0.0.0.0', port)
httpd = server_class(server_address, handler_class)
print('Starting httpd...')
httpd.serve_forever()
def main():
if len(argv) == 2:
run(port = int(argv[1]))
else:
run()
# https://uk.mathworks.com/help/fuzzy/examples/modeling-inverse-kinematics-in-a-robotic-arm.html
# pregenerate grid
theta1range = np.arange(0, math.pi, 0.01)
theta2range = np.arange(0, math.pi, 0.01)
THETA1, THETA2 = np.meshgrid(theta1range, theta2range)
X_pred = radius * np.cos(THETA1) + radius * np.cos(THETA1 + THETA2)
Y_pred = radius * np.sin(THETA1) + radius * np.sin(THETA1 + THETA2)
def bruteforce(pointX, pointY):
last_theta = -100
switched = False
list1 = []
list2 = []
min_dist = 100000
min_theta1 = 0
min_theta2 = 0
# slow solution
# ~0.12s
# for theta1 in np.arange(0, math.pi, 0.01):
# for theta2 in np.arange(0, math.pi, 0.01):
# x_pred = radius * math.cos(theta1) + radius * math.cos(theta1 + theta2)
# y_pred = radius * math.sin(theta1) + radius * math.sin(theta1 + theta2)
# look_dist = math.sqrt((x_pred - pointX) ** 2 + (y_pred - pointY) ** 2)
# if look_dist < min_dist:
# min_dist = look_dist
# min_theta1 = theta1
# min_theta2 = theta2
# numpy solution
# ~0.005s
# generate 3D array of repeated target point
point = np.array([[[pointX, pointY]]])
point3D = np.repeat(np.repeat(point, X_pred.shape[0], axis = 0), X_pred.shape[1], axis = 1)
# create 3D array with potential X and Y values
grid = np.stack((X_pred, Y_pred), axis = 2)
# compute the Euclidean distance
diff = np.subtract(point3D, grid)
dists = np.linalg.norm(diff, ord = 2, axis = 2)
# find the minimum distance (grid point closest to the target point)
idx1, idx2 = np.unravel_index(dists.argmin(), dists.shape)
# extract its theta values
min_theta1 = THETA1[idx1][idx2]
min_theta2 = THETA2[idx1][idx2]
return (math.degrees(min_theta1), math.degrees(min_theta2))
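# Hedged usage sketch (not part of the original script): with both links of length
# `radius`, a fully extended arm pointing straight up reaches (0, 2 * radius), so the
# grid search should return angles close to (90.0, 0.0) degrees:
#
#   theta1_deg, theta2_deg = bruteforce(0, 2 * radius)
#   # theta1_deg ~= 90.0, theta2_deg ~= 0.0 (within the 0.01 rad grid resolution)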
# algorithm:
# sweep the main circle angle. each point on the circumference
# is the center of the second circle - a potential solution
# calculate the circle equation for the second one, and see if the
# target point satisfies it (with a tolerance)
# as we're dealing with pixels and not true geometrical points,
# it will produce clusters of solutions close together - average the points
# within each cluster
# there are up to two solutions - save these separately
def bruteforce360(pointX, pointY):
last_theta = -100
switched = False
list1 = []
list2 = []
min_dist = 100000
min_theta1 = 0
min_theta2 = 0
# theta - main circle angle
for theta in range(0, 360):
center2X = radius * math.cos(math.radians(theta))
center2Y = radius * math.sin(math.radians(theta))
# plt.plot(center2X, center2Y, 'bx')
sr = ((center2X - pointX) ** 2) + ((center2Y - pointY) ** 2)
if sr > (radius * 0.95)**2 and sr < (radius * 1.05) ** 2:
# found the second solution, switch arrays
if abs(last_theta - theta) > 30 and last_theta >= 0:
switched = True
# the angle of the 2nd circle is the angle between its center and the target point
if switched:
list1.append((theta, math.degrees(math.atan2(center2Y - pointY, center2X - pointX))))
else:
list2.append((theta, math.degrees(math.atan2(center2Y - pointY, center2X - pointX))))
last_theta = theta
sumt1 = 0
sumt2 = 0
# averaging
# tx1 - main circle angle
# tx2 - second circle angle
for (t1, t2) in list1:
sumt1 += t1
sumt2 += t2
t11 = sumt1 / len(list1) if len(list1) != 0 else 0
t12 = sumt2 / len(list1) if len(list1) != 0 else 0
sumt1 = 0
sumt2 = 0
# tx1 - main circle angle
# tx2 - second circle angle
for (t1, t2) in list2:
sumt1 += t1
sumt2 += t2
t21 = sumt1 / len(list2) if len(list2) != 0 else 0
t22 = sumt2 / len(list2) if len(list2) != 0 else 0
return (t11, t12), (t21, t22)
if __name__ == "__main__":
main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import pretend
import pytest
from packaging.utils import canonicalize_name
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from warehouse.admin.views import blacklist as views
from warehouse.packaging.models import BlacklistedProject, Project
from ....common.db.accounts import UserFactory
from ....common.db.packaging import (
ProjectFactory,
ReleaseFactory,
RoleFactory,
FileFactory,
BlacklistedProjectFactory,
)
class TestBlacklistList:
def test_no_query(self, db_request):
db_request.db.query(BlacklistedProject).delete()
blacklisted = sorted(
[BlacklistedProjectFactory.create() for _ in range(30)],
key=lambda b: canonicalize_name(b.name),
)
result = views.blacklist(db_request)
assert result == {"blacklist": blacklisted[:25], "query": None}
def test_with_page(self, db_request):
db_request.db.query(BlacklistedProject).delete()
blacklisted = sorted(
[BlacklistedProjectFactory.create() for _ in range(30)],
key=lambda b: canonicalize_name(b.name),
)
db_request.GET["page"] = "2"
result = views.blacklist(db_request)
assert result == {"blacklist": blacklisted[25:], "query": None}
def test_with_invalid_page(self):
request = pretend.stub(params={"page": "not an integer"})
with pytest.raises(HTTPBadRequest):
views.blacklist(request)
def test_basic_query(self, db_request):
db_request.db.query(BlacklistedProject).delete()
blacklisted = sorted(
[BlacklistedProjectFactory.create() for _ in range(30)],
key=lambda b: canonicalize_name(b.name),
)
db_request.GET["q"] = blacklisted[0].name
result = views.blacklist(db_request)
assert result == {"blacklist": [blacklisted[0]], "query": blacklisted[0].name}
def test_wildcard_query(self, db_request):
db_request.db.query(BlacklistedProject).delete()
blacklisted = sorted(
[BlacklistedProjectFactory.create() for _ in range(30)],
key=lambda b: canonicalize_name(b.name),
)
db_request.GET["q"] = blacklisted[0].name[:-1] + "%"
result = views.blacklist(db_request)
assert result == {
"blacklist": [blacklisted[0]],
"query": blacklisted[0].name[:-1] + "%",
}
class TestConfirmBlacklist:
def test_no_project(self):
request = pretend.stub(GET={})
with pytest.raises(HTTPBadRequest):
views.confirm_blacklist(request)
def test_nothing_to_delete(self, db_request):
db_request.GET["project"] = "foo"
result = views.confirm_blacklist(db_request)
assert result == {
"blacklist": {"project": "foo", "comment": ""},
"existing": {"project": None, "releases": [], "files": [], "roles": []},
}
def test_stuff_to_delete(self, db_request):
project = ProjectFactory.create()
db_request.GET["project"] = project.name
result = views.confirm_blacklist(db_request)
assert result == {
"blacklist": {"project": project.name, "comment": ""},
"existing": {"project": project, "releases": [], "files": [], "roles": []},
}
class TestAddBlacklist:
def test_no_project(self):
request = pretend.stub(POST={})
with pytest.raises(HTTPBadRequest):
views.add_blacklist(request)
def test_no_confirm(self):
request = pretend.stub(
POST={"project": "foo"},
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
current_route_path=lambda: "/foo/bar/",
)
result = views.add_blacklist(request)
assert request.session.flash.calls == [
pretend.call("Confirm the blacklist request", queue="error")
]
assert result.status_code == 303
assert result.headers["Location"] == "/foo/bar/"
def test_wrong_confirm(self):
request = pretend.stub(
POST={"project": "foo", "confirm": "bar"},
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
current_route_path=lambda: "/foo/bar/",
)
result = views.add_blacklist(request)
assert request.session.flash.calls == [
pretend.call("'bar' is not the same as 'foo'", queue="error")
]
assert result.status_code == 303
assert result.headers["Location"] == "/foo/bar/"
def test_already_existing_blacklist(self, db_request):
blacklist = BlacklistedProjectFactory.create()
db_request.db.expire_all()
db_request.user = UserFactory.create()
db_request.POST["project"] = blacklist.name
db_request.POST["confirm"] = blacklist.name
db_request.POST["comment"] = "This is a comment"
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = lambda a: "/admin/blacklist/"
result = views.add_blacklist(db_request)
assert db_request.session.flash.calls == [
pretend.call(
f"{blacklist.name!r} has already been blacklisted.", queue="error"
)
]
assert result.status_code == 303
assert result.headers["Location"] == "/admin/blacklist/"
def test_adds_blacklist(self, db_request):
db_request.user = UserFactory.create()
db_request.POST["project"] = "foo"
db_request.POST["confirm"] = "foo"
db_request.POST["comment"] = "This is a comment"
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = lambda a: "/admin/blacklist/"
views.add_blacklist(db_request)
assert db_request.session.flash.calls == [
pretend.call("Blacklisted 'foo'", queue="success")
]
blacklist = (
db_request.db.query(BlacklistedProject)
.filter(BlacklistedProject.name == "foo")
.one()
)
assert blacklist.name == "foo"
assert blacklist.blacklisted_by == db_request.user
assert blacklist.comment == "This is a comment"
def test_adds_blacklist_with_deletes(self, db_request):
db_request.user = UserFactory.create()
db_request.POST["project"] = "foo"
db_request.POST["confirm"] = "foo"
db_request.POST["comment"] = "This is a comment"
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = lambda a: "/admin/blacklist/"
db_request.remote_addr = "192.168.1.1"
project = ProjectFactory.create(name="foo")
release = ReleaseFactory.create(project=project)
FileFactory.create(release=release, filename="who cares")
RoleFactory.create(project=project, user=db_request.user)
views.add_blacklist(db_request)
assert db_request.session.flash.calls == [
pretend.call("Deleted the project 'foo'", queue="success"),
pretend.call("Blacklisted 'foo'", queue="success"),
]
blacklist = (
db_request.db.query(BlacklistedProject)
.filter(BlacklistedProject.name == "foo")
.one()
)
assert blacklist.name == "foo"
assert blacklist.blacklisted_by == db_request.user
assert blacklist.comment == "This is a comment"
assert not (db_request.db.query(Project).filter(Project.name == "foo").count())
class TestRemoveBlacklist:
def test_no_blacklist_id(self):
request = pretend.stub(POST={})
with pytest.raises(HTTPBadRequest):
views.remove_blacklist(request)
def test_blacklist_id_not_exist(self, db_request):
db_request.POST["blacklist_id"] = str(uuid.uuid4())
with pytest.raises(HTTPNotFound):
views.remove_blacklist(db_request)
def test_deletes_blacklist(self, db_request):
blacklist = BlacklistedProjectFactory.create()
db_request.POST["blacklist_id"] = str(blacklist.id)
db_request.route_path = lambda a: "/admin/blacklist/"
resp = views.remove_blacklist(db_request)
assert resp.status_code == 303
assert resp.headers["Location"] == "/admin/blacklist/"
assert not (
db_request.db.query(BlacklistedProject)
.filter(BlacklistedProject.id == blacklist.id)
.count()
)
def test_deletes_blacklist_with_redirect(self, db_request):
blacklist = BlacklistedProjectFactory.create()
db_request.POST["blacklist_id"] = str(blacklist.id)
db_request.POST["next"] = "/another/url/"
db_request.route_path = lambda a: "/admin/blacklist/"
resp = views.remove_blacklist(db_request)
assert resp.status_code == 303
assert resp.headers["Location"] == "/another/url/"
assert not (
db_request.db.query(BlacklistedProject)
.filter(BlacklistedProject.id == blacklist.id)
.count()
)
|
|
import numpy as np
import pandas as pd
import datetime as dt
import pkg_resources as pkg
class PyCrop(object):
"""
Simple Crop model class
"""
def __init__(self, doyp):
"""
Initialise the class
:rtype : object
"""
self.irrigation = GetData.get_irrigation()
self.weather = GetData.get_weather()
self.soil = GetData.get_soil()
self.sw_state = self.initial_sw_state
self.plant = GetData.get_plant()
self.plant['doyp'] = doyp
self.status = self.initial_status
@property
def initial_sw_state(self):
sw_state = {'WP': self.soil['DP'] * self.soil['WPp'] * 10.0,
'FC': self.soil['DP'] * self.soil['FCp'] * 10.0,
'ST': self.soil['DP'] * self.soil['STp'] * 10.0,
'SWC_INIT': self.soil['SWC'], 'TRAIN': 0.0,
'TIRR': 0.0, 'TESA': 0.0, 'TEPA': 0.0, 'TROF': 0.0,
'TDRN': 0.0, 'TINF': 0.0, 'SWC_ADJ': 0.0}
start = self.weather.index[0] # First date of weather data
if start in self.irrigation.index: # If it exists in irrigation data
sw_state['TIRR'] += self.irrigation.irr[start]
sw_state['POTINF'] = self.weather.rain[start] + self.irrigation.irr[start]
        else:  # or if there is only weather data
sw_state['POTINF'] = self.weather.rain[start]
sw_state['TRAIN'] += self.weather.rain[start]
sw_state['ROF'] = Soil_Water.runoff(POTINF=sw_state['POTINF'], CN=self.soil['CN'])
sw_state['THE'] = sw_state['WP'] + 0.75 * (sw_state['FC'] - sw_state['WP'])
sw_state['SWFAC1'], sw_state['SWFAC2'] = Soil_Water.stress(
SWC=self.soil['SWC'], DP=self.soil['DP'],
FC=sw_state['FC'], ST=sw_state['ST'],
WP=sw_state['WP'], THE=sw_state['THE'])
return sw_state
@property
def initial_status(self):
"""
Set a status dictionary used for control flow
"""
status = {'endsim': False, 'initial': True}
return status
def simulate(self):
""" Run the model using model class"""
sw_out, p_out = Model.run_model(self)
return sw_out, p_out
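    # Hedged usage sketch (the planting day below is illustrative, not taken from the
    # original code); the bundled Data/*.inp files must be installed with the package:
    #
    #   crop = PyCrop(doyp=121)          # plant on day-of-year 121
    #   sw_out, p_out = crop.simulate()  # daily soil-water and plant DataFrames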
class GetData(object):
"""
A class to group functions for getting the data
"""
@staticmethod
def get_irrigation():
"""
Get irrigation data from a file in a relative path.
"""
irrigation_file = pkg.resource_filename('PyCrop', "Data/IRRIG.INP")
tmp = []
with open(irrigation_file, 'r') as f:
for line in f:
date, irr = line.split()
tmp.append([int(date), float(irr)])
dlist = [(CustomFunctions.gen_datetimes(dateint[0])) for dateint in tmp]
return pd.DataFrame(data=tmp, index=dlist, columns=['date', 'irr'])
@staticmethod
def get_plant():
"""
Get initial data from Plant.inp file.
"""
plant_file = pkg.resource_filename('PyCrop', "Data/Plant.inp")
plant = {}
with open(plant_file, 'r') as f:
firstline = f.readline().split()
var = ['Lfmax', 'EMP2', 'EMP1', 'PD', 'nb', 'rm', 'fc', 'tb', 'intot',
'n', 'lai', 'w', 'wr', 'wc', 'p1', 'sla']
for n, i in enumerate(firstline):
plant[var[n]] = float(i)
plant['int'] = 0.0
plant['di'] = 0.0
plant['wf'] = 0.0
return plant
@staticmethod
def get_soil():
"""
Get soil data from input file in relative path.
Returns a dictionary obj as a class variable.
"""
soil_file = pkg.resource_filename('PyCrop', "Data/Soil.inp")
soil = {}
with open(soil_file, 'r') as f:
firstline = f.readline().split()
var = f.readline().split()
for n, i in enumerate(firstline):
soil[var[n]] = float(i)
return soil
@staticmethod
def get_weather():
"""
Get weather data from input file in relative path.
Returns a pandas Dataframe object as a class variable.
"""
weather_file = pkg.resource_filename('PyCrop', "Data/weather.inp")
tmp = []
with open(weather_file, 'r') as f:
for line in f:
date, srad, tmax, tmin, rain, par = line.split()
par = 0.5 * float(srad) # as in Weather.for
tmp.append([int(date), float(srad), float(tmax), float(tmin),
float(rain), float(par)])
dlist = [(CustomFunctions.gen_datetimes(dateint[0])) for dateint in tmp]
return pd.DataFrame(data=tmp, index=dlist, columns=['date', 'srad', 'tmax', 'tmin', 'rain', 'par'])
class Soil_Water(object):
"""
Subroutines of SW
    These subroutines calculate the soil water availability for the plant,
    considering the rain, runoff, deep percolation (drainage) and water
    use by the plant (evapotranspiration). The work is divided into subroutines
    that calculate those parameters separately. Daily climate data come
    from WEATHER and daily LAI from the PLANT subroutines. SW supplies
    PLANT with a daily soil water availability factor (SWFAC).
CALL SW(
DOY, LAI, RAIN, SRAD, TMAX, TMIN, !Input
SWFAC1, SWFAC2, !Output
'INITIAL ') !Control
"""
@staticmethod
def draine(SWC, FC, DRNp):
"""
DRAINE, calculates vertical drainage.
Input: SWC, FC, DRNp
Output: DRN
"""
if SWC > FC:
DRN = (SWC - FC) * DRNp
else:
DRN = 0.0
return DRN
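    # Hedged worked example (illustrative values): with SWC = 260 mm, FC = 240 mm and
    # DRNp = 0.1, DRN = (260 - 240) * 0.1 = 2.0 mm of drainage for the day.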
@staticmethod
def ESaS(SWC, WP, FC, ESp):
"""
Calculates the actual daily soil evaporation.
Input: SWC, WP, FC, ESp
Output: ESa
"""
        if SWC < WP:
            a = 0.
        elif SWC > FC:
            a = 1.
        else:
            # 'a' rises linearly from 0 at wilting point to 1 at field capacity
            a = (SWC - WP) / (FC - WP)
ESa = ESp * a
return ESa
@staticmethod
def ETpS(LAI, TMAX, TMIN, SRAD):
"""
Calculates the daily potential evapotranspiration.
Input: LAI, TMAX, TMIN, SRAD
Output: ETp
Local Variables
ALB = ALBEDO OF CROP-SOIL SURFACE
EEQ = EQUILIBRIUM EVAPOTRANSPIRATION (mm)
Tmed = ESTIMATED AVERAGE DAILY TEMPERATURE (C)
f =
SUBROUTINE ETpS(SRAD,TMAX,TMIN,LAI,ETp)
"""
ALB = 0.1 * np.exp(-0.7 * LAI) + 0.2 * (1 - np.exp(-0.7 * LAI))
Tmed = 0.6 * TMAX + 0.4 * TMIN
EEQ = SRAD * (4.88E-03 - 4.37E-03 * ALB) * (Tmed + 29)
if TMAX < 5:
f = 0.01 * np.exp(0.18 * (TMAX + 20.))
elif TMAX > 35:
f = 1.1 + 0.05 * (TMAX - 35.)
else:
f = 1.1
ETp = f * EEQ
return ETp
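    # Hedged worked example (illustrative values): for LAI = 3, TMAX = 30, TMIN = 20
    # and SRAD = 20, ALB ~= 0.19, Tmed = 26 and EEQ ~= 4.5; since 5 <= TMAX <= 35,
    # f = 1.1, giving ETp ~= 4.9 mm/day.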
@staticmethod
def runoff(POTINF, CN):
"""
SW subroutine RUNOFF calculates the daily runoff
Input: POTINF, CN, state(a string indicating flow required)
Output: ROF
Local Variables:
CN = CURVE NUMBER SCS EQUATION
S = WATERSHED STORAGE SCS EQUATION (MM)
"""
        S = 254. * (100. / CN - 1.)  # potential maximum retention S (mm), SCS curve-number method
if POTINF > 0.2 * S:
ROF = ((POTINF - 0.2 * S) ** 2) / (POTINF + 0.8 * S)
else:
ROF = 0.0
return ROF
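    # Hedged worked example (illustrative values): with CN = 75, S = 254 * (100/75 - 1)
    # ~= 84.7 mm; for POTINF = 30 mm, POTINF > 0.2 * S, so
    # ROF = (30 - 16.9)**2 / (30 + 0.8 * 84.7) ~= 1.7 mm of runoff.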
@staticmethod
def stress(SWC, DP, FC, ST, WP, THE):
"""
Sub-subroutine STRESS calculates soil water stresses.
Today's stresses will be applied to tomorrow's rate calcs.
Input: SWC, DP, FC, ST, WP
Output: SWFAC1, SWFAC2
        stress_depth is the water table depth (mm) below which no stress occurs
THE is the threshold for drought stress (mm)
Excess water stress factor - SWFAC2
FC water is distributed evenly throughout soil profile. Any
water in excess of FC creates a free water surface
WTABLE - thickness of water table (mm)
DWT - depth to water table from surface (mm)
The initial version of this program had two states, initial
and integration. I moved the one line of intiail (which creates
a value of THE) outside in the initilization of the sw_state dic.
"""
stress_depth = 250.
if SWC < WP:
SWFAC1 = 0.0
elif SWC > THE:
SWFAC1 = 1.0
else:
SWFAC1 = (SWC - WP) / (THE - WP)
        SWFAC1 = max([min([SWFAC1, 1.0]), 0.0])  # clamp to the range [0, 1]
if SWC <= FC:
WTABLE = 0.0 # Appears to be unused variable
DWT = DP * 10. # !DP in cm, DWT in mm # Appears to be unused variable
SWFAC2 = 1.0
else:
WTABLE = (SWC - FC) / (ST - FC) * DP * 10.
DWT = DP * 10. - WTABLE
if DWT > stress_depth:
SWFAC2 = 1.0
else:
SWFAC2 = DWT / stress_depth
SWFAC2 = max([min([SWFAC2, 1.0]), 0.0])
return SWFAC1, SWFAC2
class PlantMethods(object):
"""
PLANT
    These functions simulate the growth of the plant under pre-determined
    conditions. Hourly values of temperature and photosynthetically active
    radiation come from the WEATHER subroutine and daily values of soil water
    availability come from the SW subroutine. In return, they supply the SW
    subroutine with daily values of leaf area index (LAI).
SUBROUTINE PLANT(
DOY, endsim,TMAX,TMIN, PAR, SWFAC1, SWFAC2, !Input
LAI, !Output
DYN) !Control
"""
@staticmethod
def lais(FL, di, PD, EMP1, EMP2, N, nb, SWFAC1, SWFAC2, PT, dN,
sla, p1):
"""
Calculates the canopy leaf area index (LAI)
Input: FL, di, PD, EMP1, EMP2, N, nb, SWFAC1, SWFAC2, PT, dN
Output: dLAI
SUBROUTINE LAIS(FL,di,PD,EMP1,EMP2,N,nb,SWFAC1,SWFAC2,PT,
& dN,p1, sla, dLAI)
REAL PD,EMP1,EMP2,N,nb,dLAI, SWFAC,a, dN, p1,sla
REAL SWFAC1, SWFAC2, PT, di, FL
"""
SWFAC = np.min([SWFAC1, SWFAC2])
if FL == 1.0:
a = np.exp(EMP2 * (N - nb))
dLAI = SWFAC * PD * EMP1 * PT * (a / (1.0 + a)) * dN
elif FL == 2.0:
dLAI = - PD * di * p1 * sla
return dLAI
@staticmethod
def PGS(SWFAC1, SWFAC2, PAR, PD, PT, LAI):
"""
        Calculates the canopy gross photosynthesis rate (PG)
SUBROUTINE PGS(SWFAC1, SWFAC2,PAR, PD, PT, Lai, Pg)
REAL PAR, Lai, Pg, PT, Y1
REAL SWFAC1, SWFAC2, SWFAC,ROWSPC,PD
        ROWSPC = row spacing
Y1 = canopy light extinction coefficient
"""
SWFAC = np.min([SWFAC1, SWFAC2])
ROWSPC = 60.0
Y1 = 1.5 - 0.768 * ((ROWSPC * 0.01) ** 2 * PD) ** 0.1
Pg = PT * SWFAC * 2.1 * PAR / PD * (1.0 - np.exp(-Y1 * LAI))
return Pg
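    # Hedged worked example (illustrative values, model units): with PT = SWFAC = 1,
    # ROWSPC = 60, PD = 5, PAR = 10 and LAI = 3, Y1 ~= 0.69 and
    # Pg = 2.1 * 10 / 5 * (1 - exp(-0.69 * 3)) ~= 3.7 per plant per day.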
@staticmethod
def PTS(TMAX, TMIN):
"""
Calculates the factor that incorporates the effect of temperature
on photosynthesis
SUBROUTINE PTS(TMAX,TMIN,PT)
REAL PT,TMAX,TMIN
"""
PT = 1.0 - 0.0025 * ((0.25 * TMIN + 0.75 * TMAX) - 26.0) ** 2
return PT
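    # Hedged worked example: PTS(TMAX=30, TMIN=20) gives a weighted temperature of
    # 27.5 C, so PT = 1 - 0.0025 * (27.5 - 26)**2 ~= 0.994 (near-optimal conditions).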
class CustomFunctions(object):
"""
Custom functions
"""
@staticmethod
def gen_datetimes(tmpdint):
"""
        Given an integer of format YYDDD (two-digit year and day-of-year)
        this will return a datetime.date object.
        A list comprehension can then be used to create lists of
        dates to use as indices for time-ordered data, e.g.:
        dlist = [(gen_datetimes(dateint)) for dateint in Object.irrigation.date]
"""
yr = int(str(tmpdint)[0:2])
doy = int(str(tmpdint)[2:])
        if yr > 50:
            yr += 1900  # guess: two-digit years above 50 are treated as 19xx
        else:
            yr += 2000  # otherwise 20xx; the pivot is a guess for this irritatingly vague date scheme
return dt.date(yr, 1, 1) + dt.timedelta(doy - 1)
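    # Hedged examples: gen_datetimes(87001) -> datetime.date(1987, 1, 1), and
    # gen_datetimes(21100) -> datetime.date(2021, 4, 10) (day 100 of 2021).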
@staticmethod
def get_variable_defs():
"""
Calling this function will create a dictionary object attached to class
that holds all variable names and definitions from Soil Water.
(This is currently only the defs from the soil water routine.)
"""
var_dic = {}
var_files = [pkg.resource_filename('PyCrop', "Data/var_defs.txt")]
for file in var_files:
with open(file, 'r') as f:
for line in f:
tmp = (line.split('='))
var_dic[str.strip(tmp[0])] = str.strip(tmp[1])
return var_dic
class Model(object):
"""
    Functions to run the simulation; handles what were called the rate and
    integration steps in the original program's flow.
"""
def soil_rate(self, n):
"""
Soil rate section
"""
if n in self.irrigation.index: # If there is irrigation and weather data
self.sw_state['TIRR'] += self.irrigation.irr[n]
self.sw_state['POTINF'] = self.weather.rain[n] + self.irrigation.irr[n]
else: # If there is only weather data
self.sw_state['POTINF'] = self.weather.rain[n]
self.sw_state['TRAIN'] += self.weather.rain[n]
self.sw_state['DRN'] = Soil_Water.draine(SWC=self.soil['SWC'],
FC=self.sw_state['FC'],
DRNp=self.soil['DRNp'])
if self.sw_state['POTINF'] > 0.0:
self.sw_state['ROF'] = Soil_Water.runoff(POTINF=self.sw_state['POTINF'],
CN=self.soil['CN'])
self.sw_state['INF'] = self.sw_state['POTINF'] - self.sw_state['ROF']
else:
self.sw_state['ROF'] = 0.0
self.sw_state['INF'] = 0.0
# Pot. evapotranspiration (ETp), soil evaporation (ESp) and plant transpiration (EPp)
self.sw_state['ETp'] = Soil_Water.ETpS(SRAD=self.weather.srad[n],
TMAX=self.weather.tmax[n],
TMIN=self.weather.tmin[n],
LAI=self.plant['lai'])
self.sw_state['ESp'] = self.sw_state['ETp'] * np.exp(-0.7 * self.plant['lai'])
self.sw_state['EPp'] = self.sw_state['ETp'] * (1 - np.exp(-0.7 * self.plant['lai']))
# Actual soil evaporation (ESa), plant transpiration (EPa)
self.sw_state['ESa'] = Soil_Water.ESaS(ESp=self.sw_state['ESp'],
SWC=self.soil['SWC'],
FC=self.sw_state['FC'],
WP=self.sw_state['WP'])
self.sw_state['EPa'] = self.sw_state['EPp'] * np.min([self.sw_state['SWFAC1'],
self.sw_state['SWFAC2']])
return
def plant_rate(self, n):
"""
Info here
"""
self.plant['TMN'] = np.mean([self.weather['tmax'][n], self.weather['tmin'][n]])
self.plant['PT'] = PlantMethods.PTS(TMAX=self.weather['tmax'][n],
TMIN=self.weather['tmin'][n])
self.plant['Pg'] = PlantMethods.PGS(SWFAC1=self.sw_state['SWFAC1'],
SWFAC2=self.sw_state['SWFAC2'],
PAR=self.weather['par'][n],
PD=self.plant['PD'],
PT=self.plant['PT'],
LAI=self.plant['lai'])
if self.plant['n'] < self.plant['Lfmax']: # In the vegetative phase
self.plant['FL'] = 1.0
self.plant['E'] = 1.0
self.plant['dN'] = self.plant['rm'] * self.plant['PT']
self.plant['dLAI'] = PlantMethods.lais(FL=self.plant['FL'], di=self.plant['di'],
PD=self.plant['PD'], EMP1=self.plant['EMP1'],
EMP2=self.plant['EMP2'], N=self.plant['n'],
nb=self.plant['nb'], SWFAC1=self.sw_state['SWFAC1'],
SWFAC2=self.sw_state['SWFAC2'], PT=self.plant['PT'],
dN=self.plant['dN'], p1=self.plant['p1'],
sla=self.plant['sla'])
self.plant['dw'] = self.plant['E'] * (self.plant['Pg']) * self.plant['PD']
self.plant['dwc'] = self.plant['fc'] * self.plant['dw']
self.plant['dwr'] = (1 - self.plant['fc']) * self.plant['dw']
self.plant['dwf'] = 0.0
else: # In the reproductive plant phase...
self.plant['FL'] = 2.0
if (self.plant['TMN'] >= self.plant['tb']) and (self.plant['TMN'] <= 25.):
self.plant['di'] = (self.plant['TMN'] - self.plant['tb'])
else:
self.plant['di'] = 0.0
self.plant['int'] += self.plant['di']
self.plant['E'] = 1.0
self.plant['dLAI'] = PlantMethods.lais(FL=self.plant['FL'], di=self.plant['di'],
PD=self.plant['PD'], EMP1=self.plant['EMP1'],
EMP2=self.plant['EMP2'], N=self.plant['n'],
nb=self.plant['nb'], SWFAC1=self.sw_state['SWFAC1'],
SWFAC2=self.sw_state['SWFAC2'], PT=self.plant['PT'],
dN=self.plant['dN'], p1=self.plant['p1'],
sla=self.plant['sla'])
self.plant['dw'] = self.plant['E'] * (self.plant['Pg']) * self.plant['PD']
self.plant['dwf'] = self.plant['dw']
self.plant['dwc'] = 0.0
self.plant['dwr'] = 0.0
self.plant['dN'] = 0.0
return
def sw_integrate(self):
"""
Info
"""
self.soil['SWC'] += (self.sw_state['INF'] - self.sw_state['ESa'] -
self.sw_state['EPa'] - self.sw_state['DRN'])
        if self.soil['SWC'] > self.sw_state['ST']:  # If water content > storage capacity
            # the excess over storage capacity becomes runoff and SWC is capped at ST
            self.sw_state['ROF'] += self.soil['SWC'] - self.sw_state['ST']
            self.soil['SWC'] = self.sw_state['ST']
self.sw_state['TINF'] += self.sw_state['INF']
self.sw_state['TESA'] += self.sw_state['ESa']
self.sw_state['TEPA'] += self.sw_state['EPa']
self.sw_state['TDRN'] += self.sw_state['DRN']
self.sw_state['TROF'] += self.sw_state['ROF']
self.sw_state['SWFAC1'], self.sw_state['SWFAC2'] = Soil_Water.stress(
SWC=self.soil['SWC'], DP=self.soil['DP'],
FC=self.sw_state['FC'], ST=self.sw_state['ST'],
WP=self.sw_state['WP'], THE=self.sw_state['THE'])
return
def plant_integrate(self, doy):
self.plant['lai'] += self.plant['dLAI']
self.plant['w'] += self.plant['dw']
self.plant['wc'] += self.plant['dwc']
self.plant['wr'] += self.plant['dwr']
self.plant['wf'] += self.plant['dwf']
        # clamp state variables at zero; np.max(x, 0.0) would pass 0.0 as the axis argument
        self.plant['lai'] = max(self.plant['lai'], 0.0)
        self.plant['w'] = max(self.plant['w'], 0.0)
        self.plant['wc'] = max(self.plant['wc'], 0.0)
        self.plant['wr'] = max(self.plant['wr'], 0.0)
        self.plant['wf'] = max(self.plant['wf'], 0.0)
self.plant['n'] += self.plant['dN']
if self.plant['int'] > self.plant['intot']:
self.status['endsim'] = True
print('The crop matured on day', doy)
return
def sw_output(self, n, doy, last_df):
"""
In charge of output for SW data, creates or updates as DF object.
"""
if n in self.irrigation.index: # If there is irrigation data get it
irrval = self.irrigation.irr[n]
else: # else set to 0
irrval = 0
tmpdat = [[doy,
self.weather.srad[n],
self.weather.tmax[n],
self.weather.tmin[n],
self.weather.rain[n],
irrval,
self.sw_state['ROF'],
self.sw_state['INF'],
self.sw_state['DRN'],
self.sw_state['ETp'],
self.sw_state['ESa'],
self.sw_state['EPa'],
self.soil['SWC'],
(self.soil['SWC'] / self.soil['DP']),
self.sw_state['SWFAC1'],
self.sw_state['SWFAC2']
]]
colnames = ['DOY', 'SRAD', 'TMAX', 'TMIN', 'RAIN', 'IRR', 'ROF',
'INF', 'DRN', 'ETP', 'ESa', 'EPa', 'SWC', 'SWC/DP', 'SWFAC1', 'SWFAC2']
if self.status['initial']: # If this is the first run then make the dataframe
return pd.DataFrame(data=tmpdat, index=[n], columns=colnames)
else: # if it is not the first run, then update the dataframe
dfupdate = pd.DataFrame(data=tmpdat, index=[n], columns=colnames)
            return pd.concat([last_df, dfupdate])  # DataFrame.append was removed in pandas 2.0
def plant_output(self, doy, n, last_df):
"""
Plant output subroutine
"""
tmpdat = [[
doy,
self.plant['n'],
self.plant['int'],
self.plant['w'],
self.plant['wc'],
self.plant['wr'],
self.plant['wf'],
self.plant['lai']
]]
colnames = ['DOY', 'N', 'INT', 'W', 'Wc', 'Wr', 'Wf', 'LAI']
if self.status['initial']: # If this is the first run then make the dataframe
return pd.DataFrame(data=tmpdat, index=[n], columns=colnames)
else: # if it is not the first run, then update the dataframe
dfupdate = pd.DataFrame(data=tmpdat, index=[n], columns=colnames)
            return pd.concat([last_df, dfupdate])  # DataFrame.append was removed in pandas 2.0
def run_model(self):
"""
Running rate and integration of Soil Water and Plant
DOYP - the date of planting has been added to the self.plant['doyp']
"""
        # note: different years give a different maturation date [subscript to new year]
# alter the input in some way so I can feed it one year at a time only...
for n in np.sort(self.weather.index): # for each time-step (n)
if not self.status['endsim']:
# print(n,self.status['endsim'])
doy = n.timetuple()[7] # get the DOY
# ---- Rate calculations ---
Model.soil_rate(self, n=n)
# if doy > doyp: # Calc plant if doy is after a planting doy
if doy > self.plant['doyp']:
Model.plant_rate(self, n=n)
# --- Integration step --- (i.e. update state dictionaries each time step)
Model.sw_integrate(self, )
if doy > self.plant['doyp']:
Model.plant_integrate(self, doy=doy)
# --- Output stage ---
if self.status['initial']:
sw_out = Model.sw_output(self, n=n, doy=doy, last_df=None)
p_out = Model.plant_output(self, n=n, doy=doy, last_df=None)
self.status['initial'] = False
else:
sw_out = Model.sw_output(self, n=n, doy=doy, last_df=sw_out)
p_out = Model.plant_output(self, n=n, doy=doy, last_df=p_out)
print('Simulation finished')
return sw_out, p_out
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_kusto_pool_request(
workspace_name: str,
kusto_pool_name: str,
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/attachedDatabaseConfigurations')
path_format_arguments = {
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"kustoPoolName": _SERIALIZER.url("kusto_pool_name", kusto_pool_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
workspace_name: str,
kusto_pool_name: str,
attached_database_configuration_name: str,
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/attachedDatabaseConfigurations/{attachedDatabaseConfigurationName}')
path_format_arguments = {
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"kustoPoolName": _SERIALIZER.url("kusto_pool_name", kusto_pool_name, 'str'),
"attachedDatabaseConfigurationName": _SERIALIZER.url("attached_database_configuration_name", attached_database_configuration_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
workspace_name: str,
kusto_pool_name: str,
attached_database_configuration_name: str,
subscription_id: str,
resource_group_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/attachedDatabaseConfigurations/{attachedDatabaseConfigurationName}')
path_format_arguments = {
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"kustoPoolName": _SERIALIZER.url("kusto_pool_name", kusto_pool_name, 'str'),
"attachedDatabaseConfigurationName": _SERIALIZER.url("attached_database_configuration_name", attached_database_configuration_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
workspace_name: str,
kusto_pool_name: str,
attached_database_configuration_name: str,
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/attachedDatabaseConfigurations/{attachedDatabaseConfigurationName}')
path_format_arguments = {
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"kustoPoolName": _SERIALIZER.url("kusto_pool_name", kusto_pool_name, 'str'),
"attachedDatabaseConfigurationName": _SERIALIZER.url("attached_database_configuration_name", attached_database_configuration_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class KustoPoolAttachedDatabaseConfigurationsOperations(object):
"""KustoPoolAttachedDatabaseConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_kusto_pool(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.AttachedDatabaseConfigurationListResult"]:
"""Returns the list of attached database configurations of the given Kusto Pool.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AttachedDatabaseConfigurationListResult or the
result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.synapse.models.AttachedDatabaseConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AttachedDatabaseConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_kusto_pool_request(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_kusto_pool.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_kusto_pool_request(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AttachedDatabaseConfigurationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_kusto_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/attachedDatabaseConfigurations'} # type: ignore
@distributed_trace
def get(
self,
workspace_name: str,
kusto_pool_name: str,
attached_database_configuration_name: str,
resource_group_name: str,
**kwargs: Any
) -> "_models.AttachedDatabaseConfiguration":
"""Returns an attached database configuration.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param attached_database_configuration_name: The name of the attached database configuration.
:type attached_database_configuration_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AttachedDatabaseConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.AttachedDatabaseConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AttachedDatabaseConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
attached_database_configuration_name=attached_database_configuration_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AttachedDatabaseConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/attachedDatabaseConfigurations/{attachedDatabaseConfigurationName}'} # type: ignore
def _create_or_update_initial(
self,
workspace_name: str,
kusto_pool_name: str,
attached_database_configuration_name: str,
resource_group_name: str,
parameters: "_models.AttachedDatabaseConfiguration",
**kwargs: Any
) -> "_models.AttachedDatabaseConfiguration":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AttachedDatabaseConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AttachedDatabaseConfiguration')
request = build_create_or_update_request_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
attached_database_configuration_name=attached_database_configuration_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AttachedDatabaseConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AttachedDatabaseConfiguration', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AttachedDatabaseConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/attachedDatabaseConfigurations/{attachedDatabaseConfigurationName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
workspace_name: str,
kusto_pool_name: str,
attached_database_configuration_name: str,
resource_group_name: str,
parameters: "_models.AttachedDatabaseConfiguration",
**kwargs: Any
) -> LROPoller["_models.AttachedDatabaseConfiguration"]:
"""Creates or updates an attached database configuration.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param attached_database_configuration_name: The name of the attached database configuration.
:type attached_database_configuration_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param parameters: The database parameters supplied to the CreateOrUpdate operation.
:type parameters: ~azure.mgmt.synapse.models.AttachedDatabaseConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either AttachedDatabaseConfiguration or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.synapse.models.AttachedDatabaseConfiguration]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AttachedDatabaseConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
attached_database_configuration_name=attached_database_configuration_name,
resource_group_name=resource_group_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AttachedDatabaseConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/attachedDatabaseConfigurations/{attachedDatabaseConfigurationName}'} # type: ignore
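    # Hedged usage sketch (added commentary, not generated code): the begin_* methods
    # return an LROPoller, so a caller typically blocks on .result().  The name of the
    # operations-group attribute on the service client below is an assumption.
    #
    #     poller = client.attached_database_configurations.begin_create_or_update(
    #         workspace_name="myworkspace",
    #         kusto_pool_name="mypool",
    #         attached_database_configuration_name="myconfig",
    #         resource_group_name="myrg",
    #         parameters=attached_db_config,  # a _models.AttachedDatabaseConfiguration
    #     )
    #     config = poller.result()            # blocks until the LRO completes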
def _delete_initial(
self,
workspace_name: str,
kusto_pool_name: str,
attached_database_configuration_name: str,
resource_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
attached_database_configuration_name=attached_database_configuration_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/attachedDatabaseConfigurations/{attachedDatabaseConfigurationName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
workspace_name: str,
kusto_pool_name: str,
attached_database_configuration_name: str,
resource_group_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes the attached database configuration with the given name.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param attached_database_configuration_name: The name of the attached database configuration.
:type attached_database_configuration_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
attached_database_configuration_name=attached_database_configuration_name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/attachedDatabaseConfigurations/{attachedDatabaseConfigurationName}'} # type: ignore
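    # Hedged usage sketch for the delete LRO (the operations-group attribute name is,
    # again, an assumption):
    #
    #     client.attached_database_configurations.begin_delete(
    #         workspace_name="myworkspace",
    #         kusto_pool_name="mypool",
    #         attached_database_configuration_name="myconfig",
    #         resource_group_name="myrg",
    #     ).result()                          # wait for the deletion to finish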
|
|
# coding=utf-8
'''
xiami music provider.
'''
import json
import logging
import urllib
import urllib2
from replay import h
logger = logging.getLogger('listenone.' + __name__)
# https://github.com/Flowerowl/xiami
def caesar(location):
num = int(location[0])
avg_len = int(len(location[1:]) / num)
remainder = int(len(location[1:]) % num)
result = [
location[i * (avg_len + 1) + 1: (i + 1) * (avg_len + 1) + 1]
for i in range(remainder)]
result.extend(
[
location[(avg_len + 1) * remainder:]
[i * avg_len + 1: (i + 1) * avg_len + 1]
for i in range(num - remainder)])
url = urllib.unquote(
''.join([
''.join([result[j][i] for j in range(num)])
for i in range(avg_len)
]) +
''.join([result[r][-1] for r in range(remainder)])).replace('^', '0')
return url
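# Worked example (hand-built input, not a real Xiami payload): in '3adgbehcf' the
# leading '3' says the rest is stored as three rows 'adg' / 'beh' / 'cf'; reading
# those rows column by column restores the original text, so
# caesar('3adgbehcf') == 'abcdefgh' (unquoting and the '^' -> '0' replacement are
# no-ops for this input).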
def filetype():
return '.mp3'
def _xm_h(url, v=None):
'''
http request
'''
extra_headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded',
'Host': 'api.xiami.com',
'Referer': 'http://m.xiami.com/',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2)' +
' AppleWebKit/537.36 (KHTML, like Gecko) Chrome' +
'/33.0.1750.152 Safari/537.36',
}
return h(url, v=v, extra_headers=extra_headers)
def _gen_url_params(d):
for k, v in d.iteritems():
d[k] = unicode(v).encode('utf-8')
return urllib.urlencode(d)
def _convert_song(song):
d = {
'id': 'xmtrack_' + str(song['song_id']),
'title': song['song_name'],
'artist': song['artist_name'],
'artist_id': 'xmartist_' + str(song['artist_id']),
'album': song['album_name'],
'album_id': 'xmalbum_' + str(song['album_id']),
'source': 'xiami',
'source_url': 'http://www.xiami.com/song/' + str(song['song_id']),
}
if 'logo' in song:
d['img_url'] = song['logo']
else:
d['img_url'] = ''
params = _gen_url_params(d)
d['url'] = '/track_file?' + params
return d
def _retina_url(s):
return s[:-6] + s[-4:]
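# Illustration (placeholder URL): _retina_url drops the two-character size suffix
# that sits just before a four-character extension, e.g.
# 'http://example.com/album_logo_1.jpg' -> 'http://example.com/album_logo.jpg'.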
# -------------standard interface part------------------
def search_track(keyword):
'''
    return matched xiami songs
'''
keyword = urllib2.quote(keyword.encode("utf8"))
search_url = 'http://api.xiami.com/web?v=2.0&app_key=1&key=' + keyword \
+ '&page=1&limit=50&_ksTS=1459930568781_153&callback=jsonp154' + \
'&r=search/songs'
response = _xm_h(search_url)
json_string = response[len('jsonp154('):-len(')')]
data = json.loads(json_string)
result = []
for song in data['data']["songs"]:
result.append(_convert_song(song))
return result
def list_playlist():
url = 'http://api.xiami.com/web?v=2.0&app_key=1&_ksTS=1459927525542_91' + \
'&page=1&limit=60&callback=jsonp92&r=collect/recommend'
    response = _xm_h(url)
    data = json.loads(response[len('jsonp92('):-len(')')])
result = []
for l in data['data']:
d = dict(
cover_img_url=l['logo'],
title=l['collect_name'],
play_count=0,
list_id='xmplaylist_' + str(l['list_id']),)
result.append(d)
return result
def get_playlist(playlist_id):
url = 'http://api.xiami.com/web?v=2.0&app_key=1&id=%s' % playlist_id + \
'&_ksTS=1459928471147_121&callback=jsonp122&r=collect/detail'
    response = _xm_h(url)
    data = json.loads(response[len('jsonp122('):-len(')')])
info = dict(
cover_img_url=_retina_url(data['data']['logo']),
title=data['data']['collect_name'],
id='xmplaylist_' + playlist_id)
result = []
for song in data['data']['songs']:
result.append(_convert_song(song))
return dict(tracks=result, info=info)
def get_artist(artist_id):
url = 'http://api.xiami.com/web?v=2.0&app_key=1&id=%s' % str(artist_id) + \
'&page=1&limit=20&_ksTS=1459931285956_216' + \
'&callback=jsonp217&r=artist/detail'
    response = _xm_h(url)
    data = json.loads(response[len('jsonp217('):-len(')')])
artist_name = data['data']['artist_name']
info = dict(
cover_img_url=_retina_url(data['data']['logo']),
title=artist_name,
id='xmartist_' + artist_id)
url = 'http://api.xiami.com/web?v=2.0&app_key=1&id=%s' % str(artist_id) + \
'&page=1&limit=20&_ksTS=1459931285956_216' + \
'&callback=jsonp217&r=artist/hot-songs'
    response = _xm_h(url)
    data = json.loads(response[len('jsonp217('):-len(')')])
result = []
for song in data['data']:
d = {
'id': 'xmtrack_' + str(song['song_id']),
'title': song['song_name'],
'artist': artist_name,
'artist_id': 'xmartist_' + artist_id,
'album': '',
'album_id': '',
'img_url': '',
'source': 'xiami',
'source_url': 'http://www.xiami.com/song/' + str(song['song_id']),
}
params = _gen_url_params(d)
d['url'] = '/track_file?' + params
result.append(d)
return dict(tracks=result, info=info)
def get_album(album_id):
url = 'http://api.xiami.com/web?v=2.0&app_key=1&id=%s' % str(album_id) + \
'&page=1&limit=20&_ksTS=1459931285956_216' + \
'&callback=jsonp217&r=album/detail'
    response = _xm_h(url)
    data = json.loads(response[len('jsonp217('):-len(')')])
artist_name = data['data']['artist_name']
info = dict(
cover_img_url=_retina_url(data['data']['album_logo']),
title=data['data']['album_name'],
id='xmalbum_' + album_id)
result = []
for song in data['data']['songs']:
d = {
'id': 'xmtrack_' + str(song['song_id']),
'title': song['song_name'],
'artist': artist_name,
'artist_id': 'xmartist_' + str(song['artist_id']),
'album': song['album_name'],
'album_id': 'xmalbum_' + str(song['album_id']),
'img_url': song['album_logo'],
'source': 'xiami',
'source_url': 'http://www.xiami.com/song/' + str(song['song_id']),
}
params = _gen_url_params(d)
d['url'] = '/track_file?' + params
result.append(d)
return dict(tracks=result, info=info)
def get_url_by_id(song_id):
url = 'http://www.xiami.com/song/playlist/id/%s' % song_id + \
'/object_name/default/object_id/0/cat/json'
response = h(url)
secret = json.loads(response)['data']['trackList'][0]['location']
url = caesar(secret)
return url
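# Hedged end-to-end sketch using the "standard interface" above (the keyword and
# index are placeholders):
#
#     tracks = search_track(u'some keyword')          # dicts with 'id', 'url', ...
#     song_id = tracks[0]['id'][len('xmtrack_'):]     # strip the 'xmtrack_' prefix
#     mp3_url = get_url_by_id(song_id)                # resolve the playable location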
|
|
#!/usr/bin/env python
import unittest, random
from common import *
from ctypes import *
def addMiRange(mi, r1, r2):
ibd.Mi_AddValidRange(mi, c_long(r1), c_long(r2))
newMii = ibd.Mii_New
newMii.restype = ctypes.c_void_p
delMii = ibd.Mii_Delete
newMiri = ibd.Miri_New
newMiri.restype = ctypes.c_void_p
delMiri = ibd.Miri_Delete
isValid = ibd.Mi_IsValid
Mii_Next = ibd.Mii_Next
MiCopy = ibd.Mi_Copy
MiCopy.restype = ctypes.c_void_p
class MarkerRange(Structure):
_fields_ = [("start", c_long),
("end", c_long)]
Miri_Next = ibd.Miri_Next
Mi_Complement = ibd.Mi_Complement
Mi_Complement.restype = ctypes.c_void_p
Mi_Union = ibd.Mi_Union
Mi_Union.restype = ctypes.c_void_p
Mi_Intersection = ibd.Mi_Intersection
Mi_Intersection.restype = ctypes.c_void_p
Mi_Difference = ibd.Mi_Difference
Mi_Difference.restype = ctypes.c_void_p
Mi_SymmetricDifference = ibd.Mi_SymmetricDifference
Mi_SymmetricDifference.restype = ctypes.c_void_p
ibd.Mr_Plus_Infinity.restype = ctypes.c_long
ibd.Mr_Minus_Infinity.restype = ctypes.c_long
ibd.Mr_Start.restype = ctypes.c_long
ibd.Mr_End.restype = ctypes.c_long
mr_plus_inf = ibd.Mr_Plus_Infinity()
mr_minus_inf = ibd.Mr_Minus_Infinity()
def delMi(*mi_list):
for mi in mi_list:
ibd.O_DecRef(mi)
def newMi(r1, r2, *args):
ibd.Mi_New.restype = ctypes.c_void_p
mi = ibd.Mi_New(c_long(r1), c_long(r2))
for r1, r2 in zip(args[::2], args[1::2]):
addMiRange(mi,r1,r2)
mi_copy = MiCopy(mi)
assert ibd.Mi_Equal(mi, mi_copy)
ibd.O_DecRef(mi_copy)
assert r1 >= r2 or ibd.Mi_ValidAnywhere(mi)
return mi
def checkRanges(mi, valid_set, invalid_set):
wrongly_invalid = [m for m in valid_set if not isValid(mi, m)]
wrongly_valid = [m for m in invalid_set if isValid(mi, m)]
#print "valid_set = ", valid_set
#print "wrongly_invalid = ", wrongly_invalid
#print "invalid_set = ", invalid_set
#print "wrongly_valid = ", wrongly_valid
assert len(wrongly_valid) == 0 and len(wrongly_invalid) == 0,\
"Wrongly Valid: %s; Wrongly Invalid: %s" \
% (",".join(str(m) for m in sorted(wrongly_valid)),
",".join(str(m) for m in sorted(wrongly_invalid)))
def checkSetOperation(operation, mi1, mi2, test_range = None):
if test_range is None:
test_range = [mr_minus_inf] + range(-50, 50) + [mr_plus_inf]
def validSet(mi):
return set( m for m in test_range if isValid(mi, c_long(m)) )
s_1 = validSet(mi1)
s_2 = validSet(mi2)
# Check copying is okay
mi1_copy = MiCopy(mi1)
assert s_1 == validSet(mi1_copy)
assert ibd.Mi_Equal(mi1, mi1_copy)
ibd.O_DecRef(mi1_copy)
mi2_copy = MiCopy(mi2)
assert s_2 == validSet(mi2_copy)
assert ibd.Mi_Equal(mi2, mi2_copy)
ibd.O_DecRef(mi2_copy)
if operation == "union":
mi_f = Mi_Union
s_f = set.union
elif operation == "intersection":
mi_f = Mi_Intersection
s_f = set.intersection
elif operation == "difference":
mi_f = Mi_Difference
s_f = set.difference
elif operation == "symmetricDifference":
mi_f = Mi_SymmetricDifference
s_f = set.symmetric_difference
mi_u = mi_f(mi1, mi2)
test_set = validSet(mi_u)
correct_set = s_f(s_1, s_2)
if test_set != correct_set:
print "FAIL Report (operation %s):" % operation
print "mi1 = ",
ibd.Mi_debug_printMi(mi1);
print "\n s1 = ", sorted(s_1),
print "\nmi2 = ",
ibd.Mi_debug_printMi(mi2);
print "\n s2 = ", sorted(s_2),
print "\nmi_c = ",
ibd.Mi_debug_printMi(mi_u);
print "\n s_c = ", sorted(correct_set),
print ""
assert test_set == correct_set, \
("Op %s: In correct, not test: %s; in test, not correct: %s"
% (operation,
",".join(str(m) for m in sorted(correct_set - test_set)),
",".join(str(m) for m in sorted(test_set - correct_set))))
# Assume sets are disposable
delMi(mi1, mi2, mi_u)
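# Added commentary: checkSetOperation is a brute-force oracle test.  It materialises
# each marker index as a plain Python set over a sampled range (plus the +/- infinity
# sentinels), applies the matching built-in set operation, and requires the C-level
# result to mark exactly the same points as valid.  For miSet_01_overlap(), for
# example, the union check compares Mi_Union(newMi(0,5), newMi(3,8)) against
# set(range(0,5)) | set(range(3,8)).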
# Some set operations
def miSet_01_overlap():
return (newMi(0,5), newMi(3,8))
def miSet_02_nonoverlap():
return (newMi(0,3), newMi(5,8))
def miSet_03_multiple():
return (newMi(0,3, 5,12), newMi(2,7))
def miSet_04_multiple_disjoint():
return (newMi(0,3, 8,12), newMi(5,7))
def miSet_05_multiple():
return (newMi(0,3, 8,12), newMi(5,7))
def miSet_06_large(offset):
mi1 = newMi(2,5)
mi2 = newMi(2+offset,5+offset)
for i in xrange(-10,10):
addMiRange(mi1, i*10 + 2, i*10 + 5)
addMiRange(mi2, i*10 + 2 + offset, i*10 + 5 + offset)
return (mi1, mi2)
def miSet_07_large(r, n):
random.seed(n + r[0] + r[1])
def get():
s = set()
mi = newMi(0,0)
for i in range(n):
a = random.randint(*r)
b = random.randint(*r)
if a > b:
a, b = b, a
for x in range(a,b):
s.add(x)
addMiRange(mi, a, b)
for i in range(*r):
if i in s:
if not ibd.Mi_IsValid(mi, c_long(i)):
print "marker range = "
ibd.Mi_debug_printMi(mi)
print ""
print ("After adding [%d, %d), %d is not valid"
% (a, b, i) )
raise AssertionError
return mi
return (get(), get())
class TestMarkers(unittest.TestCase):
def checkRanges(self, mi, okay, bad):
for m in okay:
v = ibd.Mi_IsValid(mi, c_long(m))
self.assert_(v != 0, "%d should be valid." % m)
for m in bad:
v = ibd.Mi_IsValid(mi, c_long(m))
self.assert_(v == 0, "%d should not be valid." % m)
def test01Simple(self):
mi = newMi(5, 10)
self.checkRanges(mi, okay=[5,6,7,8,9], bad=[3,4,10,11])
delMi(mi)
def test02_TwoDistinctRanges(self):
mi = newMi(5, 7, 8, 10)
self.checkRanges(mi, okay=[5,6,8,9], bad=[3,4,7,10,11])
delMi(mi)
def test03_ManyDistinctRanges(self):
mi = newMi(0,2)
for i in range(1, 10):
addMiRange(mi, 10*i, 10*i+2)
okay_base = [0,1]
bad_base = [2,3,4,5,6,7,8,9]
for i in range(10):
self.checkRanges(mi,
okay=[10*i + a for a in okay_base],
bad = [10*i + a for a in bad_base])
delMi(mi)
def getNewMi(self, s, e, edge_range_category):
mi = newMi(s,e)
if 'b' in edge_range_category:
addMiRange(mi, -10, -5)
if 'a' in edge_range_category:
addMiRange(mi, 10, 15)
if 'B' in edge_range_category:
for i in range(1, 10):
addMiRange(mi, -10*i, -10*i+5)
if 'A' in edge_range_category:
for i in range(1, 10):
addMiRange(mi, 10*i, 10*i+5)
return mi
def check_ExtendForward_01(self, edge_range_category):
mi = self.getNewMi(0,2,edge_range_category)
self.checkRanges(mi, okay=[0,1], bad = [-1,2,3])
addMiRange(mi, 1, 3)
self.checkRanges(mi, okay=[0,1,2], bad = [-1,3])
delMi(mi)
def test04_ExtendForward_01 (self): self.check_ExtendForward_01('')
def test04_ExtendForward_01a (self): self.check_ExtendForward_01('a')
def test04_ExtendForward_01b (self): self.check_ExtendForward_01('b')
def test04_ExtendForward_01ab(self): self.check_ExtendForward_01('ab')
def test04_ExtendForward_01A (self): self.check_ExtendForward_01('A')
def test04_ExtendForward_01Ab(self): self.check_ExtendForward_01('Ab')
def test04_ExtendForward_01B (self): self.check_ExtendForward_01('B')
def test04_ExtendForward_01aB(self): self.check_ExtendForward_01('aB')
def test04_ExtendForward_01AB(self): self.check_ExtendForward_01('AB')
def check_ExtendForward_02(self, edge_range_category):
mi = self.getNewMi(0,2,edge_range_category)
self.checkRanges(mi, okay=[0,1], bad = [-1,2,3])
addMiRange(mi, 2, 3) # corner case w/ 2
self.checkRanges(mi, okay=[0,1,2], bad = [-1,3])
delMi(mi)
def test04_ExtendForward_02 (self): self.check_ExtendForward_02('')
def test04_ExtendForward_02a (self): self.check_ExtendForward_02('a')
def test04_ExtendForward_02b (self): self.check_ExtendForward_02('b')
def test04_ExtendForward_02ab(self): self.check_ExtendForward_02('ab')
def test04_ExtendForward_02A (self): self.check_ExtendForward_02('A')
def test04_ExtendForward_02Ab(self): self.check_ExtendForward_02('Ab')
def test04_ExtendForward_02B (self): self.check_ExtendForward_02('B')
def test04_ExtendForward_02aB(self): self.check_ExtendForward_02('aB')
def test04_ExtendForward_02AB(self): self.check_ExtendForward_02('AB')
def check_ExtendForward_03(self, edge_range_category):
mi = self.getNewMi(0,2,edge_range_category)
self.checkRanges(mi, okay=[0,1], bad = [-1,2,3])
addMiRange(mi, 0, 3) # corner case w/ 0
self.checkRanges(mi, okay=[0,1,2], bad = [-1,3])
delMi(mi)
def test04_ExtendForward_03 (self): self.check_ExtendForward_03('')
def test04_ExtendForward_03a (self): self.check_ExtendForward_03('a')
def test04_ExtendForward_03b (self): self.check_ExtendForward_03('b')
def test04_ExtendForward_03ab(self): self.check_ExtendForward_03('ab')
def test04_ExtendForward_03A (self): self.check_ExtendForward_03('A')
def test04_ExtendForward_03Ab(self): self.check_ExtendForward_03('Ab')
def test04_ExtendForward_03B (self): self.check_ExtendForward_03('B')
def test04_ExtendForward_03aB(self): self.check_ExtendForward_03('aB')
def test04_ExtendForward_03AB(self): self.check_ExtendForward_03('AB')
def check_Combine_01(self, edge_range_category):
mi = self.getNewMi(0,2,edge_range_category)
addMiRange(mi, 3, 5)
self.checkRanges(mi, okay=[0,1,3,4], bad = [-1,2,6])
addMiRange(mi, 1, 4)
self.checkRanges(mi, okay=[0,1,2,3,4], bad = [-1,5])
delMi(mi)
def test05_Combine_01 (self): self.check_Combine_01('')
def test05_Combine_01a (self): self.check_Combine_01('a')
def test05_Combine_01b (self): self.check_Combine_01('b')
def test05_Combine_01ab(self): self.check_Combine_01('ab')
def test05_Combine_01A (self): self.check_Combine_01('A')
def test05_Combine_01Ab(self): self.check_Combine_01('Ab')
def test05_Combine_01B (self): self.check_Combine_01('B')
def test05_Combine_01aB(self): self.check_Combine_01('aB')
def test05_Combine_01AB(self): self.check_Combine_01('AB')
def check_Combine_02(self, edge_range_category):
mi = self.getNewMi(0,2,edge_range_category)
addMiRange(mi, 3, 5)
self.checkRanges(mi, okay=[0,1,3,4], bad = [-1,2,6])
addMiRange(mi, 2, 3) # Corner case
self.checkRanges(mi, okay=[0,1,2,3,4], bad = [-1,5])
delMi(mi)
def test05_Combine_02 (self): self.check_Combine_02('')
def test05_Combine_02a (self): self.check_Combine_02('a')
def test05_Combine_02b (self): self.check_Combine_02('b')
def test05_Combine_02ab(self): self.check_Combine_02('ab')
def test05_Combine_02A (self): self.check_Combine_02('A')
def test05_Combine_02Ab(self): self.check_Combine_02('Ab')
def test05_Combine_02B (self): self.check_Combine_02('B')
def test05_Combine_02aB(self): self.check_Combine_02('aB')
def test05_Combine_02AB(self): self.check_Combine_02('AB')
def check_Combine_03(self, edge_range_category):
mi = self.getNewMi(0,2,edge_range_category)
addMiRange(mi, 3, 5)
self.checkRanges(mi, okay=[0,1,3,4], bad = [-1,2,6])
addMiRange(mi, 0, 5) # Corner case
self.checkRanges(mi, okay=[0,1,2,3,4], bad = [-1,5])
delMi(mi)
def test05_Combine_03 (self): self.check_Combine_03('')
def test05_Combine_03a (self): self.check_Combine_03('a')
def test05_Combine_03b (self): self.check_Combine_03('b')
def test05_Combine_03ab(self): self.check_Combine_03('ab')
def test05_Combine_03A (self): self.check_Combine_03('A')
def test05_Combine_03Ab(self): self.check_Combine_03('Ab')
def test05_Combine_03B (self): self.check_Combine_03('B')
def test05_Combine_03aB(self): self.check_Combine_03('aB')
def test05_Combine_03AB(self): self.check_Combine_03('AB')
def check_Combine_04(self, edge_range_category):
mi = self.getNewMi(0,2,edge_range_category)
addMiRange(mi, 3, 5)
self.checkRanges(mi, okay=[0,1,3,4], bad = [-1,2,6])
addMiRange(mi, 0, 3) # Corner case
self.checkRanges(mi, okay=[0,1,2,3,4], bad = [-1,5])
delMi(mi)
def test05_Combine_04 (self): self.check_Combine_04('')
def test05_Combine_04a (self): self.check_Combine_04('a')
def test05_Combine_04b (self): self.check_Combine_04('b')
def test05_Combine_04ab(self): self.check_Combine_04('ab')
def test05_Combine_04A (self): self.check_Combine_04('A')
def test05_Combine_04Ab(self): self.check_Combine_04('Ab')
def test05_Combine_04B (self): self.check_Combine_04('B')
def test05_Combine_04aB(self): self.check_Combine_04('aB')
def test05_Combine_04AB(self): self.check_Combine_04('AB')
def check_Combine_05(self, edge_range_category):
mi = self.getNewMi(0,2,edge_range_category)
addMiRange(mi, 3, 5)
self.checkRanges(mi, okay=[0,1,3,4], bad = [-1,2,6])
addMiRange(mi, 2, 5) # Corner case
self.checkRanges(mi, okay=[0,1,2,3,4], bad = [-1,5])
delMi(mi)
def test05_Combine_05 (self): self.check_Combine_05('')
def test05_Combine_05a (self): self.check_Combine_05('a')
def test05_Combine_05b (self): self.check_Combine_05('b')
def test05_Combine_05ab(self): self.check_Combine_05('ab')
def test05_Combine_05A (self): self.check_Combine_05('A')
def test05_Combine_05Ab(self): self.check_Combine_05('Ab')
def test05_Combine_05B (self): self.check_Combine_05('B')
def test05_Combine_05aB(self): self.check_Combine_05('aB')
def test05_Combine_05AB(self): self.check_Combine_05('AB')
def check_ExtendBack_01(self, edge_range_category):
mi = self.getNewMi(1,3,edge_range_category)
self.checkRanges(mi, okay=[1,2], bad = [-1,0,3])
addMiRange(mi, 0, 2)
self.checkRanges(mi, okay=[0,1,2], bad = [-1,3])
delMi(mi)
def test06_ExtendBack_01 (self): self.check_ExtendBack_01('')
def test06_ExtendBack_01a (self): self.check_ExtendBack_01('a')
def test06_ExtendBack_01b (self): self.check_ExtendBack_01('b')
def test06_ExtendBack_01ab(self): self.check_ExtendBack_01('ab')
def test06_ExtendBack_01A (self): self.check_ExtendBack_01('A')
def test06_ExtendBack_01Ab(self): self.check_ExtendBack_01('Ab')
def test06_ExtendBack_01B (self): self.check_ExtendBack_01('B')
def test06_ExtendBack_01aB(self): self.check_ExtendBack_01('aB')
def test06_ExtendBack_01AB(self): self.check_ExtendBack_01('AB')
def check_ExtendBack_02(self, edge_range_category):
mi = self.getNewMi(1,3,edge_range_category)
self.checkRanges(mi, okay=[1,2], bad = [-1,0,3])
addMiRange(mi, 0, 1) # corner case w/ 2
self.checkRanges(mi, okay=[0,1,2], bad = [-1,3])
delMi(mi)
def test06_ExtendBack_02 (self): self.check_ExtendBack_02('')
def test06_ExtendBack_02a (self): self.check_ExtendBack_02('a')
def test06_ExtendBack_02b (self): self.check_ExtendBack_02('b')
def test06_ExtendBack_02ab(self): self.check_ExtendBack_02('ab')
def test06_ExtendBack_02A (self): self.check_ExtendBack_02('A')
def test06_ExtendBack_02Ab(self): self.check_ExtendBack_02('Ab')
def test06_ExtendBack_02B (self): self.check_ExtendBack_02('B')
def test06_ExtendBack_02aB(self): self.check_ExtendBack_02('aB')
def test06_ExtendBack_02AB(self): self.check_ExtendBack_02('AB')
def check_ExtendBack_03(self, edge_range_category):
mi = self.getNewMi(1,3,edge_range_category)
self.checkRanges(mi, okay=[1,2], bad = [-1,0,3])
addMiRange(mi, 0, 3) # corner case w/ 0
self.checkRanges(mi, okay=[0,1,2], bad = [-1,3])
delMi(mi)
def test06_ExtendBack_03 (self): self.check_ExtendBack_03('')
def test06_ExtendBack_03a (self): self.check_ExtendBack_03('a')
def test06_ExtendBack_03b (self): self.check_ExtendBack_03('b')
def test06_ExtendBack_03ab(self): self.check_ExtendBack_03('ab')
def test06_ExtendBack_03A (self): self.check_ExtendBack_03('A')
def test06_ExtendBack_03Ab(self): self.check_ExtendBack_03('Ab')
def test06_ExtendBack_03B (self): self.check_ExtendBack_03('B')
def test06_ExtendBack_03aB(self): self.check_ExtendBack_03('aB')
def test06_ExtendBack_03AB(self): self.check_ExtendBack_03('AB')
def check_ExtendBoth_01(self, edge_range_category):
mi = self.getNewMi(1,3,edge_range_category)
self.checkRanges(mi, okay=[1,2], bad = [-1,0,3])
addMiRange(mi, 0, 5) # corner case w/ 0
self.checkRanges(mi, okay=[0,1,2,3,4], bad = [-1,5])
delMi(mi)
def test07_ExtendBoth_01 (self): self.check_ExtendBoth_01('')
def test07_ExtendBoth_01a (self): self.check_ExtendBoth_01('a')
def test07_ExtendBoth_01b (self): self.check_ExtendBoth_01('b')
def test07_ExtendBoth_01ab(self): self.check_ExtendBoth_01('ab')
def test07_ExtendBoth_01A (self): self.check_ExtendBoth_01('A')
def test07_ExtendBoth_01Ab(self): self.check_ExtendBoth_01('Ab')
def test07_ExtendBoth_01B (self): self.check_ExtendBoth_01('B')
def test07_ExtendBoth_01aB(self): self.check_ExtendBoth_01('aB')
def test07_ExtendBoth_01AB(self): self.check_ExtendBoth_01('AB')
def check_ChangeNothing_01(self, edge_range_category):
mi = self.getNewMi(0,4,edge_range_category)
self.checkRanges(mi, okay=[0,1,2,3], bad = [-1,4,5])
addMiRange(mi, 1, 2)
self.checkRanges(mi, okay=[0,1,2,3], bad = [-1,4,5])
delMi(mi)
def test08_ChangeNothing_01 (self): self.check_ChangeNothing_01('')
def test08_ChangeNothing_01a (self): self.check_ChangeNothing_01('a')
def test08_ChangeNothing_01b (self): self.check_ChangeNothing_01('b')
def test08_ChangeNothing_01ab(self): self.check_ChangeNothing_01('ab')
def test08_ChangeNothing_01A (self): self.check_ChangeNothing_01('A')
def test08_ChangeNothing_01Ab(self): self.check_ChangeNothing_01('Ab')
def test08_ChangeNothing_01B (self): self.check_ChangeNothing_01('B')
def test08_ChangeNothing_01aB(self): self.check_ChangeNothing_01('aB')
def test08_ChangeNothing_01AB(self): self.check_ChangeNothing_01('AB')
def check_ChangeNothing_02(self, edge_range_category):
mi = self.getNewMi(0,4,edge_range_category)
self.checkRanges(mi, okay=[0,1,2,3], bad = [-1,4,5])
addMiRange(mi, 0, 2) # corner case w/ 0
self.checkRanges(mi, okay=[0,1,2,3], bad = [-1,4,5])
delMi(mi)
def test08_ChangeNothing_02 (self): self.check_ChangeNothing_02('')
def test08_ChangeNothing_02a (self): self.check_ChangeNothing_02('a')
def test08_ChangeNothing_02b (self): self.check_ChangeNothing_02('b')
def test08_ChangeNothing_02ab(self): self.check_ChangeNothing_02('ab')
def test08_ChangeNothing_02A (self): self.check_ChangeNothing_02('A')
def test08_ChangeNothing_02Ab(self): self.check_ChangeNothing_02('Ab')
def test08_ChangeNothing_02B (self): self.check_ChangeNothing_02('B')
def test08_ChangeNothing_02aB(self): self.check_ChangeNothing_02('aB')
def test08_ChangeNothing_02AB(self): self.check_ChangeNothing_02('AB')
def check_ChangeNothing_03(self, edge_range_category):
mi = self.getNewMi(0,4,edge_range_category)
self.checkRanges(mi, okay=[0,1,2,3], bad = [-1,4,5])
addMiRange(mi, 0, 4) # corner case w/ 0
self.checkRanges(mi, okay=[0,1,2,3], bad = [-1,4,5])
delMi(mi)
def test08_ChangeNothing_03 (self): self.check_ChangeNothing_03('')
def test08_ChangeNothing_03a (self): self.check_ChangeNothing_03('a')
def test08_ChangeNothing_03b (self): self.check_ChangeNothing_03('b')
def test08_ChangeNothing_03ab(self): self.check_ChangeNothing_03('ab')
def test08_ChangeNothing_03A (self): self.check_ChangeNothing_03('A')
def test08_ChangeNothing_03Ab(self): self.check_ChangeNothing_03('Ab')
def test08_ChangeNothing_03B (self): self.check_ChangeNothing_03('B')
def test08_ChangeNothing_03aB(self): self.check_ChangeNothing_03('aB')
def test08_ChangeNothing_03AB(self): self.check_ChangeNothing_03('AB')
def check_ChangeNothing_04(self, edge_range_category):
mi = self.getNewMi(0,4,edge_range_category)
self.checkRanges(mi, okay=[0,1,2,3], bad = [-1,4,5])
addMiRange(mi, 1, 4) # corner case w/ 0
self.checkRanges(mi, okay=[0,1,2,3], bad = [-1,4,5])
delMi(mi)
def test08_ChangeNothing_04 (self): self.check_ChangeNothing_04('')
def test08_ChangeNothing_04a (self): self.check_ChangeNothing_04('a')
def test08_ChangeNothing_04b (self): self.check_ChangeNothing_04('b')
def test08_ChangeNothing_04ab(self): self.check_ChangeNothing_04('ab')
def test08_ChangeNothing_04A (self): self.check_ChangeNothing_04('A')
def test08_ChangeNothing_04Ab(self): self.check_ChangeNothing_04('Ab')
def test08_ChangeNothing_04B (self): self.check_ChangeNothing_04('B')
def test08_ChangeNothing_04aB(self): self.check_ChangeNothing_04('aB')
def test08_ChangeNothing_04AB(self): self.check_ChangeNothing_04('AB')
def getMiiList(self, mi):
mii = newMii(mi)
ac = []
while True:
mr = MarkerRange()
okay = Mii_Next(byref(mr), mii)
if okay:
ac.append( (ibd.Mr_Start(byref(mr)), ibd.Mr_End(byref(mr)) ) )
else:
delMii(mii)
return ac
def test10_MarkerRangeIterator_01_simple(self):
mi = newMi(5, 7)
self.assert_(self.getMiiList(mi) == [ (5, 7) ] )
def test10_MarkerRangeIterator_02_simple(self):
mi = newMi(5, 7)
addMiRange(mi, 8, 10)
l = self.getMiiList(mi)
self.assert_(l == [ (5, 7), (8, 10) ] )
def test10_MarkerRangeIterator_03_simple(self):
mi = newMi(5, 7)
addMiRange(mi, 9, 11)
addMiRange(mi, 12, 14)
addMiRange(mi, 13, 15)
l = self.getMiiList(mi)
self.assert_(l == [ (5, 7), (9, 11), (12,15) ] )
def test10_MarkerRangeIterator_04_corner(self):
mi = newMi(0, 0)
l = self.getMiiList(mi)
self.assert_(l == [], l)
def test10_MarkerRangeIterator_05_corner(self):
mi = newMi(3, 3)
l = self.getMiiList(mi)
self.assert_(l == [], l)
def test10_MarkerRangeIterator_06_corner(self):
l = self.getMiiList(0)
self.assert_(l == [(mr_minus_inf, mr_plus_inf)])
def getMiriList(self, mi):
mii = newMiri(mi)
ac = []
while True:
mr = MarkerRange()
okay = Miri_Next(byref(mr), mii)
if okay:
ac.append( (ibd.Mr_Start(byref(mr)), ibd.Mr_End(byref(mr)) ) )
else:
delMiri(mii)
return ac
def test11_MarkerRangeRevIterator_01_simple(self):
mi = newMi(5, 7)
self.assert_(self.getMiriList(mi) == [ (5, 7) ] )
def test11_MarkerRangeRevIterator_02_simple(self):
mi = newMi(5, 7)
addMiRange(mi, 8, 10)
self.assert_(self.getMiriList(mi) == [ (8, 10), (5, 7) ] )
def test11_MarkerRangeRevIterator_04_corner(self):
mi = newMi(0, 0)
l = self.getMiriList(mi)
self.assert_(l == [])
def test11_MarkerRangeRevIterator_05_corner(self):
mi = newMi(3, 3)
l = self.getMiriList(mi)
self.assert_(l == [])
def test11_MarkerRangeRevIterator_06_corner(self):
l = self.getMiriList(0)
self.assert_(l == [(mr_minus_inf, mr_plus_inf)])
def test20_SetComplement_01(self):
mi = newMi(4,10)
in_range = range(4,10)
out_range = [mr_minus_inf] + range(-10, 4) + range(10,20) + [mr_plus_inf]
checkRanges(mi, in_range, out_range)
mi_c = Mi_Complement(mi)
checkRanges(mi_c, out_range, in_range)
delMi(mi, mi_c)
def test20_SetComplement_02(self):
mi = newMi(4,10, 12, 20)
in_range = range(4,10) + range(12,20)
out_range = ([mr_minus_inf]
+ range(-10, 4) + range(10,12) + range(20,30)
+ [mr_plus_inf])
checkRanges(mi, in_range, out_range)
mi_c = Mi_Complement(mi)
checkRanges(mi_c, out_range, in_range)
delMi(mi, mi_c)
def test20_SetComplement_03(self):
in_range = []
out_range = [mr_minus_inf] + [mr_plus_inf]
mi = newMi(0,0)
for i in xrange(50):
addMiRange(mi, i*10 + 3, i*10 + 7)
in_range += range(i*10 + 3, i*10 + 7)
out_range += range(i*10, i*10 + 3)
out_range += range(i*10 + 7, i*10 + 10)
checkRanges(mi, in_range, out_range)
mi_c = Mi_Complement(mi)
checkRanges(mi_c, out_range, in_range)
delMi(mi, mi_c)
# Set union operations
def test21_SetUnion_01_overlap(self):
checkSetOperation("union", *miSet_01_overlap())
def test21_SetUnion_02_nonoverlap(self):
checkSetOperation("union", *miSet_02_nonoverlap())
def test21_SetUnion_03_multiple(self):
checkSetOperation("union", *miSet_03_multiple())
def test21_SetUnion_04_multiple_disjoint(self):
checkSetOperation("union", *miSet_04_multiple_disjoint())
def test21_SetUnion_05_multiple(self):
checkSetOperation("union", *miSet_05_multiple())
def test21_SetUnion_06_large_01(self):
checkSetOperation("union", *miSet_06_large(0))
def test21_SetUnion_06_large_02(self):
checkSetOperation("union", *miSet_06_large(1))
def test21_SetUnion_06_large_03(self):
checkSetOperation("union", *miSet_06_large(3))
def test21_SetUnion_06_large_04(self):
checkSetOperation("union", *miSet_06_large(5))
def test21_SetUnion_06_large_05(self):
checkSetOperation("union", *miSet_06_large(10))
def test21_SetUnion_07_small_01(self):
checkSetOperation("union", *miSet_07_large( (-10, 10), 5))
def test21_SetUnion_07_medium_01(self):
for i in range(2,50):
checkSetOperation("union", *miSet_07_large( (-100, 100), i))
def test21_SetUnion_07_large_01(self):
for i in range(2,500,25):
checkSetOperation("union", *miSet_07_large( (-1000, 1000), i))
# Set intersection operations
def test22_SetIntersection_01_overlap(self):
checkSetOperation("intersection", *miSet_01_overlap())
def test22_SetIntersection_02_nonoverlap(self):
checkSetOperation("intersection", *miSet_02_nonoverlap())
def test22_SetIntersection_03_multiple(self):
checkSetOperation("intersection", *miSet_03_multiple())
def test22_SetIntersection_04_multiple_disjoint(self):
checkSetOperation("intersection", *miSet_04_multiple_disjoint())
def test22_SetIntersection_05_multiple(self):
checkSetOperation("intersection", *miSet_05_multiple())
def test22_SetIntersection_06_large_01(self):
checkSetOperation("intersection", *miSet_06_large(0))
def test22_SetIntersection_06_large_02(self):
checkSetOperation("intersection", *miSet_06_large(1))
def test22_SetIntersection_06_large_03(self):
checkSetOperation("intersection", *miSet_06_large(3))
def test22_SetIntersection_06_large_04(self):
checkSetOperation("intersection", *miSet_06_large(5))
def test22_SetIntersection_06_large_05(self):
checkSetOperation("intersection", *miSet_06_large(10))
def test22_SetIntersection_07_small_01(self):
checkSetOperation("intersection", *miSet_07_large( (-10, 10), 5))
def test22_SetIntersection_07_medium_01(self):
for i in range(2,50):
checkSetOperation("intersection", *miSet_07_large( (-100, 100), i))
def test22_SetIntersection_07_large_01(self):
for i in range(2,500,25):
checkSetOperation("intersection", *miSet_07_large( (-1000, 1000), i))
# Set difference operations
def test23_SetDifference_01_overlap(self):
checkSetOperation("difference", *miSet_01_overlap())
def test23_SetDifference_02_nonoverlap(self):
checkSetOperation("difference", *miSet_02_nonoverlap())
def test23_SetDifference_03_multiple(self):
checkSetOperation("difference", *miSet_03_multiple())
def test23_SetDifference_04_multiple_disjoint(self):
checkSetOperation("difference", *miSet_04_multiple_disjoint())
def test23_SetDifference_05_multiple(self):
checkSetOperation("difference", *miSet_05_multiple())
def test23_SetDifference_06_large_01(self):
checkSetOperation("difference", *miSet_06_large(0))
def test23_SetDifference_06_large_02(self):
checkSetOperation("difference", *miSet_06_large(1))
def test23_SetDifference_06_large_03(self):
checkSetOperation("difference", *miSet_06_large(3))
def test23_SetDifference_06_large_04(self):
checkSetOperation("difference", *miSet_06_large(5))
def test23_SetDifference_06_large_05(self):
checkSetOperation("difference", *miSet_06_large(10))
def test23_SetDifference_07_small_01(self):
checkSetOperation("difference", *miSet_07_large( (-10, 10), 5))
def test23_SetDifference_07_medium_01(self):
for i in range(2,50):
checkSetOperation("difference", *miSet_07_large( (-100, 100), i))
def test23_SetDifference_07_large_01(self):
for i in range(2,500,25):
checkSetOperation("difference", *miSet_07_large( (-1000, 1000), i))
# Set symmetricDifference operations
def test24_SetSymmetricDifference_01_overlap(self):
checkSetOperation("symmetricDifference", *miSet_01_overlap())
def test24_SetSymmetricDifference_02_nonoverlap(self):
checkSetOperation("symmetricDifference", *miSet_02_nonoverlap())
def test24_SetSymmetricDifference_03_multiple(self):
checkSetOperation("symmetricDifference", *miSet_03_multiple())
def test24_SetSymmetricDifference_04_multiple_disjoint(self):
checkSetOperation("symmetricDifference", *miSet_04_multiple_disjoint())
def test24_SetSymmetricDifference_05_multiple(self):
checkSetOperation("symmetricDifference", *miSet_05_multiple())
def test24_SetSymmetricDifference_06_large_01(self):
checkSetOperation("symmetricDifference", *miSet_06_large(0))
def test24_SetSymmetricDifference_06_large_02(self):
checkSetOperation("symmetricDifference", *miSet_06_large(1))
def test24_SetSymmetricDifference_06_large_03(self):
checkSetOperation("symmetricDifference", *miSet_06_large(3))
def test24_SetSymmetricDifference_06_large_04(self):
checkSetOperation("symmetricDifference", *miSet_06_large(5))
def test24_SetSymmetricDifference_06_large_05(self):
checkSetOperation("symmetricDifference", *miSet_06_large(10))
def test24_SetSymmetricDifference_07_small_01(self):
checkSetOperation("symmetricDifference", *miSet_07_large( (-10, 10), 5))
def test24_SetSymmetricDifference_07_medium_01(self):
for i in range(2,50):
checkSetOperation("symmetricDifference", *miSet_07_large( (-100, 100), i))
def test24_SetSymmetricDifference_07_large_01(self):
for i in range(2,500,25):
checkSetOperation("symmetricDifference", *miSet_07_large( (-1000, 1000), i))
if __name__ == '__main__':
unittest.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import TYPE_CHECKING, Dict, Optional, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.dms import DmsHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class DmsCreateTaskOperator(BaseOperator):
"""
Creates AWS DMS replication task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsCreateTaskOperator`
:param replication_task_id: Replication task id
:type replication_task_id: str
:param source_endpoint_arn: Source endpoint ARN
:type source_endpoint_arn: str
:param target_endpoint_arn: Target endpoint ARN
:type target_endpoint_arn: str
:param replication_instance_arn: Replication instance ARN
:type replication_instance_arn: str
:param table_mappings: Table mappings
:type table_mappings: dict
:param migration_type: Migration type ('full-load'|'cdc'|'full-load-and-cdc'), full-load by default.
:type migration_type: str
:param create_task_kwargs: Extra arguments for DMS replication task creation.
:type create_task_kwargs: Optional[dict]
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:type aws_conn_id: Optional[str]
"""
template_fields: Sequence[str] = (
'replication_task_id',
'source_endpoint_arn',
'target_endpoint_arn',
'replication_instance_arn',
'table_mappings',
'migration_type',
'create_task_kwargs',
)
template_ext: Sequence[str] = ()
template_fields_renderers = {
"table_mappings": "json",
"create_task_kwargs": "json",
}
def __init__(
self,
*,
replication_task_id: str,
source_endpoint_arn: str,
target_endpoint_arn: str,
replication_instance_arn: str,
table_mappings: dict,
migration_type: str = 'full-load',
create_task_kwargs: Optional[dict] = None,
aws_conn_id: str = 'aws_default',
**kwargs,
):
super().__init__(**kwargs)
self.replication_task_id = replication_task_id
self.source_endpoint_arn = source_endpoint_arn
self.target_endpoint_arn = target_endpoint_arn
self.replication_instance_arn = replication_instance_arn
self.migration_type = migration_type
self.table_mappings = table_mappings
self.create_task_kwargs = create_task_kwargs or {}
self.aws_conn_id = aws_conn_id
def execute(self, context: 'Context'):
"""
Creates AWS DMS replication task from Airflow
:return: replication task arn
"""
dms_hook = DmsHook(aws_conn_id=self.aws_conn_id)
task_arn = dms_hook.create_replication_task(
replication_task_id=self.replication_task_id,
source_endpoint_arn=self.source_endpoint_arn,
target_endpoint_arn=self.target_endpoint_arn,
replication_instance_arn=self.replication_instance_arn,
migration_type=self.migration_type,
table_mappings=self.table_mappings,
**self.create_task_kwargs,
)
self.log.info("DMS replication task(%s) is ready.", self.replication_task_id)
return task_arn
class DmsDeleteTaskOperator(BaseOperator):
"""
Deletes AWS DMS replication task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsDeleteTaskOperator`
:param replication_task_arn: Replication task ARN
:type replication_task_arn: str
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:type aws_conn_id: Optional[str]
"""
template_fields: Sequence[str] = ('replication_task_arn',)
template_ext: Sequence[str] = ()
template_fields_renderers: Dict[str, str] = {}
def __init__(
self,
*,
replication_task_arn: Optional[str] = None,
aws_conn_id: str = 'aws_default',
**kwargs,
):
super().__init__(**kwargs)
self.replication_task_arn = replication_task_arn
self.aws_conn_id = aws_conn_id
def execute(self, context: 'Context'):
"""
Deletes AWS DMS replication task from Airflow
:return: replication task arn
"""
dms_hook = DmsHook(aws_conn_id=self.aws_conn_id)
dms_hook.delete_replication_task(replication_task_arn=self.replication_task_arn)
self.log.info("DMS replication task(%s) has been deleted.", self.replication_task_arn)
class DmsDescribeTasksOperator(BaseOperator):
"""
Describes AWS DMS replication tasks.
:param describe_tasks_kwargs: Describe tasks command arguments
:type describe_tasks_kwargs: Optional[dict]
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:type aws_conn_id: Optional[str]
"""
template_fields: Sequence[str] = ('describe_tasks_kwargs',)
template_ext: Sequence[str] = ()
template_fields_renderers: Dict[str, str] = {'describe_tasks_kwargs': 'json'}
def __init__(
self,
*,
describe_tasks_kwargs: Optional[dict] = None,
aws_conn_id: str = 'aws_default',
**kwargs,
):
super().__init__(**kwargs)
self.describe_tasks_kwargs = describe_tasks_kwargs or {}
self.aws_conn_id = aws_conn_id
def execute(self, context: 'Context'):
"""
Describes AWS DMS replication tasks from Airflow
:return: Marker and list of replication tasks
:rtype: (Optional[str], list)
"""
dms_hook = DmsHook(aws_conn_id=self.aws_conn_id)
return dms_hook.describe_replication_tasks(**self.describe_tasks_kwargs)
class DmsStartTaskOperator(BaseOperator):
"""
Starts AWS DMS replication task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsStartTaskOperator`
:param replication_task_arn: Replication task ARN
:type replication_task_arn: str
:param start_replication_task_type: Replication task start type (default='start-replication')
('start-replication'|'resume-processing'|'reload-target')
:type start_replication_task_type: str
:param start_task_kwargs: Extra start replication task arguments
:type start_task_kwargs: Optional[dict]
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:type aws_conn_id: Optional[str]
"""
template_fields: Sequence[str] = (
'replication_task_arn',
'start_replication_task_type',
'start_task_kwargs',
)
template_ext: Sequence[str] = ()
template_fields_renderers = {'start_task_kwargs': 'json'}
def __init__(
self,
*,
replication_task_arn: str,
start_replication_task_type: str = 'start-replication',
start_task_kwargs: Optional[dict] = None,
aws_conn_id: str = 'aws_default',
**kwargs,
):
super().__init__(**kwargs)
self.replication_task_arn = replication_task_arn
self.start_replication_task_type = start_replication_task_type
self.start_task_kwargs = start_task_kwargs or {}
self.aws_conn_id = aws_conn_id
def execute(self, context: 'Context'):
"""
Starts AWS DMS replication task from Airflow
:return: replication task arn
"""
dms_hook = DmsHook(aws_conn_id=self.aws_conn_id)
dms_hook.start_replication_task(
replication_task_arn=self.replication_task_arn,
start_replication_task_type=self.start_replication_task_type,
**self.start_task_kwargs,
)
self.log.info("DMS replication task(%s) is starting.", self.replication_task_arn)
class DmsStopTaskOperator(BaseOperator):
"""
Stops AWS DMS replication task.
:param replication_task_arn: Replication task ARN
:type replication_task_arn: str
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:type aws_conn_id: Optional[str]
"""
template_fields: Sequence[str] = ('replication_task_arn',)
template_ext: Sequence[str] = ()
template_fields_renderers: Dict[str, str] = {}
def __init__(
self,
*,
replication_task_arn: Optional[str] = None,
aws_conn_id: str = 'aws_default',
**kwargs,
):
super().__init__(**kwargs)
self.replication_task_arn = replication_task_arn
self.aws_conn_id = aws_conn_id
def execute(self, context: 'Context'):
"""
Stops AWS DMS replication task from Airflow
:return: replication task arn
"""
dms_hook = DmsHook(aws_conn_id=self.aws_conn_id)
dms_hook.stop_replication_task(replication_task_arn=self.replication_task_arn)
self.log.info("DMS replication task(%s) is stopping.", self.replication_task_arn)
|
|
# -*- test-case-name: twisted.conch.test.test_cftp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation module for the I{cftp} command.
"""
from __future__ import division, print_function
import os, sys, getpass, struct, tty, fcntl, stat
import fnmatch, pwd, glob
from twisted.conch.client import connect, default, options
from twisted.conch.ssh import connection, common
from twisted.conch.ssh import channel, filetransfer
from twisted.protocols import basic
from twisted.internet import reactor, stdio, defer, utils
from twisted.python import log, usage, failure
class ClientOptions(options.ConchOptions):
synopsis = """Usage: cftp [options] [user@]host
cftp [options] [user@]host[:dir[/]]
cftp [options] [user@]host[:file [localfile]]
"""
longdesc = ("cftp is a client for logging into a remote machine and "
"executing commands to send and receive file information")
optParameters = [
['buffersize', 'B', 32768, 'Size of the buffer to use for sending/receiving.'],
['batchfile', 'b', None, 'File to read commands from, or \'-\' for stdin.'],
['requests', 'R', 5, 'Number of requests to make before waiting for a reply.'],
['subsystem', 's', 'sftp', 'Subsystem/server program to connect to.']]
compData = usage.Completions(
descriptions={
"buffersize": "Size of send/receive buffer (default: 32768)"},
extraActions=[usage.CompleteUserAtHost(),
usage.CompleteFiles(descr="local file")])
def parseArgs(self, host, localPath=None):
self['remotePath'] = ''
if ':' in host:
host, self['remotePath'] = host.split(':', 1)
            self['remotePath'] = self['remotePath'].rstrip('/')
self['host'] = host
self['localPath'] = localPath
def run():
# import hotshot
# prof = hotshot.Profile('cftp.prof')
# prof.start()
args = sys.argv[1:]
if '-l' in args: # cvs is an idiot
i = args.index('-l')
args = args[i:i+2]+args
del args[i+2:i+4]
options = ClientOptions()
try:
options.parseOptions(args)
except usage.UsageError as u:
print('ERROR: %s' % u)
sys.exit(1)
if options['log']:
realout = sys.stdout
log.startLogging(sys.stderr)
sys.stdout = realout
else:
log.discardLogs()
doConnect(options)
reactor.run()
# prof.stop()
# prof.close()
def handleError():
global exitStatus
exitStatus = 2
try:
reactor.stop()
except: pass
log.err(failure.Failure())
raise
def doConnect(options):
# log.deferr = handleError # HACK
if '@' in options['host']:
options['user'], options['host'] = options['host'].split('@',1)
host = options['host']
if not options['user']:
options['user'] = getpass.getuser()
if not options['port']:
options['port'] = 22
else:
options['port'] = int(options['port'])
host = options['host']
port = options['port']
conn = SSHConnection()
conn.options = options
vhk = default.verifyHostKey
uao = default.SSHUserAuthClient(options['user'], options, conn)
connect.connect(host, port, options, vhk, uao).addErrback(_ebExit)
def _ebExit(f):
#global exitStatus
if hasattr(f.value, 'value'):
s = f.value.value
else:
s = str(f)
print(s)
#exitStatus = "conch: exiting with error %s" % f
try:
reactor.stop()
except: pass
def _ignore(*args): pass
class FileWrapper:
def __init__(self, f):
self.f = f
self.total = 0.0
f.seek(0, 2) # seek to the end
self.size = f.tell()
def __getattr__(self, attr):
return getattr(self.f, attr)
class StdioClient(basic.LineReceiver):
_pwd = pwd
ps = 'cftp> '
delimiter = '\n'
reactor = reactor
def __init__(self, client, f = None):
self.client = client
self.currentDirectory = ''
self.file = f
self.useProgressBar = (not f and 1) or 0
def connectionMade(self):
self.client.realPath('').addCallback(self._cbSetCurDir)
def _cbSetCurDir(self, path):
self.currentDirectory = path
self._newLine()
def lineReceived(self, line):
if self.client.transport.localClosed:
return
log.msg('got line %s' % repr(line))
line = line.lstrip()
if not line:
self._newLine()
return
if self.file and line.startswith('-'):
self.ignoreErrors = 1
line = line[1:]
else:
self.ignoreErrors = 0
d = self._dispatchCommand(line)
if d is not None:
d.addCallback(self._cbCommand)
d.addErrback(self._ebCommand)
def _dispatchCommand(self, line):
if ' ' in line:
command, rest = line.split(' ', 1)
rest = rest.lstrip()
else:
command, rest = line, ''
if command.startswith('!'): # command
f = self.cmd_EXEC
rest = (command[1:] + ' ' + rest).strip()
else:
command = command.upper()
log.msg('looking up cmd %s' % command)
f = getattr(self, 'cmd_%s' % command, None)
if f is not None:
return defer.maybeDeferred(f, rest)
else:
self._ebCommand(failure.Failure(NotImplementedError(
"No command called `%s'" % command)))
self._newLine()
def _printFailure(self, f):
log.msg(f)
e = f.trap(NotImplementedError, filetransfer.SFTPError, OSError, IOError)
if e == NotImplementedError:
self.transport.write(self.cmd_HELP(''))
elif e == filetransfer.SFTPError:
self.transport.write("remote error %i: %s\n" %
(f.value.code, f.value.message))
elif e in (OSError, IOError):
self.transport.write("local error %i: %s\n" %
(f.value.errno, f.value.strerror))
def _newLine(self):
if self.client.transport.localClosed:
return
self.transport.write(self.ps)
self.ignoreErrors = 0
if self.file:
l = self.file.readline()
if not l:
self.client.transport.loseConnection()
else:
self.transport.write(l)
self.lineReceived(l.strip())
def _cbCommand(self, result):
if result is not None:
self.transport.write(result)
if not result.endswith('\n'):
self.transport.write('\n')
self._newLine()
def _ebCommand(self, f):
self._printFailure(f)
if self.file and not self.ignoreErrors:
self.client.transport.loseConnection()
self._newLine()
def cmd_CD(self, path):
path, rest = self._getFilename(path)
if not path.endswith('/'):
path += '/'
newPath = path and os.path.join(self.currentDirectory, path) or ''
d = self.client.openDirectory(newPath)
d.addCallback(self._cbCd)
d.addErrback(self._ebCommand)
return d
def _cbCd(self, directory):
directory.close()
d = self.client.realPath(directory.name)
d.addCallback(self._cbCurDir)
return d
def _cbCurDir(self, path):
self.currentDirectory = path
def cmd_CHGRP(self, rest):
grp, rest = rest.split(None, 1)
path, rest = self._getFilename(rest)
grp = int(grp)
d = self.client.getAttrs(path)
d.addCallback(self._cbSetUsrGrp, path, grp=grp)
return d
def cmd_CHMOD(self, rest):
mod, rest = rest.split(None, 1)
path, rest = self._getFilename(rest)
mod = int(mod, 8)
d = self.client.setAttrs(path, {'permissions':mod})
d.addCallback(_ignore)
return d
def cmd_CHOWN(self, rest):
usr, rest = rest.split(None, 1)
path, rest = self._getFilename(rest)
usr = int(usr)
d = self.client.getAttrs(path)
d.addCallback(self._cbSetUsrGrp, path, usr=usr)
return d
    def _cbSetUsrGrp(self, attrs, path, usr=None, grp=None):
        new = {}
        # Use conditional expressions instead of the and/or idiom so that a
        # requested uid/gid of 0 (root) is not silently discarded.
        new['uid'] = usr if usr is not None else attrs['uid']
        new['gid'] = grp if grp is not None else attrs['gid']
        d = self.client.setAttrs(path, new)
        d.addCallback(_ignore)
        return d
def cmd_GET(self, rest):
remote, rest = self._getFilename(rest)
if '*' in remote or '?' in remote: # wildcard
if rest:
local, rest = self._getFilename(rest)
if not os.path.isdir(local):
return "Wildcard get with non-directory target."
else:
local = ''
d = self._remoteGlob(remote)
d.addCallback(self._cbGetMultiple, local)
return d
if rest:
local, rest = self._getFilename(rest)
else:
local = os.path.split(remote)[1]
log.msg((remote, local))
        lf = open(local, 'wb', 0)  # binary mode: SFTP chunks are raw bytes
path = os.path.join(self.currentDirectory, remote)
d = self.client.openFile(path, filetransfer.FXF_READ, {})
d.addCallback(self._cbGetOpenFile, lf)
d.addErrback(self._ebCloseLf, lf)
return d
def _cbGetMultiple(self, files, local):
#if self._useProgressBar: # one at a time
# XXX this can be optimized for times w/o progress bar
return self._cbGetMultipleNext(None, files, local)
def _cbGetMultipleNext(self, res, files, local):
if isinstance(res, failure.Failure):
self._printFailure(res)
elif res:
self.transport.write(res)
if not res.endswith('\n'):
self.transport.write('\n')
if not files:
return
f = files.pop(0)[0]
        lf = open(os.path.join(local, os.path.split(f)[1]), 'wb', 0)
path = os.path.join(self.currentDirectory, f)
d = self.client.openFile(path, filetransfer.FXF_READ, {})
d.addCallback(self._cbGetOpenFile, lf)
d.addErrback(self._ebCloseLf, lf)
d.addBoth(self._cbGetMultipleNext, files, local)
return d
def _ebCloseLf(self, f, lf):
lf.close()
return f
def _cbGetOpenFile(self, rf, lf):
return rf.getAttrs().addCallback(self._cbGetFileSize, rf, lf)
def _cbGetFileSize(self, attrs, rf, lf):
if not stat.S_ISREG(attrs['permissions']):
rf.close()
lf.close()
return "Can't get non-regular file: %s" % rf.name
rf.size = attrs['size']
bufferSize = self.client.transport.conn.options['buffersize']
numRequests = self.client.transport.conn.options['requests']
rf.total = 0.0
dList = []
chunks = []
startTime = self.reactor.seconds()
for i in range(numRequests):
d = self._cbGetRead('', rf, lf, chunks, 0, bufferSize, startTime)
dList.append(d)
dl = defer.DeferredList(dList, fireOnOneErrback=1)
dl.addCallback(self._cbGetDone, rf, lf)
return dl
def _getNextChunk(self, chunks):
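        # Return the next (offset, length) to request. 'chunks' is an ordered
        # list of (start, end) ranges already scheduled; gaps left by short
        # reads are re-issued first, an 'eof' end marker stops further
        # requests, and otherwise a fresh buffer-sized range is appended past
        # the last scheduled end.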
end = 0
for chunk in chunks:
if end == 'eof':
return # nothing more to get
if end != chunk[0]:
i = chunks.index(chunk)
chunks.insert(i, (end, chunk[0]))
return (end, chunk[0] - end)
end = chunk[1]
bufSize = int(self.client.transport.conn.options['buffersize'])
chunks.append((end, end + bufSize))
return (end, bufSize)
def _cbGetRead(self, data, rf, lf, chunks, start, size, startTime):
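        # Callback for one outstanding read: record EOF or short reads in the
        # chunk list, write received data at its offset in the local file,
        # then request the next chunk so 'numRequests' reads stay in flight.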
if data and isinstance(data, failure.Failure):
log.msg('get read err: %s' % data)
reason = data
reason.trap(EOFError)
i = chunks.index((start, start + size))
del chunks[i]
chunks.insert(i, (start, 'eof'))
elif data:
log.msg('get read data: %i' % len(data))
lf.seek(start)
lf.write(data)
if len(data) != size:
log.msg('got less than we asked for: %i < %i' %
(len(data), size))
i = chunks.index((start, start + size))
del chunks[i]
chunks.insert(i, (start, start + len(data)))
rf.total += len(data)
if self.useProgressBar:
self._printProgressBar(rf, startTime)
chunk = self._getNextChunk(chunks)
if not chunk:
return
else:
start, length = chunk
log.msg('asking for %i -> %i' % (start, start+length))
d = rf.readChunk(start, length)
d.addBoth(self._cbGetRead, rf, lf, chunks, start, length, startTime)
return d
def _cbGetDone(self, ignored, rf, lf):
log.msg('get done')
rf.close()
lf.close()
if self.useProgressBar:
self.transport.write('\n')
return "Transferred %s to %s" % (rf.name, lf.name)
def cmd_PUT(self, rest):
"""
        Do an upload request for a single local file or a globbing expression.
@param rest: Requested command line for the PUT command.
@type rest: L{str}
@return: A deferred which fires with L{None} when transfer is done.
@rtype: L{defer.Deferred}
"""
local, rest = self._getFilename(rest)
# FIXME: https://twistedmatrix.com/trac/ticket/7241
# Use a better check for globbing expression.
if '*' in local or '?' in local:
if rest:
remote, rest = self._getFilename(rest)
remote = os.path.join(self.currentDirectory, remote)
else:
remote = ''
files = glob.glob(local)
return self._putMultipleFiles(files, remote)
else:
if rest:
remote, rest = self._getFilename(rest)
else:
remote = os.path.split(local)[1]
return self._putSingleFile(local, remote)
def _putSingleFile(self, local, remote):
"""
Perform an upload for a single file.
@param local: Path to local file.
@type local: L{str}.
@param remote: Remote path for the request relative to current working
directory.
@type remote: L{str}
@return: A deferred which fires when transfer is done.
"""
return self._cbPutMultipleNext(None, [local], remote, single=True)
def _putMultipleFiles(self, files, remote):
"""
Perform an upload for a list of local files.
@param files: List of local files.
@type files: C{list} of L{str}.
@param remote: Remote path for the request relative to current working
directory.
@type remote: L{str}
@return: A deferred which fires when transfer is done.
"""
return self._cbPutMultipleNext(None, files, remote)
def _cbPutMultipleNext(
self, previousResult, files, remotePath, single=False):
"""
Perform an upload for the next file in the list of local files.
        @param previousResult: Result from the previous file in the list.
        @type previousResult: L{str}
@param files: List of local files.
@type files: C{list} of L{str}
@param remotePath: Remote path for the request relative to current
working directory.
@type remotePath: L{str}
        @param single: A flag which signals if this is a transfer for a single
            file, in which case the exact remote path is used.
        @type single: L{bool}
@return: A deferred which fires when transfer is done.
"""
if isinstance(previousResult, failure.Failure):
self._printFailure(previousResult)
elif previousResult:
self.transport.write(previousResult)
if not previousResult.endswith('\n'):
self.transport.write('\n')
currentFile = None
while files and not currentFile:
try:
currentFile = files.pop(0)
                localStream = open(currentFile, 'rb')  # binary mode for faithful uploads
except:
self._printFailure(failure.Failure())
currentFile = None
# No more files to transfer.
if not currentFile:
return None
if single:
remote = remotePath
else:
name = os.path.split(currentFile)[1]
remote = os.path.join(remotePath, name)
log.msg((name, remote, remotePath))
d = self._putRemoteFile(localStream, remote)
d.addBoth(self._cbPutMultipleNext, files, remotePath)
return d
def _putRemoteFile(self, localStream, remotePath):
"""
Do an upload request.
@param localStream: Local stream from where data is read.
@type localStream: File like object.
@param remotePath: Remote path for the request relative to current working directory.
@type remotePath: L{str}
@return: A deferred which fires when transfer is done.
"""
remote = os.path.join(self.currentDirectory, remotePath)
flags = (
filetransfer.FXF_WRITE |
filetransfer.FXF_CREAT |
filetransfer.FXF_TRUNC
)
d = self.client.openFile(remote, flags, {})
d.addCallback(self._cbPutOpenFile, localStream)
d.addErrback(self._ebCloseLf, localStream)
return d
def _cbPutOpenFile(self, rf, lf):
numRequests = self.client.transport.conn.options['requests']
if self.useProgressBar:
lf = FileWrapper(lf)
dList = []
chunks = []
startTime = self.reactor.seconds()
for i in range(numRequests):
d = self._cbPutWrite(None, rf, lf, chunks, startTime)
if d:
dList.append(d)
dl = defer.DeferredList(dList, fireOnOneErrback=1)
dl.addCallback(self._cbPutDone, rf, lf)
return dl
def _cbPutWrite(self, ignored, rf, lf, chunks, startTime):
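        # Callback for one outstanding write: read the next local chunk and
        # write it to the remote file at the matching offset, chaining itself
        # until the local file is exhausted.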
chunk = self._getNextChunk(chunks)
start, size = chunk
lf.seek(start)
data = lf.read(size)
if self.useProgressBar:
lf.total += len(data)
self._printProgressBar(lf, startTime)
if data:
d = rf.writeChunk(start, data)
d.addCallback(self._cbPutWrite, rf, lf, chunks, startTime)
return d
else:
return
def _cbPutDone(self, ignored, rf, lf):
lf.close()
rf.close()
if self.useProgressBar:
self.transport.write('\n')
return 'Transferred %s to %s' % (lf.name, rf.name)
def cmd_LCD(self, path):
os.chdir(path)
def cmd_LN(self, rest):
linkpath, rest = self._getFilename(rest)
targetpath, rest = self._getFilename(rest)
linkpath, targetpath = map(
lambda x: os.path.join(self.currentDirectory, x),
(linkpath, targetpath))
return self.client.makeLink(linkpath, targetpath).addCallback(_ignore)
def cmd_LS(self, rest):
# possible lines:
# ls current directory
# ls name_of_file that file
# ls name_of_directory that directory
# ls some_glob_string current directory, globbed for that string
options = []
rest = rest.split()
while rest and rest[0] and rest[0][0] == '-':
opts = rest.pop(0)[1:]
for o in opts:
if o == 'l':
options.append('verbose')
elif o == 'a':
options.append('all')
rest = ' '.join(rest)
path, rest = self._getFilename(rest)
if not path:
fullPath = self.currentDirectory + '/'
else:
fullPath = os.path.join(self.currentDirectory, path)
d = self._remoteGlob(fullPath)
d.addCallback(self._cbDisplayFiles, options)
return d
def _cbDisplayFiles(self, files, options):
files.sort()
if 'all' not in options:
files = [f for f in files if not f[0].startswith('.')]
if 'verbose' in options:
lines = [f[1] for f in files]
else:
lines = [f[0] for f in files]
if not lines:
return None
else:
return '\n'.join(lines)
def cmd_MKDIR(self, path):
path, rest = self._getFilename(path)
path = os.path.join(self.currentDirectory, path)
return self.client.makeDirectory(path, {}).addCallback(_ignore)
def cmd_RMDIR(self, path):
path, rest = self._getFilename(path)
path = os.path.join(self.currentDirectory, path)
return self.client.removeDirectory(path).addCallback(_ignore)
def cmd_LMKDIR(self, path):
os.system("mkdir %s" % path)
def cmd_RM(self, path):
path, rest = self._getFilename(path)
path = os.path.join(self.currentDirectory, path)
return self.client.removeFile(path).addCallback(_ignore)
def cmd_LLS(self, rest):
os.system("ls %s" % rest)
def cmd_RENAME(self, rest):
oldpath, rest = self._getFilename(rest)
newpath, rest = self._getFilename(rest)
oldpath, newpath = map (
lambda x: os.path.join(self.currentDirectory, x),
(oldpath, newpath))
return self.client.renameFile(oldpath, newpath).addCallback(_ignore)
def cmd_EXIT(self, ignored):
self.client.transport.loseConnection()
cmd_QUIT = cmd_EXIT
def cmd_VERSION(self, ignored):
return "SFTP version %i" % self.client.version
def cmd_HELP(self, ignored):
return """Available commands:
cd path Change remote directory to 'path'.
chgrp gid path Change gid of 'path' to 'gid'.
chmod mode path Change mode of 'path' to 'mode'.
chown uid path Change uid of 'path' to 'uid'.
exit Disconnect from the server.
get remote-path [local-path] Get remote file.
help Get a list of available commands.
lcd path Change local directory to 'path'.
lls [ls-options] [path] Display local directory listing.
lmkdir path Create local directory.
ln linkpath targetpath Symlink remote file.
lpwd Print the local working directory.
ls [-l] [path] Display remote directory listing.
mkdir path Create remote directory.
progress Toggle progress bar.
put local-path [remote-path] Put local file.
pwd Print the remote working directory.
quit Disconnect from the server.
rename oldpath newpath Rename remote file.
rmdir path Remove remote directory.
rm path Remove remote file.
version Print the SFTP version.
? Synonym for 'help'.
"""
def cmd_PWD(self, ignored):
return self.currentDirectory
def cmd_LPWD(self, ignored):
return os.getcwd()
def cmd_PROGRESS(self, ignored):
self.useProgressBar = not self.useProgressBar
return "%ssing progess bar." % (self.useProgressBar and "U" or "Not u")
def cmd_EXEC(self, rest):
"""
Run C{rest} using the user's shell (or /bin/sh if they do not have
one).
"""
shell = self._pwd.getpwnam(getpass.getuser())[6]
if not shell:
shell = '/bin/sh'
if rest:
cmds = ['-c', rest]
return utils.getProcessOutput(shell, cmds, errortoo=1)
else:
os.system(shell)
# accessory functions
def _remoteGlob(self, fullPath):
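        # List a directory, a single file, or a wildcard pattern: a non-glob
        # path is first tried as a directory, falling back to listing its
        # parent filtered by the final path component.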
log.msg('looking up %s' % fullPath)
head, tail = os.path.split(fullPath)
if '*' in tail or '?' in tail:
glob = 1
else:
glob = 0
if tail and not glob: # could be file or directory
# try directory first
d = self.client.openDirectory(fullPath)
d.addCallback(self._cbOpenList, '')
d.addErrback(self._ebNotADirectory, head, tail)
else:
d = self.client.openDirectory(head)
d.addCallback(self._cbOpenList, tail)
return d
def _cbOpenList(self, directory, glob):
files = []
d = directory.read()
d.addBoth(self._cbReadFile, files, directory, glob)
return d
def _ebNotADirectory(self, reason, path, glob):
d = self.client.openDirectory(path)
d.addCallback(self._cbOpenList, glob)
return d
def _cbReadFile(self, files, l, directory, glob):
if not isinstance(files, failure.Failure):
if glob:
l.extend([f for f in files if fnmatch.fnmatch(f[0], glob)])
else:
l.extend(files)
d = directory.read()
d.addBoth(self._cbReadFile, l, directory, glob)
return d
else:
reason = files
reason.trap(EOFError)
directory.close()
return l
def _abbrevSize(self, size):
# from http://mail.python.org/pipermail/python-list/1999-December/018395.html
_abbrevs = [
(1<<50, 'PB'),
(1<<40, 'TB'),
(1<<30, 'GB'),
(1<<20, 'MB'),
(1<<10, 'kB'),
(1, 'B')
]
for factor, suffix in _abbrevs:
if size > factor:
break
        return '%.1f' % (size / float(factor)) + suffix  # avoid integer truncation on Python 2
def _abbrevTime(self, t):
if t > 3600: # 1 hour
hours = int(t / 3600)
t -= (3600 * hours)
mins = int(t / 60)
t -= (60 * mins)
return "%i:%02i:%02i" % (hours, mins, t)
else:
mins = int(t/60)
t -= (60 * mins)
return "%02i:%02i" % (mins, t)
def _printProgressBar(self, f, startTime):
"""
Update a console progress bar on this L{StdioClient}'s transport, based
on the difference between the start time of the operation and the
current time according to the reactor, and appropriate to the size of
the console window.
@param f: a wrapper around the file which is being written or read
@type f: L{FileWrapper}
@param startTime: The time at which the operation being tracked began.
@type startTime: L{float}
"""
diff = self.reactor.seconds() - startTime
total = f.total
try:
winSize = struct.unpack('4H',
fcntl.ioctl(0, tty.TIOCGWINSZ, '12345679'))
except IOError:
winSize = [None, 80]
if diff == 0.0:
speed = 0.0
else:
speed = total / diff
if speed:
timeLeft = (f.size - total) / speed
else:
timeLeft = 0
front = f.name
if f.size:
percentage = (total / f.size) * 100
else:
percentage = 100
back = '%3i%% %s %sps %s ' % (percentage,
self._abbrevSize(total),
self._abbrevSize(speed),
self._abbrevTime(timeLeft))
spaces = (winSize[1] - (len(front) + len(back) + 1)) * ' '
self.transport.write('\r%s%s%s' % (front, spaces, back))
def _getFilename(self, line):
"""
Parse line received as command line input and return first filename
together with the remaining line.
@param line: Arguments received from command line input.
@type line: L{str}
        @return: Tuple with the filename and the remaining line. Empty values
            are returned when no path was found.
        @rtype: C{tuple}
"""
line = line.strip()
if not line:
return '', ''
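        # Quoted filenames: walk the characters, honouring backslash escapes
        # for quotes and backslashes, until the matching closing quote.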
if line[0] in '\'"':
ret = []
line = list(line)
try:
for i in range(1,len(line)):
c = line[i]
if c == line[0]:
return ''.join(ret), ''.join(line[i+1:]).lstrip()
elif c == '\\': # quoted character
del line[i]
if line[i] not in '\'"\\':
raise IndexError("bad quote: \\%s" % (line[i],))
ret.append(line[i])
else:
ret.append(line[i])
except IndexError:
raise IndexError("unterminated quote")
ret = line.split(None, 1)
if len(ret) == 1:
return ret[0], ''
else:
return ret[0], ret[1]
setattr(StdioClient, 'cmd_?', StdioClient.cmd_HELP)
class SSHConnection(connection.SSHConnection):
def serviceStarted(self):
self.openChannel(SSHSession())
class SSHSession(channel.SSHChannel):
name = 'session'
def channelOpen(self, foo):
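        # Request either an 'exec' of an explicit subsystem path or the named
        # SFTP subsystem, then hook up the file transfer client on success.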
log.msg('session %s open' % self.id)
if self.conn.options['subsystem'].startswith('/'):
request = 'exec'
else:
request = 'subsystem'
d = self.conn.sendRequest(self, request, \
common.NS(self.conn.options['subsystem']), wantReply=1)
d.addCallback(self._cbSubsystem)
d.addErrback(_ebExit)
def _cbSubsystem(self, result):
self.client = filetransfer.FileTransferClient()
self.client.makeConnection(self)
self.dataReceived = self.client.dataReceived
f = None
if self.conn.options['batchfile']:
fn = self.conn.options['batchfile']
if fn != '-':
f = open(fn)
self.stdio = stdio.StandardIO(StdioClient(self.client, f))
def extReceived(self, t, data):
if t==connection.EXTENDED_DATA_STDERR:
log.msg('got %s stderr data' % len(data))
sys.stderr.write(data)
sys.stderr.flush()
def eofReceived(self):
log.msg('got eof')
self.stdio.loseWriteConnection()
def closeReceived(self):
log.msg('remote side closed %s' % self)
self.conn.sendClose(self)
def closed(self):
try:
reactor.stop()
except:
pass
def stopWriting(self):
self.stdio.pauseProducing()
def startWriting(self):
self.stdio.resumeProducing()
if __name__ == '__main__':
run()
|
|
import pytest
import py
from textwrap import dedent
pytest_plugins = "pytester",
def test_version():
import pytest_cache
assert pytest_cache.__version__
def test_cache_reportheader(testdir):
p = testdir.makepyfile("""
def test_hello():
pass
""")
cachedir = p.dirpath(".cache")
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines([
"cachedir: %s" % cachedir,
])
def test_cache_show(testdir):
result = testdir.runpytest("--cache")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*cache is empty*"
])
p = testdir.makeconftest("""
def pytest_configure(config):
config.cache.set("my/name", [1,2,3])
config.cache.set("other/some", {1:2})
dp = config.cache.makedir("mydb")
dp.ensure("hello")
dp.ensure("world")
""")
result = testdir.runpytest()
assert result.ret == 0
result = testdir.runpytest("--cache")
result.stdout.fnmatch_lines_random([
"*cachedir:*",
"-*cache values*-",
"*my/name contains:",
" [1, 2, 3]",
"*other/some contains*",
" {1: 2}",
"-*cache directories*-",
"*mydb/hello*length 0*",
"*mydb/world*length 0*",
])
class TestNewAPI:
def test_config_cache_makedir(self, testdir):
testdir.makeini("[pytest]")
config = testdir.parseconfigure()
pytest.raises(ValueError, lambda:
config.cache.makedir("key/name"))
p = config.cache.makedir("name")
assert p.check()
def test_config_cache_dataerror(self, testdir):
testdir.makeini("[pytest]")
config = testdir.parseconfigure()
cache = config.cache
pytest.raises(ValueError, lambda: cache.set("key/name", cache))
config.cache.set("key/name", 0)
config.cache._getvaluepath("key/name").write("123")
val = config.cache.get("key/name", -2)
assert val == -2
def test_config_cache(self, testdir):
testdir.makeconftest("""
def pytest_configure(config):
# see that we get cache information early on
assert hasattr(config, "cache")
""")
testdir.makepyfile("""
def test_session(pytestconfig):
assert hasattr(pytestconfig, "cache")
""")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def XXX_test_cachefuncarg(self, testdir):
testdir.makepyfile("""
import pytest
def test_cachefuncarg(cache):
val = cache.get("some/thing", None)
assert val is None
cache.set("some/thing", [1])
pytest.raises(TypeError, lambda: cache.get("some/thing"))
val = cache.get("some/thing", [])
assert val == [1]
""")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
class TestLastFailed:
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
p = testdir.makepyfile("""
def test_1():
assert 0
def test_2():
assert 0
def test_3():
assert 1
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
p.write(py.code.Source("""
def test_1():
assert 1
def test_2():
assert 1
def test_3():
assert 0
"""))
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 passed*1 desel*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
result = testdir.runpytest("--lf", "--clearcache")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile(test_a="""
def test_a1():
assert 0
def test_a2():
assert 1
""", test_b="""
def test_b1():
assert 0
""")
p = testdir.tmpdir.join("test_a.py")
p2 = testdir.tmpdir.join("test_b.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
p2.write(py.code.Source("""
def test_b1():
assert 1
"""))
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 passed*",
])
result = testdir.runpytest("--lf", p)
result.stdout.fnmatch_lines([
"*1 failed*1 desel*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
p1 = testdir.makepyfile("""
def test_1():
assert 0
""")
p2 = testdir.tmpdir.join("test_something.py")
p2.write(py.code.Source("""
def test_2():
assert 0
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 failed*",
])
def test_lastfailed_xpass(self, testdir):
rep = testdir.inline_runsource1("""
import pytest
@pytest.mark.xfail
def test_hello():
assert 1
""")
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
assert not lastfailed
|
|
"""Test the Yeelight config flow."""
from unittest.mock import patch
import pytest
from homeassistant import config_entries
from homeassistant.components import dhcp, ssdp, zeroconf
from homeassistant.components.yeelight.config_flow import MODEL_UNKNOWN, CannotConnect
from homeassistant.components.yeelight.const import (
CONF_DETECTED_MODEL,
CONF_MODE_MUSIC,
CONF_MODEL,
CONF_NIGHTLIGHT_SWITCH,
CONF_NIGHTLIGHT_SWITCH_TYPE,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
DEFAULT_MODE_MUSIC,
DEFAULT_NAME,
DEFAULT_NIGHTLIGHT_SWITCH,
DEFAULT_SAVE_ON_CHANGE,
DEFAULT_TRANSITION,
DOMAIN,
NIGHTLIGHT_SWITCH_TYPE_LIGHT,
)
from homeassistant.const import CONF_DEVICE, CONF_HOST, CONF_ID, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_ABORT, RESULT_TYPE_FORM
from . import (
CAPABILITIES,
ID,
IP_ADDRESS,
MODEL,
MODULE,
MODULE_CONFIG_FLOW,
NAME,
UNIQUE_FRIENDLY_NAME,
ZEROCONF_DATA,
_mocked_bulb,
_patch_discovery,
_patch_discovery_interval,
_patch_discovery_timeout,
)
from tests.common import MockConfigEntry
DEFAULT_CONFIG = {
CONF_MODEL: "",
CONF_TRANSITION: DEFAULT_TRANSITION,
CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
CONF_NIGHTLIGHT_SWITCH: DEFAULT_NIGHTLIGHT_SWITCH,
}
SSDP_INFO = ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
upnp={},
ssdp_headers=CAPABILITIES,
)
async def test_discovery(hass: HomeAssistant):
"""Test setting up discovery."""
with _patch_discovery(), _patch_discovery_interval():
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result2["type"] == "form"
assert result2["step_id"] == "pick_device"
assert not result2["errors"]
# test we can try again
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result2["type"] == "form"
assert result2["step_id"] == "pick_device"
assert not result2["errors"]
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE}.async_setup", return_value=True
) as mock_setup, patch(
f"{MODULE}.async_setup_entry", return_value=True
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_DEVICE: ID}
)
assert result3["type"] == "create_entry"
assert result3["title"] == UNIQUE_FRIENDLY_NAME
assert result3["data"] == {CONF_ID: ID, CONF_HOST: IP_ADDRESS, CONF_MODEL: MODEL}
await hass.async_block_till_done()
mock_setup.assert_called_once()
mock_setup_entry.assert_called_once()
# ignore configured devices
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery(), _patch_discovery_interval():
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result2["type"] == "abort"
assert result2["reason"] == "no_devices_found"
async def test_discovery_with_existing_device_present(hass: HomeAssistant):
"""Test setting up discovery."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_ID: "0x000000000099999", CONF_HOST: "4.4.4.4"}
)
config_entry.add_to_hass(hass)
alternate_bulb = _mocked_bulb()
alternate_bulb.capabilities["id"] = "0x000000000099999"
alternate_bulb.capabilities["location"] = "yeelight://4.4.4.4"
with _patch_discovery(), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=alternate_bulb
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery(), _patch_discovery_timeout(), _patch_discovery_interval():
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
await hass.async_block_till_done()
assert result2["type"] == "form"
assert result2["step_id"] == "pick_device"
assert not result2["errors"]
# Now abort and make sure we can start over
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery(), _patch_discovery_interval():
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result2["type"] == "form"
assert result2["step_id"] == "pick_device"
assert not result2["errors"]
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=_mocked_bulb()
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_DEVICE: ID}
)
assert result3["type"] == "create_entry"
assert result3["title"] == UNIQUE_FRIENDLY_NAME
assert result3["data"] == {
CONF_ID: ID,
CONF_HOST: IP_ADDRESS,
CONF_MODEL: MODEL,
}
await hass.async_block_till_done()
await hass.async_block_till_done()
# ignore configured devices
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery(), _patch_discovery_interval():
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result2["type"] == "abort"
assert result2["reason"] == "no_devices_found"
async def test_discovery_no_device(hass: HomeAssistant):
"""Test discovery without device."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval():
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result2["type"] == "abort"
assert result2["reason"] == "no_devices_found"
async def test_import(hass: HomeAssistant):
"""Test import from yaml."""
config = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: IP_ADDRESS,
CONF_TRANSITION: DEFAULT_TRANSITION,
CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
CONF_NIGHTLIGHT_SWITCH_TYPE: NIGHTLIGHT_SWITCH_TYPE_LIGHT,
}
# Cannot connect
mocked_bulb = _mocked_bulb(cannot_connect=True)
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
# Success
mocked_bulb = _mocked_bulb()
with _patch_discovery(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
), patch(f"{MODULE}.async_setup", return_value=True) as mock_setup, patch(
f"{MODULE}.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
assert result["type"] == "create_entry"
assert result["title"] == DEFAULT_NAME
assert result["data"] == {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: IP_ADDRESS,
CONF_TRANSITION: DEFAULT_TRANSITION,
CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
CONF_NIGHTLIGHT_SWITCH: True,
}
await hass.async_block_till_done()
mock_setup.assert_called_once()
mock_setup_entry.assert_called_once()
# Duplicate
mocked_bulb = _mocked_bulb()
with _patch_discovery(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_manual(hass: HomeAssistant):
"""Test manually setup."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
# Cannot connect (timeout)
mocked_bulb = _mocked_bulb(cannot_connect=True)
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
assert result2["type"] == "form"
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
# Cannot connect (error)
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
assert result3["errors"] == {"base": "cannot_connect"}
# Success
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_timeout(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
), patch(f"{MODULE}.async_setup", return_value=True), patch(
f"{MODULE}.async_setup_entry", return_value=True
):
result4 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
await hass.async_block_till_done()
assert result4["type"] == "create_entry"
assert result4["title"] == "Color 0x15243f"
assert result4["data"] == {
CONF_HOST: IP_ADDRESS,
CONF_ID: "0x000000000015243f",
CONF_MODEL: MODEL,
}
# Duplicate
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mocked_bulb = _mocked_bulb()
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
async def test_options(hass: HomeAssistant):
"""Test options flow."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: IP_ADDRESS, CONF_NAME: NAME, CONF_DETECTED_MODEL: MODEL},
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
with _patch_discovery(), patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
config = {
CONF_NAME: NAME,
CONF_MODEL: MODEL,
CONF_TRANSITION: DEFAULT_TRANSITION,
CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
CONF_NIGHTLIGHT_SWITCH: DEFAULT_NIGHTLIGHT_SWITCH,
}
assert config_entry.options == config
assert hass.states.get(f"light.{NAME}_nightlight") is None
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
config[CONF_NIGHTLIGHT_SWITCH] = True
user_input = {**config}
user_input.pop(CONF_NAME)
user_input.pop(CONF_MODEL)
with _patch_discovery(), patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
result2 = await hass.config_entries.options.async_configure(
result["flow_id"], user_input
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["data"] == config
assert result2["data"] == config_entry.options
assert hass.states.get(f"light.{NAME}_nightlight") is not None
async def test_options_unknown_model(hass: HomeAssistant):
"""Test options flow with an unknown model."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: IP_ADDRESS, CONF_NAME: NAME, CONF_DETECTED_MODEL: "not_in_db"},
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
with _patch_discovery(), patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
config = {
CONF_NAME: NAME,
CONF_MODEL: "not_in_db",
CONF_TRANSITION: DEFAULT_TRANSITION,
CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
CONF_NIGHTLIGHT_SWITCH: DEFAULT_NIGHTLIGHT_SWITCH,
}
assert config_entry.options == config
assert hass.states.get(f"light.{NAME}_nightlight") is None
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
config[CONF_NIGHTLIGHT_SWITCH] = True
user_input = {**config}
user_input.pop(CONF_NAME)
with _patch_discovery(), patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
result2 = await hass.config_entries.options.async_configure(
result["flow_id"], user_input
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["data"] == config
assert result2["data"] == config_entry.options
assert hass.states.get(f"light.{NAME}_nightlight") is not None
async def test_manual_no_capabilities(hass: HomeAssistant):
"""Test manually setup without successful get_capabilities."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
mocked_bulb = _mocked_bulb()
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
), patch(
f"{MODULE}.async_setup", return_value=True
), patch(
f"{MODULE}.async_setup_entry", return_value=True
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
assert result["type"] == "create_entry"
assert result["data"] == {
CONF_HOST: IP_ADDRESS,
CONF_ID: None,
CONF_MODEL: MODEL_UNKNOWN,
}
async def test_discovered_by_homekit_and_dhcp(hass):
"""Test we get the form with homekit and abort for dhcp source when we get both."""
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HOMEKIT},
data=zeroconf.ZeroconfServiceInfo(
host=IP_ADDRESS,
hostname="mock_hostname",
name="mock_name",
port=None,
properties={zeroconf.ATTR_PROPERTIES_ID: "aa:bb:cc:dd:ee:ff"},
type="mock_type",
),
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result2 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
ip=IP_ADDRESS, macaddress="aa:bb:cc:dd:ee:ff", hostname="mock_hostname"
),
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_ABORT
assert result2["reason"] == "already_in_progress"
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result3 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
ip=IP_ADDRESS, macaddress="00:00:00:00:00:00", hostname="mock_hostname"
),
)
await hass.async_block_till_done()
assert result3["type"] == RESULT_TYPE_ABORT
assert result3["reason"] == "already_in_progress"
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", side_effect=CannotConnect
):
result3 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
ip="1.2.3.5", macaddress="00:00:00:00:00:01", hostname="mock_hostname"
),
)
await hass.async_block_till_done()
assert result3["type"] == RESULT_TYPE_ABORT
assert result3["reason"] == "cannot_connect"
@pytest.mark.parametrize(
"source, data",
[
(
config_entries.SOURCE_DHCP,
dhcp.DhcpServiceInfo(
ip=IP_ADDRESS, macaddress="aa:bb:cc:dd:ee:ff", hostname="mock_hostname"
),
),
(
config_entries.SOURCE_HOMEKIT,
zeroconf.ZeroconfServiceInfo(
host=IP_ADDRESS,
hostname="mock_hostname",
name="mock_name",
port=None,
properties={zeroconf.ATTR_PROPERTIES_ID: "aa:bb:cc:dd:ee:ff"},
type="mock_type",
),
),
],
)
async def test_discovered_by_dhcp_or_homekit(hass, source, data):
"""Test we can setup when discovered from dhcp or homekit."""
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": source}, data=data
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE}.async_setup", return_value=True
) as mock_async_setup, patch(
f"{MODULE}.async_setup_entry", return_value=True
) as mock_async_setup_entry:
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["data"] == {
CONF_HOST: IP_ADDRESS,
CONF_ID: "0x000000000015243f",
CONF_MODEL: MODEL,
}
assert mock_async_setup.called
assert mock_async_setup_entry.called
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", side_effect=CannotConnect
):
result3 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": source}, data=data
)
await hass.async_block_till_done()
assert result3["type"] == RESULT_TYPE_ABORT
assert result3["reason"] == "already_configured"
@pytest.mark.parametrize(
"source, data",
[
(
config_entries.SOURCE_DHCP,
dhcp.DhcpServiceInfo(
ip=IP_ADDRESS, macaddress="aa:bb:cc:dd:ee:ff", hostname="mock_hostname"
),
),
(
config_entries.SOURCE_HOMEKIT,
zeroconf.ZeroconfServiceInfo(
host=IP_ADDRESS,
hostname="mock_hostname",
name="mock_name",
port=None,
properties={zeroconf.ATTR_PROPERTIES_ID: "aa:bb:cc:dd:ee:ff"},
type="mock_type",
),
),
],
)
async def test_discovered_by_dhcp_or_homekit_failed_to_get_id(hass, source, data):
"""Test we abort if we cannot get the unique id when discovered from dhcp or homekit."""
mocked_bulb = _mocked_bulb()
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": source}, data=data
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_discovered_ssdp(hass):
"""Test we can setup when discovered from ssdp."""
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=SSDP_INFO
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE}.async_setup", return_value=True
) as mock_async_setup, patch(
f"{MODULE}.async_setup_entry", return_value=True
) as mock_async_setup_entry:
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["data"] == {
CONF_HOST: IP_ADDRESS,
CONF_ID: "0x000000000015243f",
CONF_MODEL: MODEL,
}
assert mock_async_setup.called
assert mock_async_setup_entry.called
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=SSDP_INFO
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_discovered_zeroconf(hass):
"""Test we can setup when discovered from zeroconf."""
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE}.async_setup", return_value=True
) as mock_async_setup, patch(
f"{MODULE}.async_setup_entry", return_value=True
) as mock_async_setup_entry:
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["data"] == {
CONF_HOST: IP_ADDRESS,
CONF_ID: "0x000000000015243f",
CONF_MODEL: MODEL,
}
assert mock_async_setup.called
assert mock_async_setup_entry.called
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_interval(), patch(
f"{MODULE_CONFIG_FLOW}.AsyncBulb", return_value=mocked_bulb
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=SSDP_INFO,
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
|
|
# --------------------------------------------------------------------
__all__ = ['PetscConfig',
'setup', 'Extension',
'config', 'build', 'build_src', 'build_ext',
'clean', 'test', 'sdist',
'log',
]
# --------------------------------------------------------------------
import sys, os
try:
import setuptools
except ImportError:
setuptools = None
def import_command(cmd):
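    # Prefer the setuptools version of a distutils command when setuptools is
    # available, falling back to the plain distutils implementation.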
try:
from importlib import import_module
except ImportError:
import_module = lambda n: __import__(n, fromlist=[None])
try:
if not setuptools: raise ImportError
mod = import_module('setuptools.command.' + cmd)
return getattr(mod, cmd)
except ImportError:
mod = import_module('distutils.command.' + cmd)
return getattr(mod, cmd)
if setuptools:
from setuptools import setup
from setuptools import Extension as _Extension
from setuptools import Command
else:
from distutils.core import setup
from distutils.core import Extension as _Extension
from distutils.core import Command
_config = import_command('config')
_build = import_command('build')
_build_ext = import_command('build_ext')
_install = import_command('install')
_clean = import_command('clean')
_sdist = import_command('sdist')
from distutils import sysconfig
from distutils import log
from distutils.util import split_quoted, execute
from distutils.errors import DistutilsError
# --------------------------------------------------------------------
def fix_config_vars(names, values):
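    # On macOS, rewrite compiler/linker flags so that ARCHFLAGS and SDKROOT
    # from the environment override the -arch and -isysroot values baked into
    # Python's build-time configuration.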
import os, re
values = list(values)
if sys.platform == 'darwin':
if 'ARCHFLAGS' in os.environ:
ARCHFLAGS = os.environ['ARCHFLAGS']
for i, flag in enumerate(list(values)):
                flag, count = re.subn(r'-arch\s+\w+', ' ', flag)
if count and ARCHFLAGS:
flag = flag + ' ' + ARCHFLAGS
values[i] = flag
if 'SDKROOT' in os.environ:
SDKROOT = os.environ['SDKROOT']
for i, flag in enumerate(list(values)):
                flag, count = re.subn(r'-isysroot [^ \t]*', ' ', flag)
if count and SDKROOT:
flag = flag + ' ' + '-isysroot ' + SDKROOT
values[i] = flag
return values
def get_config_vars(*names):
# Core Python configuration
values = sysconfig.get_config_vars(*names)
# Do any distutils flags fixup right now
values = fix_config_vars(names, values)
return values
from distutils.unixccompiler import UnixCCompiler
rpath_option_orig = UnixCCompiler.runtime_library_dir_option
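# On Linux, rewrite distutils' -R / -Wl,-R runtime library options to the
# more widely supported -Wl,-rpath, form.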
def rpath_option(compiler, dir):
option = rpath_option_orig(compiler, dir)
if sys.platform[:5] == 'linux':
if option.startswith('-R'):
option = option.replace('-R', '-Wl,-rpath,', 1)
elif option.startswith('-Wl,-R'):
option = option.replace('-Wl,-R', '-Wl,-rpath,', 1)
return option
UnixCCompiler.runtime_library_dir_option = rpath_option
# --------------------------------------------------------------------
class PetscConfig:
def __init__(self, petsc_dir, petsc_arch):
self.configdict = { }
if not petsc_dir:
raise DistutilsError("PETSc not found")
if not os.path.isdir(petsc_dir):
raise DistutilsError("invalid PETSC_DIR: %s" % petsc_dir)
self.version = self._get_petsc_version(petsc_dir)
self.configdict = self._get_petsc_config(petsc_dir, petsc_arch)
self.PETSC_DIR = self['PETSC_DIR']
self.PETSC_ARCH = self['PETSC_ARCH']
language_map = {'CONLY':'c', 'CXXONLY':'c++'}
self.language = language_map[self['PETSC_LANGUAGE']]
def __getitem__(self, item):
return self.configdict[item]
def configure(self, extension, compiler=None):
self.configure_extension(extension)
if compiler is not None:
self.configure_compiler(compiler)
def _get_petsc_version(self, petsc_dir):
import re
version_re = {
'major' : re.compile(r"#define\s+PETSC_VERSION_MAJOR\s+(\d+)"),
'minor' : re.compile(r"#define\s+PETSC_VERSION_MINOR\s+(\d+)"),
'micro' : re.compile(r"#define\s+PETSC_VERSION_SUBMINOR\s+(\d+)"),
'patch' : re.compile(r"#define\s+PETSC_VERSION_PATCH\s+(\d+)"),
'release': re.compile(r"#define\s+PETSC_VERSION_RELEASE\s+(\d+)"),
}
petscversion_h = os.path.join(petsc_dir, 'include', 'petscversion.h')
with open(petscversion_h, 'rt') as f: data = f.read()
major = int(version_re['major'].search(data).groups()[0])
minor = int(version_re['minor'].search(data).groups()[0])
micro = int(version_re['micro'].search(data).groups()[0])
release = int(version_re['release'].search(data).groups()[0])
return (major, minor, micro), bool(release)
def _get_petsc_config(self, petsc_dir, petsc_arch):
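        # Parse PETSc's makefile-style 'variables' and 'petscvariables' files
        # into a configuration dictionary for the requested PETSC_ARCH.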
from os.path import join, isdir, exists
PETSC_DIR = petsc_dir
PETSC_ARCH = petsc_arch
#
confdir = join('lib', 'petsc', 'conf')
if not (PETSC_ARCH and isdir(join(PETSC_DIR, PETSC_ARCH))):
petscvars = join(PETSC_DIR, confdir, 'petscvariables')
PETSC_ARCH = makefile(open(petscvars, 'rt')).get('PETSC_ARCH')
if not (PETSC_ARCH and isdir(join(PETSC_DIR, PETSC_ARCH))):
PETSC_ARCH = ''
#
variables = join(PETSC_DIR, confdir, 'variables')
if not exists(variables):
variables = join(PETSC_DIR, PETSC_ARCH, confdir, 'variables')
petscvariables = join(PETSC_DIR, PETSC_ARCH, confdir, 'petscvariables')
#
with open(variables) as f:
contents = f.read()
with open(petscvariables) as f:
contents += f.read()
#
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
confstr = 'PETSC_DIR = %s\n' % PETSC_DIR
confstr += 'PETSC_ARCH = %s\n' % PETSC_ARCH
confstr += contents
confdict = makefile(StringIO(confstr))
return confdict
    def _configure_ext(self, ext, dct, prepend=False):
        # Merge flag lists from 'dct' into the extension, skipping duplicates.
        extdict = ext.__dict__
        for key, values in dct.items():
            if key in extdict:
                for value in values:
                    if value not in extdict[key]:
                        if prepend:
                            extdict[key].insert(0, value)
                        else:
                            extdict[key].append(value)
def configure_extension(self, extension):
# define macros
macros = [('PETSC_DIR', self['PETSC_DIR'])]
extension.define_macros.extend(macros)
# includes and libraries
petsc_inc = flaglist(self['PETSC_CC_INCLUDES'])
petsc_lib = flaglist(
'-L%s %s' % (self['PETSC_LIB_DIR'], self['PETSC_LIB_BASIC']))
petsc_lib['runtime_library_dirs'].append(self['PETSC_LIB_DIR'])
# Link in extra libraries on static builds
if self['BUILDSHAREDLIB'] != 'yes':
petsc_ext_lib = split_quoted(self['PETSC_EXTERNAL_LIB_BASIC'])
petsc_lib['extra_link_args'].extend(petsc_ext_lib)
        self._configure_ext(extension, petsc_inc, prepend=True)
self._configure_ext(extension, petsc_lib)
def configure_compiler(self, compiler):
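        # Swap distutils' default compiler/linker executables for PETSc's
        # PCC/PCC_LINKER, merging in Python's shared-object flags obtained
        # from sysconfig.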
if compiler.compiler_type != 'unix': return
(cc, cxx, cflags, ccshared,
ldflags, ldshared, so_ext) = get_config_vars(
'CC', 'CXX', 'CFLAGS', 'CCSHARED',
'LDFLAGS', 'LDSHARED', 'SO')
cflags = cflags or ''
ldflags = ldflags or ''
cflags = cflags.replace('-Wstrict-prototypes', '')
ld = cc
ldshared = ldshared.replace(ld, '', 1).strip()
ldshared = [flg for flg in split_quoted(ldshared)
if flg not in split_quoted(ldflags)]
ldshared = str.join(' ', ldshared)
#
getenv = os.environ.get
def get_flags(cmd):
try: return ' '.join(split_quoted(cmd)[1:])
except: return ''
# C compiler
PCC = self['PCC']
PCC_FLAGS = get_flags(cc) + ' ' + self['PCC_FLAGS']
PCC_FLAGS = PCC_FLAGS.replace('-fvisibility=hidden', '')
if sys.version_info[:2] < (2, 5):
PCC_FLAGS = PCC_FLAGS.replace('-Wwrite-strings', '')
PCC = getenv('PCC', PCC) + ' ' + getenv('PCCFLAGS', PCC_FLAGS)
ccshared = getenv('CCSHARED', ccshared)
cflags = getenv('CFLAGS', cflags)
PCC_SHARED = str.join(' ', (PCC, ccshared, cflags))
# C++ compiler
if self.language == 'c++':
PCXX = PCC
else:
try:
PCXX = self['CXX']
except KeyError:
PCXX = cxx
# linker
PLD = self['PCC_LINKER']
PLD_FLAGS = get_flags(ld) + ' ' + self['PCC_LINKER_FLAGS']
PLD_FLAGS = PLD_FLAGS.replace('-fvisibility=hidden', '')
PLD = getenv('PLD', PLD) + ' ' + getenv('PLDFLAGS', PLD_FLAGS)
ldshared = getenv('LDSHARED', ldshared)
ldflags = getenv('LDFLAGS', cflags + ' ' + ldflags)
PLD_SHARED = str.join(' ', (PLD, ldshared, ldflags))
#
compiler.set_executables(
compiler = PCC,
compiler_cxx = PCXX,
linker_exe = PLD,
compiler_so = PCC_SHARED,
linker_so = PLD_SHARED,
)
compiler.shared_lib_extension = so_ext
#
if sys.platform == 'darwin':
for attr in ('preprocessor',
'compiler', 'compiler_cxx', 'compiler_so',
'linker_so', 'linker_exe'):
compiler_cmd = getattr(compiler, attr, [])
while '-mno-fused-madd' in compiler_cmd:
compiler_cmd.remove('-mno-fused-madd')
def log_info(self):
PETSC_DIR = self['PETSC_DIR']
PETSC_ARCH = self['PETSC_ARCH']
version = ".".join([str(i) for i in self.version[0]])
release = ("development", "release")[self.version[1]]
version_info = version + ' ' + release
scalar_type = self['PETSC_SCALAR']
precision = self['PETSC_PRECISION']
language = self['PETSC_LANGUAGE']
compiler = self['PCC']
linker = self['PCC_LINKER']
log.info('PETSC_DIR: %s' % PETSC_DIR )
log.info('PETSC_ARCH: %s' % PETSC_ARCH )
log.info('version: %s' % version_info)
log.info('scalar-type: %s' % scalar_type)
log.info('precision: %s' % precision)
log.info('language: %s' % language)
log.info('compiler: %s' % compiler)
log.info('linker: %s' % linker)
# --------------------------------------------------------------------
class Extension(_Extension):
pass
# --------------------------------------------------------------------
cmd_petsc_opts = [
('petsc-dir=', None,
"define PETSC_DIR, overriding environmental variables"),
('petsc-arch=', None,
"define PETSC_ARCH, overriding environmental variables"),
]
class config(_config):
Configure = PetscConfig
user_options = _config.user_options + cmd_petsc_opts
def initialize_options(self):
_config.initialize_options(self)
self.petsc_dir = None
self.petsc_arch = None
def get_config_arch(self, arch):
return config.Configure(self.petsc_dir, arch)
def run(self):
_config.run(self)
self.petsc_dir = config.get_petsc_dir(self.petsc_dir)
if self.petsc_dir is None: return
petsc_arch = config.get_petsc_arch(self.petsc_dir, self.petsc_arch)
log.info('-' * 70)
log.info('PETSC_DIR: %s' % self.petsc_dir)
arch_list = petsc_arch
if not arch_list :
arch_list = [ None ]
for arch in arch_list:
conf = self.get_config_arch(arch)
archname = conf.PETSC_ARCH or conf['PETSC_ARCH']
scalar_type = conf['PETSC_SCALAR']
precision = conf['PETSC_PRECISION']
language = conf['PETSC_LANGUAGE']
compiler = conf['PCC']
linker = conf['PCC_LINKER']
log.info('-'*70)
log.info('PETSC_ARCH: %s' % archname)
log.info(' * scalar-type: %s' % scalar_type)
log.info(' * precision: %s' % precision)
log.info(' * language: %s' % language)
log.info(' * compiler: %s' % compiler)
log.info(' * linker: %s' % linker)
log.info('-' * 70)
#@staticmethod
def get_petsc_dir(petsc_dir):
if not petsc_dir: return None
petsc_dir = os.path.expandvars(petsc_dir)
if not petsc_dir or '$PETSC_DIR' in petsc_dir:
try:
import petsc
petsc_dir = petsc.get_petsc_dir()
except ImportError:
log.warn("PETSC_DIR not specified")
return None
petsc_dir = os.path.expanduser(petsc_dir)
petsc_dir = os.path.abspath(petsc_dir)
return config.chk_petsc_dir(petsc_dir)
get_petsc_dir = staticmethod(get_petsc_dir)
#@staticmethod
def chk_petsc_dir(petsc_dir):
if not os.path.isdir(petsc_dir):
log.error('invalid PETSC_DIR: %s (ignored)' % petsc_dir)
return None
return petsc_dir
chk_petsc_dir = staticmethod(chk_petsc_dir)
#@staticmethod
def get_petsc_arch(petsc_dir, petsc_arch):
if not petsc_dir: return None
petsc_arch = os.path.expandvars(petsc_arch)
if (not petsc_arch or '$PETSC_ARCH' in petsc_arch):
petsc_arch = ''
petsc_conf = os.path.join(petsc_dir, 'lib', 'petsc', 'conf')
if os.path.isdir(petsc_conf):
petscvariables = os.path.join(petsc_conf, 'petscvariables')
if os.path.exists(petscvariables):
conf = makefile(open(petscvariables, 'rt'))
petsc_arch = conf.get('PETSC_ARCH', '')
petsc_arch = petsc_arch.split(os.pathsep)
petsc_arch = unique(petsc_arch)
petsc_arch = [arch for arch in petsc_arch if arch]
return config.chk_petsc_arch(petsc_dir, petsc_arch)
get_petsc_arch = staticmethod(get_petsc_arch)
#@staticmethod
def chk_petsc_arch(petsc_dir, petsc_arch):
valid_archs = []
for arch in petsc_arch:
arch_path = os.path.join(petsc_dir, arch)
if os.path.isdir(arch_path):
valid_archs.append(arch)
else:
log.warn("invalid PETSC_ARCH: %s (ignored)" % arch)
return valid_archs
chk_petsc_arch = staticmethod(chk_petsc_arch)
class build(_build):
user_options = _build.user_options + cmd_petsc_opts
def initialize_options(self):
_build.initialize_options(self)
self.petsc_dir = None
self.petsc_arch = None
def finalize_options(self):
_build.finalize_options(self)
self.set_undefined_options('config',
('petsc_dir', 'petsc_dir'),
('petsc_arch', 'petsc_arch'))
self.petsc_dir = config.get_petsc_dir(self.petsc_dir)
self.petsc_arch = config.get_petsc_arch(self.petsc_dir,
self.petsc_arch)
sub_commands = \
[('build_src', lambda *args: True)] + \
_build.sub_commands
class build_src(Command):
description = "build C sources from Cython files"
user_options = [
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
]
boolean_options = ['force']
def initialize_options(self):
self.force = False
def finalize_options(self):
self.set_undefined_options('build',
('force', 'force'),
)
def run(self):
pass
class build_ext(_build_ext):
user_options = _build_ext.user_options + cmd_petsc_opts
def initialize_options(self):
_build_ext.initialize_options(self)
self.petsc_dir = None
self.petsc_arch = None
self._outputs = []
def finalize_options(self):
_build_ext.finalize_options(self)
self.set_undefined_options('build',
('petsc_dir', 'petsc_dir'),
('petsc_arch', 'petsc_arch'))
if ((sys.platform.startswith('linux') or
sys.platform.startswith('gnu') or
sys.platform.startswith('sunos')) and
sysconfig.get_config_var('Py_ENABLE_SHARED')):
py_version = sysconfig.get_python_version()
bad_pylib_dir = os.path.join(sys.prefix, "lib",
"python" + py_version,
"config")
try:
self.library_dirs.remove(bad_pylib_dir)
except ValueError:
pass
pylib_dir = sysconfig.get_config_var("LIBDIR")
if pylib_dir not in self.library_dirs:
self.library_dirs.append(pylib_dir)
if pylib_dir not in self.rpath:
self.rpath.append(pylib_dir)
if sys.exec_prefix == '/usr':
self.library_dirs.remove(pylib_dir)
self.rpath.remove(pylib_dir)
def _copy_ext(self, ext):
from copy import deepcopy
extclass = ext.__class__
fullname = self.get_ext_fullname(ext.name)
        modpath = fullname.split('.')
pkgpath = os.path.join('', *modpath[0:-1])
name = modpath[-1]
sources = list(ext.sources)
newext = extclass(name, sources)
newext.__dict__.update(deepcopy(ext.__dict__))
newext.name = name
return pkgpath, newext
def _build_ext_arch(self, ext, pkgpath, arch):
build_temp = self.build_temp
build_lib = self.build_lib
try:
self.build_temp = os.path.join(build_temp, arch)
self.build_lib = os.path.join(build_lib, pkgpath, arch)
_build_ext.build_extension(self, ext)
finally:
self.build_temp = build_temp
self.build_lib = build_lib
def get_config_arch(self, arch):
return config.Configure(self.petsc_dir, arch)
def build_extension(self, ext):
if not isinstance(ext, Extension):
return _build_ext.build_extension(self, ext)
petsc_arch = self.petsc_arch
if not petsc_arch:
petsc_arch = [ None ]
for arch in petsc_arch:
config = self.get_config_arch(arch)
ARCH = arch or config['PETSC_ARCH']
if ARCH not in self.PETSC_ARCH_LIST:
self.PETSC_ARCH_LIST.append(ARCH)
ext.language = config.language
config.log_info()
pkgpath, newext = self._copy_ext(ext)
config.configure(newext, self.compiler)
name = self.distribution.get_name()
version = self.distribution.get_version()
distdir = "%s-%s/" % (name, version)
self._build_ext_arch(newext, pkgpath, ARCH)
def build_extensions(self, *args, **kargs):
self.PETSC_ARCH_LIST = []
_build_ext.build_extensions(self, *args,**kargs)
if not self.PETSC_ARCH_LIST: return
self.build_configuration(self.PETSC_ARCH_LIST)
def build_configuration(self, arch_list):
#
template, variables = self.get_config_data(arch_list)
config_data = template % variables
#
build_lib = self.build_lib
dist_name = self.distribution.get_name()
config_file = os.path.join(build_lib, dist_name, 'lib',
dist_name.replace('4py', '') + '.cfg')
#
        def write_file(filename, data):
            with open(filename, 'w') as fh:
                fh.write(data)
execute(write_file, (config_file, config_data),
msg='writing %s' % config_file,
verbose=self.verbose, dry_run=self.dry_run)
def get_config_data(self, arch_list):
template = """\
PETSC_DIR = %(PETSC_DIR)s
PETSC_ARCH = %(PETSC_ARCH)s
"""
variables = {'PETSC_DIR' : self.petsc_dir,
'PETSC_ARCH' : os.path.pathsep.join(arch_list)}
return template, variables
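    # Illustrative note (paths and arch names below are only examples): with
    # PETSC_DIR=/opt/petsc and two architectures, the generated .cfg file
    # would contain
    #
    #   PETSC_DIR = /opt/petsc
    #   PETSC_ARCH = arch-linux-c-debug:arch-linux-c-opt
    #
    # where the arch separator is os.path.pathsep (':' shown for POSIX).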
def get_outputs(self):
self.check_extensions_list(self.extensions)
outputs = []
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
if isinstance(ext, Extension):
head, tail = os.path.split(filename)
for arch in self.petsc_arch:
outfile = os.path.join(self.build_lib,
head, arch, tail)
outputs.append(outfile)
else:
outfile = os.path.join(self.build_lib, filename)
outputs.append(outfile)
outputs = list(set(outputs))
return outputs
class install(_install):
def run(self):
_install.run(self)
class clean(_clean):
def run(self):
_clean.run(self)
from distutils.dir_util import remove_tree
if self.all:
# remove the <package>.egg_info directory
try:
egg_info = self.get_finalized_command('egg_info').egg_info
if os.path.exists(egg_info):
remove_tree(egg_info, dry_run=self.dry_run)
else:
log.debug("'%s' does not exist -- can't clean it",
egg_info)
except DistutilsError:
pass
class test(Command):
description = "run the test suite"
user_options = [('args=', None, "options")]
def initialize_options(self):
self.args = None
def finalize_options(self):
if self.args:
self.args = split_quoted(self.args)
else:
self.args = []
def run(self):
pass
class sdist(_sdist):
def run(self):
build_src = self.get_finalized_command('build_src')
build_src.run()
_sdist.run(self)
# --------------------------------------------------------------------
def append(seq, item):
if item not in seq:
seq.append(item)
def append_dict(conf, dct):
for key, values in dct.items():
if key in conf:
for value in values:
if value not in conf[key]:
conf[key].append(value)
def unique(seq):
res = []
for item in seq:
if item not in res:
res.append(item)
return res
def flaglist(flags):
conf = {
'define_macros' : [],
'undef_macros' : [],
'include_dirs' : [],
'libraries' : [],
'library_dirs' : [],
'runtime_library_dirs': [],
'extra_compile_args' : [],
'extra_link_args' : [],
}
if type(flags) is str:
flags = flags.split()
switch = '-Wl,'
newflags = []
linkopts = []
for f in flags:
if f.startswith(switch):
if len(f) > 4:
append(linkopts, f[4:])
else:
append(newflags, f)
if linkopts:
newflags.append(switch + ','.join(linkopts))
flags = newflags
append_next_word = None
for word in flags:
if append_next_word is not None:
append(append_next_word, word)
append_next_word = None
continue
switch, value = word[0:2], word[2:]
if switch == "-I":
append(conf['include_dirs'], value)
elif switch == "-D":
try:
idx = value.index("=")
macro = (value[:idx], value[idx+1:])
except ValueError:
macro = (value, None)
append(conf['define_macros'], macro)
elif switch == "-U":
append(conf['undef_macros'], value)
elif switch == "-l":
append(conf['libraries'], value)
elif switch == "-L":
append(conf['library_dirs'], value)
elif switch == "-R":
append(conf['runtime_library_dirs'], value)
elif word.startswith("-Wl"):
linkopts = word.split(',')
append_dict(conf, flaglist(linkopts[1:]))
elif word == "-rpath":
append_next_word = conf['runtime_library_dirs']
elif word == "-Xlinker":
append_next_word = conf['extra_link_args']
else:
#log.warn("unrecognized flag '%s'" % word)
pass
return conf
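# Example of the mapping performed by flaglist() above; the paths are
# placeholders, not values read from a real PETSc installation:
#
#   flaglist('-I/opt/petsc/include -L/opt/petsc/lib -lpetsc -Wl,-rpath,/opt/petsc/lib')
#   -> {'include_dirs'        : ['/opt/petsc/include'],
#       'library_dirs'        : ['/opt/petsc/lib'],
#       'libraries'           : ['petsc'],
#       'runtime_library_dirs': ['/opt/petsc/lib'],
#       ...}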
# --------------------------------------------------------------------
from distutils.text_file import TextFile
# Regexes needed for parsing Makefile-like syntaxes
import re as _re
_variable_rx = _re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = _re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = _re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def makefile(fileobj, dct=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
fp = TextFile(file=fileobj,
strip_comments=1,
skip_blanks=1,
join_lines=1)
if dct is None:
dct = {}
done = {}
notdone = {}
while 1:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
            v = v.strip()
if "$" in v:
notdone[n] = v
else:
try: v = int(v)
except ValueError: pass
done[n] = v
try: del notdone[n]
except KeyError: pass
fp.close()
# do variable interpolation here
while notdone:
for name in list(notdone.keys()):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
                            done[name] = value.strip()
else:
done[name] = value
del notdone[name]
else:
# bogus variable reference;
# just drop it since we can't deal
del notdone[name]
# save the results in the global dictionary
dct.update(done)
return dct
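# A minimal usage sketch of makefile() above (not called anywhere in this
# module); the variable names and paths are made up for the example.
def _makefile_example():
    import io
    text = io.StringIO(u"PREFIX = /usr/local\nBINDIR = $(PREFIX)/bin\n")
    values = makefile(text)
    # values == {'PREFIX': '/usr/local', 'BINDIR': '/usr/local/bin'}
    return values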
# --------------------------------------------------------------------
|
|
import os
import uuid
### arast ###
import client
import assembly
#### Single Files #####
class FileInfo(dict):
def __init__(self, filename=None, shock_url=None, shock_id=None, name=None,
create_time=None, metadata=None, direct_url=None, keep_name=False, *args):
dict.__init__(self, *args)
if filename:
assert os.path.exists(filename)
filesize = os.path.getsize(filename)
fname = os.path.basename(filename)
else:
filesize = None
fname = None
self.update({'direct_url': direct_url,
'shock_url': shock_url,
'shock_id' : shock_id,
'filesize': filesize,
'filename': fname,
'local_file': filename,
'keep_name': keep_name,
'create_time': create_time,
'metadata': metadata})
self.id = uuid.uuid4()
# def fetch_file(self, outdir=None):
# """ If file has a direct_url, download the file"""
# downloaded = assembly.curl_download_url(self.direct_url, outdir=outdir)
# self.update({'filesize': os.path.getsize(downloaded),
# 'filename': os.path.basename(self.direct_url),
# 'local_file': downloaded})
##### Set of Files ######
class FileSet(dict):
def __init__(self, set_type, file_infos,
**kwargs):
dict.__init__(self)
self.update({'type': set_type,
'file_infos': [],
'tags': []})
self.update(kwargs)
self.id = uuid.uuid4()
if type(file_infos) is list:
for f in file_infos:
self['file_infos'].append(f)
else:
self['file_infos'] = [file_infos]
@property
def files(self):
""" Returns file paths of all files in set"""
return [f['local_file'] for f in self['file_infos']]
@property
def shock_nodes(self):
return [fi['shock_id'] for fi in self['file_infos']]
@property
def name(self):
        return self.get('name') or None
@property
def type(self):
return self['type'] or None
def add_tag(self, tag):
        if tag not in self['tags']:
self['tags'].append(tag)
def update_files(self, files):
self['file_infos'] = [FileInfo(f) for f in files]
def update_fileinfo(self, fileinfos):
self['file_infos'] = fileinfos
class ReadSet(FileSet):
def __init__(self, set_type, file_infos, **kwargs):
self['insert'] = None
self['stdev'] = None
FileSet.__init__(self, set_type, file_infos, **kwargs)
self.__dict__.update(kwargs)
self['type'] = set_type
@property
def insert(self):
return self['insert']
@property
def stdev(self):
return self['stdev']
class ContigSet(FileSet):
def __init__(self, set_type, file_infos, **kwargs):
FileSet.__init__(self, set_type, file_infos, **kwargs)
self.__dict__.update(kwargs)
class ScaffoldSet(FileSet):
def __init__(self, set_type, file_infos, **kwargs):
FileSet.__init__(self, set_type, file_infos, **kwargs)
self.__dict__.update(kwargs)
class ReferenceSet(FileSet):
def __init__(self, set_type, file_infos, **kwargs):
FileSet.__init__(self, set_type, file_infos, **kwargs)
self.__dict__.update(kwargs)
assert len(file_infos) < 2
def set_factory(set_type, file_infos, keep_name=False, **kwargs):
"""
    Creates a particular FileSet (e.g. ReadSet) depending on set_type.
    file_infos is a list of filepaths, FileInfos, or a FileSet.
    If file_infos is a FileSet, it is converted to the correct class
    according to set_type.
"""
if isinstance(file_infos, FileSet):
file_infos = file_infos['file_infos']
elif type(file_infos) is not list:
file_infos = [file_infos]
for i,f in enumerate(file_infos):
if type(f) is not FileInfo and os.path.exists(f):
file_infos[i] = FileInfo(f, keep_name=keep_name)
if set_type in ['paired', 'single']:
return ReadSet(set_type, file_infos, **kwargs)
elif set_type == 'contigs':
return ContigSet(set_type, file_infos, **kwargs)
elif set_type == 'scaffolds':
return ScaffoldSet(set_type, file_infos, **kwargs)
elif set_type == 'reference':
return ReferenceSet(set_type, file_infos, **kwargs)
else:
return FileSet(set_type, file_infos, **kwargs)
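# Illustrative sketch (not used by the library itself): build a paired-end
# ReadSet from FileInfo objects that only carry placeholder Shock node ids.
def _set_factory_example():
    infos = [FileInfo(shock_id='node-1'), FileInfo(shock_id='node-2')]
    readset = set_factory('paired', infos, insert=300, stdev=50)
    # readset.shock_nodes == ['node-1', 'node-2'] and readset.insert == 300
    return readset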
#### All Filesets #####
class FileSetContainer(dict):
def __init__(self, filesets=None):
self.filesets = filesets if filesets else []
def find_type(self, set_type):
return [fileset for fileset in self.filesets if fileset['type'] == set_type]
def find(self, id):
for fileset in self.filesets:
if fileset.id == id: return fileset
def find_and_update(self, id, newdict):
self.find(id).update(newdict)
@property
def readsets(self):
""" Returns a list of all ReadSet objects"""
return [fileset for fileset in self.filesets if type(fileset) is ReadSet]
@property
def readsets_paired(self):
""" Returns a list of all paired-end ReadSet objects"""
return [readset for readset in self.readsets if readset['type'] == 'paired']
@property
def readsets_single(self):
""" Returns a list of all single-end ReadSet objects"""
return [readset for readset in self.readsets if readset['type'] == 'single']
@property
def readfiles(self):
return [readfile for readset in self.readsets for readfile in readset.files]
@property
def readfiles_paired(self):
return [readfile for readset in self.readsets_paired for readfile in readset.files]
@property
def readfiles_single(self):
return [readfile for readset in self.readsets_single for readfile in readset.files]
@property
def contigsets(self):
""" Returns a list of all ContigSet objects"""
return [fileset for fileset in self.filesets if type(fileset) is ContigSet]
@property
def contigfiles(self):
return [contigfile for contigset in self.contigsets for contigfile in contigset.files]
@property
def scaffoldsets(self):
""" Returns a list of all ScaffoldSet objects"""
return [fileset for fileset in self.filesets if type(fileset) is ScaffoldSet]
@property
def scaffoldfiles(self):
return [scaffoldfile for scaffoldset in self.scaffoldsets for scaffoldfile in scaffoldset.files]
@property
def referencesets(self):
""" Returns a list of all ReferenceSet objects"""
return [fileset for fileset in self.filesets if type(fileset) is ReferenceSet]
@property
def referencefiles(self):
return [referencefile for referenceset in self.referencesets for referencefile in referenceset.files]
@property
def bamfiles(self):
pass
def filepaths(filesets):
""" Return a list of filepaths from list of FileSets """
filepaths = []
for fs in filesets:
filepaths += fs.files
return filepaths
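# Illustrative sketch (not used elsewhere): group FileSets in a container and
# query them by type. All ids are placeholders.
def _fileset_container_example():
    reads = set_factory('paired', [FileInfo(shock_id='n1'), FileInfo(shock_id='n2')])
    contigs = set_factory('contigs', FileInfo(shock_id='n3'))
    container = FileSetContainer([reads, contigs])
    # container.readsets_paired == [reads] and container.contigsets == [contigs]
    return container.find_type('paired')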
######## ARAST EXCEPTIONS #########
class ArastUserInterrupt(BaseException):
pass
class ArastDataInputError(Exception):
pass
class ArastDataOutputError(Exception):
pass
class ArastClientRequestError(Exception):
pass
|
|
# Copyright 2012-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import coredata
from . import environment
from . import dependencies
from . import mlog
import copy, os
from .mesonlib import File, flatten, MesonException
known_basic_kwargs = {'install' : True,
'c_pch' : True,
'cpp_pch' : True,
'c_args' : True,
'cpp_args' : True,
'cs_args' : True,
'vala_args' : True,
'link_args' : True,
'link_depends': True,
'link_with' : True,
'include_directories': True,
'dependencies' : True,
'install_dir' : True,
'main_class' : True,
'gui_app' : True,
'extra_files' : True,
'install_rpath' : True,
'resources' : True,
'sources' : True,
'objects' : True,
'native' : True,
}
known_shlib_kwargs = known_basic_kwargs.copy()
known_shlib_kwargs.update({'version' : True,
'soversion' : True,
'name_prefix' : True,
'name_suffix' : True,
'vs_module_defs' : True})
backslash_explanation = \
'''Compiler arguments have a backslash "\\" character. This is unfortunately not
permitted. The reason for this is that backslash is a shell quoting character
that behaves differently across different systems. Because of this it is not
possible to make it work reliably across all the platforms Meson needs to
support.
There are several different ways of working around this issue. Most of the time
you are using this to provide a -D define to your compiler. Try instead to
create a config.h file and put all of your definitions in it using
configure_file().
Another approach is to move the backslashes into the source and have the other
bits in the def. So you would have an arg -DPLAIN_TEXT="foo" and then in your
C sources something like this:
const char *fulltext = "\\\\" PLAIN_TEXT;
We are fully aware that these are not really usable or pleasant ways to do
this but it's the best we can do given the way shell quoting works.
'''
class InvalidArguments(MesonException):
pass
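# Minimal illustration of the restriction explained in backslash_explanation
# above (the argument is only an example): BuildTarget.add_compiler_args()
# below rejects any compiler argument containing a literal backslash.
def _backslash_check_example(arg='-DPLAIN_TEXT="foo\\bar"'):
    if '\\' in arg:
        raise InvalidArguments(backslash_explanation)
    return arg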
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = {}
self.compilers = []
self.cross_compilers = []
self.global_args = {}
self.global_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.install_scripts = []
self.postconf_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
def has_language(self, language):
for i in self.compilers:
if i.get_language() == language:
return True
return False
def add_compiler(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
if self.has_language(compiler.get_language()):
return
self.compilers.append(compiler)
def add_cross_compiler(self, compiler):
if len(self.cross_compilers) == 0:
self.static_cross_linker = self.environment.detect_static_linker(compiler)
for i in self.cross_compilers:
if i.get_language() == compiler.get_language():
return
self.cross_compilers.append(compiler)
def get_project(self):
return self.projects['']
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler):
return self.global_args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler):
return self.global_link_args.get(compiler.get_language(), [])
class IncludeDirs():
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects():
def __init__(self, target, srclist):
self.target = target
self.srclist = srclist
class BuildTarget():
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.name = name
self.subdir = subdir
        self.subproject = subproject # Cannot be calculated from subdir as the subproject dirname can be changed per project.
self.is_cross = is_cross
self.sources = []
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_depends = []
self.filename = 'no_name'
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.process_sourcelist(sources)
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
if len(self.sources) == 0 and \
len(self.generated) == 0 and \
len(self.objects) == 0:
raise InvalidArguments('Build target %s has no sources.' % name)
self.validate_sources()
def get_id(self):
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons.
base = self.name + self.type_suffix()
if self.subproject == '':
return base
return self.subproject + '@@' + base
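    # For example (names are illustrative): an Executable 'foo' defined in
    # subproject 'bar' gets the id 'bar@@foo@exe', while the same target in
    # the top-level project is just 'foo@exe'.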
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, known_basic_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
            if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.log(mlog.bold('Warning:'), 'Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
self.objects.append(s)
elif isinstance(s, ExtractedObjects):
self.objects.append(s)
else:
raise InvalidArguments('Bad object in target %s.' % self.name)
def process_sourcelist(self, sources):
if not isinstance(sources, list):
sources = [sources]
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
                if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, GeneratedList) or isinstance(s, CustomTarget):
self.generated.append(s)
else:
raise InvalidArguments('Bad source in target %s.' % self.name)
def validate_sources(self):
if len(self.sources) > 0:
firstname = self.sources[0]
if isinstance(firstname, File):
firstname = firstname.fname
first = os.path.split(firstname)[1]
(base, suffix) = os.path.splitext(first)
if suffix == '.rs':
if self.name != base:
raise InvalidArguments('In Rust targets, the first source file must be named projectname.rs.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
if not isinstance(d, list):
d = [d]
newd = []
for i in d:
if hasattr(i, 'held_object'):
newd.append(i.held_object)
else:
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
if 'dependencies' in self.kwargs:
self.kwargs['dependencies'] = self.unpack_holder(self.kwargs['dependencies'])
if 'link_with' in self.kwargs:
self.kwargs['link_with'] = self.unpack_holder(self.kwargs['link_with'])
def extract_objects(self, srcargs):
obj_src = []
for srclist in srcargs:
if not isinstance(srclist, list):
srclist = [srclist]
for src in srclist:
if not isinstance(src, str):
raise MesonException('Extraction arguments must be strings.')
src = File(False, self.subdir, src)
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self):
return ExtractedObjects(self, self.sources)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_custom_install_dir(self):
return self.custom_install_dir
def process_kwargs(self, kwargs, environment):
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = kwargs.get('link_with', [])
if not isinstance(llist, list):
llist = [llist]
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
self.link(linktarget)
c_pchlist = kwargs.get('c_pch', [])
if not isinstance(c_pchlist, list):
c_pchlist = [c_pchlist]
self.add_pch('c', c_pchlist)
cpp_pchlist = kwargs.get('cpp_pch', [])
if not isinstance(cpp_pchlist, list):
cpp_pchlist = [cpp_pchlist]
self.add_pch('cpp', cpp_pchlist)
clist = kwargs.get('c_args', [])
if not isinstance(clist, list):
clist = [clist]
self.add_compiler_args('c', clist)
cpplist = kwargs.get('cpp_args', [])
if not isinstance(cpplist, list):
cpplist = [cpplist]
self.add_compiler_args('cpp', cpplist)
cslist = kwargs.get('cs_args', [])
if not isinstance(cslist, list):
cslist = [cslist]
self.add_compiler_args('cs', cslist)
valalist = kwargs.get('vala_args', [])
if not isinstance(valalist, list):
valalist = [valalist]
self.add_compiler_args('vala', valalist)
self.link_args = kwargs.get('link_args', [])
if not isinstance(self.link_args, list):
self.link_args = [self.link_args]
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
self.link_depends = kwargs.get('link_depends', [])
if not isinstance(self.link_depends, list):
self.link_depends = [self.link_depends]
for i in self.link_depends:
if not isinstance(i, str):
raise InvalidArguments('Link_depends arguments must be strings.')
inclist = kwargs.get('include_directories', [])
if not isinstance(inclist, list):
inclist = [inclist]
self.add_include_dirs(inclist)
deplist = kwargs.get('dependencies', [])
if not isinstance(deplist, list):
deplist = [deplist]
self.add_external_deps(deplist)
self.custom_install_dir = kwargs.get('install_dir', None)
if self.custom_install_dir is not None:
if not isinstance(self.custom_install_dir, str):
raise InvalidArguments('Custom_install_dir must be a string')
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = kwargs.get('extra_files', [])
if isinstance(extra_files, str):
extra_files = [extra_files]
for i in extra_files:
if not isinstance(i, str):
raise InvalidArguments('Arguments to extra_files must be strings.')
trial = os.path.join(environment.get_source_dir(), self.subdir, i)
if not(os.path.isfile(trial)):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
resources = kwargs.get('resources', [])
if not isinstance(resources, list):
resources = [resources]
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if len(name_prefix) != 0:
raise InvalidArguments('Array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('Name prefix must be a string.')
self.prefix = name_prefix
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if len(name_suffix) != 0:
raise InvalidArguments('Array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('Name suffix must be a string.')
self.suffix = name_suffix
def get_subdir(self):
return self.subdir
def get_filename(self):
return self.filename
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self):
transitive_deps = []
for t in self.link_targets:
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies()
return transitive_deps
def get_basename(self):
return self.name
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
            return []
def get_include_dirs(self):
return self.include_dirs
def add_external_deps(self, deps):
if not isinstance(deps, list):
deps = [deps]
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_external_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
else:
raise InvalidArguments('Argument is not an external dependency')
def get_external_deps(self):
return self.external_deps
def link(self, target):
if not isinstance(target, list):
target = [target]
for t in target:
if hasattr(t, 'held_object'):
t = t.held_object
if not isinstance(t, StaticLibrary) and \
not isinstance(t, SharedLibrary):
raise InvalidArguments('Link target is not library.')
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target %s.' % self.name)
self.link_targets.append(t)
def set_generated(self, genlist):
for g in genlist:
if not(isinstance(g, GeneratedList)):
raise InvalidArguments('Generated source argument is not the output of a generator.')
self.generated.append(g)
def add_pch(self, language, pchlist):
if len(pchlist) == 0:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('Pch argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = flatten(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if isinstance(a, str) and '\\' in a:
raise InvalidArguments(backslash_explanation)
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliaslist(self):
return []
class Generator():
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires one and only one positional argument')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, Executable) and not isinstance(exe, dependencies.ExternalProgram):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.process_kwargs(kwargs)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = kwargs['output']
if not isinstance(outputs, list):
outputs = [outputs]
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
            if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if '/' in rule or '\\' in rule:
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
def get_base_outnames(self, inname):
plainname = os.path.split(inname)[1]
basename = plainname.split('.')[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
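    # For example (file names are illustrative): with output set to
    # ['@BASENAME@.c', '@BASENAME@.h'], get_base_outnames('src/foo.idl')
    # returns ['foo.c', 'foo.h'].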
def get_arglist(self):
return self.arglist
class GeneratedList():
def __init__(self, generator, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.extra_args = extra_args
def add_file(self, newfile):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_infilelist(self):
return self.infilelist
def get_outfilelist(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.prefix = ''
self.suffix = environment.get_exe_suffix()
suffix = environment.get_exe_suffix()
if len(self.sources) > 0 and self.sources[0].endswith('.cs'):
suffix = 'exe'
if suffix != '':
self.filename = self.name + '.' + suffix
else:
self.filename = self.name
def type_suffix(self):
return "@exe"
class StaticLibrary(BuildTarget):
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if len(self.sources) > 0 and self.sources[0].endswith('.cs'):
raise InvalidArguments('Static libraries not supported for C#.')
if not hasattr(self, 'prefix'):
self.prefix = environment.get_static_lib_prefix()
self.suffix = environment.get_static_lib_suffix()
if len(self.sources) > 0 and self.sources[0].endswith('.rs'):
self.suffix = 'rlib'
self.filename = self.prefix + self.name + '.' + self.suffix
def get_import_filename(self):
return self.filename
def get_osx_filename(self):
return self.get_filename()
def type_suffix(self):
return "@sta"
class SharedLibrary(BuildTarget):
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.version = None
self.soversion = None
self.vs_module_defs = None
        super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if len(self.sources) > 0 and self.sources[0].endswith('.cs'):
prefix = 'lib'
suffix = 'dll'
else:
prefix = environment.get_shared_lib_prefix()
suffix = environment.get_shared_lib_suffix()
if not hasattr(self, 'prefix'):
self.prefix = prefix
if not hasattr(self, 'suffix'):
if len(self.sources) > 0 and self.sources[0].endswith('.rs'):
self.suffix = 'rlib'
else:
self.suffix = suffix
self.importsuffix = environment.get_import_lib_suffix()
self.filename = self.prefix + self.name + '.' + self.suffix
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'version' in kwargs:
self.set_version(kwargs['version'])
if 'soversion' in kwargs:
self.set_soversion(kwargs['soversion'])
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if (os.path.isabs(path)):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
def check_unknown_kwargs(self, kwargs):
self.check_unknown_kwargs_int(kwargs, known_shlib_kwargs)
def get_shbase(self):
return self.prefix + self.name + '.' + self.suffix
def get_import_filename(self):
return self.prefix + self.name + '.' + self.importsuffix
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_filename(self):
'''Works on all platforms except OSX, which does its own thing.'''
fname = self.get_shbase()
if self.version is None:
return fname
else:
return fname + '.' + self.version
def get_osx_filename(self):
if self.version is None:
return self.get_shbase()
return self.prefix + self.name + '.' + self.version + '.' + self.suffix
def set_version(self, version):
if not isinstance(version, str):
raise InvalidArguments('Shared library version is not a string.')
self.version = version
def set_soversion(self, version):
if isinstance(version, int):
version = str(version)
if not isinstance(version, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
self.soversion = version
def get_aliaslist(self):
aliases = []
if self.soversion is not None:
aliases.append(self.get_shbase() + '.' + self.soversion)
if self.version is not None:
aliases.append(self.get_shbase())
return aliases
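    # For example (values are illustrative): prefix 'lib', name 'foo',
    # suffix 'so', version '1.2.3' and soversion '1' give the real file
    # 'libfoo.so.1.2.3' plus the aliases ['libfoo.so.1', 'libfoo.so'].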
def type_suffix(self):
return "@sha"
class CustomTarget:
known_kwargs = {'input' : True,
'output' : True,
'command' : True,
'install' : True,
'install_dir' : True,
'build_always' : True,
'depends' : True,
'depend_files' : True,
}
def __init__(self, name, subdir, kwargs):
self.name = name
self.subdir = subdir
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.process_kwargs(kwargs)
self.extra_files = []
self.install_rpath = ''
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.log(mlog.bold('Warning:'), 'Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, BuildTarget) or isinstance(c, CustomTarget):
deps.append(c)
return deps
def process_kwargs(self, kwargs):
self.sources = kwargs.get('input', [])
if not isinstance(self.sources, list):
self.sources = [self.sources]
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.output = kwargs['output']
if not isinstance(self.output, list):
self.output = [self.output]
for i in self.output:
if not(isinstance(i, str)):
raise InvalidArguments('Output argument not a string.')
if '/' in i:
raise InvalidArguments('Output must not contain a path segment.')
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
cmd = kwargs['command']
if not(isinstance(cmd, list)):
cmd = [cmd]
final_cmd = []
for i, c in enumerate(cmd):
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
raise InvalidArguments('Tried to use not found external program in a build rule.')
final_cmd += c.get_command()
elif isinstance(c, BuildTarget) or isinstance(c, CustomTarget):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
# Hackety hack, only supports one level of flattening. Should really
                # work to arbitrary depth.
for s in c:
if not isinstance(s, str):
raise InvalidArguments('Array as argument %d contains a non-string.' % i)
final_cmd.append(s)
elif isinstance(c, File):
final_cmd.append(os.path.join(c.subdir, c.fname))
else:
raise InvalidArguments('Argument %s in "command" is invalid.' % i)
self.command = final_cmd
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" not specified.')
self.install_dir = kwargs['install_dir']
if not(isinstance(self.install_dir, str)):
raise InvalidArguments('"install_dir" must be a string.')
else:
self.install = False
self.build_always = kwargs.get('build_always', False)
if not isinstance(self.build_always, bool):
raise InvalidArguments('Argument build_always must be a boolean.')
extra_deps = kwargs.get('depends', [])
if not isinstance(extra_deps, list):
extra_deps = [extra_deps]
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, CustomTarget) and not isinstance(ed, BuildTarget):
raise InvalidArguments('Can only depend on toplevel targets.')
self.extra_depends.append(ed)
depend_files = kwargs.get('depend_files', [])
if not isinstance(depend_files, list):
depend_files = [depend_files]
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type in depend_files.')
def get_basename(self):
return self.name
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_subdir(self):
return self.subdir
def get_filename(self):
return self.output
def get_aliaslist(self):
return []
def get_sources(self):
return self.sources
def get_generated_sources(self):
return []
def type_suffix(self):
return "@cus"
class RunTarget:
def __init__(self, name, command, args, subdir):
self.name = name
self.command = command
self.args = args
self.subdir = subdir
def get_id(self):
return self.name + self.type_suffix()
def get_basename(self):
return self.name
def get_dependencies(self):
return []
def get_generated_sources(self):
return []
def get_sources(self):
return []
def get_subdir(self):
return self.subdir
def should_install(self):
return False
def get_filename(self):
return self.name
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
        super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
self.filename = self.name + '.jar'
incdirs = kwargs.get('include_directories', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
class ConfigureFile():
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData():
def __init__(self):
super().__init__()
self.values = {}
def get(self, name):
return self.values[name]
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data():
def __init__(self, in_sourcetree, source_subdir, sources, install_dir):
self.in_sourcetree = in_sourcetree
self.source_subdir = source_subdir
self.sources = sources
self.install_dir = install_dir
class InstallScript:
def __init__(self, cmd_arr):
assert(isinstance(cmd_arr, list))
self.cmd_arr = cmd_arr
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from mock import Mock, patch, call
from preggy import expect
from materialgirl import Materializer
from materialgirl.storage.memory import InMemoryStorage
from tests.base import TestCase
class TestMaterialGirl(TestCase):
@staticmethod
def woots_generator():
woots = ['woot1', 'woot2', 'woot3', 'woot4']
for woot in woots:
yield woot
def test_can_create_girl(self):
storage = InMemoryStorage()
girl = Materializer(storage=storage)
expect(girl).not_to_be_null()
expect(girl.storage).not_to_be_null()
expect(girl.storage).to_equal(storage)
def test_can_add_material(self):
storage = InMemoryStorage()
girl = Materializer(storage=storage)
girl.add_material(
'test',
lambda: 'woot'
)
girl.run()
expect(storage.items).to_include('test')
expect(storage.items['test']).to_equal('woot')
def test_can_add_material_with_expiration_and_graceperiod(self):
storage = InMemoryStorage()
girl = Materializer(storage=storage)
girl.add_material(
'test',
lambda: 'woot',
expiration=2,
grace_period=4
)
girl.run()
expect(storage.items).to_include('test')
expect(storage.items['test']).to_equal('woot')
def test_can_expire_materials(self):
storage = InMemoryStorage()
girl = Materializer(storage=storage)
girl.add_material(
'test',
lambda: 'woot'
)
girl.run()
expect(storage.items).to_length(1)
expect(storage.items['test']).to_equal('woot')
girl.expire('test')
expect(girl.is_expired('test')).to_be_true()
expect(storage.items).to_length(1)
expect(storage.items.get('test')).to_be_null()
expect(storage.items['_expired_test']).to_equal('woot')
def test_can_update_expired_materials(self):
storage = InMemoryStorage()
girl = Materializer(storage=storage)
woots = self.woots_generator()
girl.add_material(
'test',
lambda: next(woots)
)
girl.run()
expect(storage.items).to_length(1)
expect(storage.items['test']).to_equal('woot1')
storage.expire('test')
girl.run()
expect(storage.items).to_length(1)
expect(storage.items['test']).to_equal('woot2')
def test_can_update_deleted_materials(self):
storage = InMemoryStorage()
girl = Materializer(storage=storage)
woots = self.woots_generator()
girl.add_material(
'test',
lambda: next(woots)
)
girl.run()
expect(storage.items).to_length(1)
expect(storage.items['test']).to_equal('woot1')
storage.items = {}
girl.run()
expect(storage.items).to_length(1)
expect(storage.items['test']).to_equal('woot2')
def test_dont_update_not_expired_materials(self):
storage = InMemoryStorage()
girl = Materializer(storage=storage)
woots = self.woots_generator()
girl.add_material(
'test',
lambda: next(woots)
)
girl.run()
expect(storage.items).to_length(1)
expect(storage.items['test']).to_equal('woot1')
girl.run()
expect(storage.items).to_length(1)
expect(storage.items['test']).to_equal('woot1')
def test_raises_if_key_not_found(self):
storage = InMemoryStorage()
girl = Materializer(storage=storage)
try:
girl.get('test')
except ValueError:
err = sys.exc_info()[1]
expect(err).to_have_an_error_message_of(
'Key test not found in materials. Maybe you forgot to call "add_material" for this key?'
)
else:
assert False, "Should not have gotten this far"
try:
girl.is_expired('test')
except ValueError:
err = sys.exc_info()[1]
expect(err).to_have_an_error_message_of(
'Key test not found in materials. Maybe you forgot to call "add_material" for this key?'
)
else:
assert False, "Should not have gotten this far"
try:
girl.expire('test')
except ValueError:
err = sys.exc_info()[1]
expect(err).to_have_an_error_message_of(
'Key test not found in materials. Maybe you forgot to call "add_material" for this key?'
)
else:
assert False, "Should not have gotten this far"
def test_can_get_value_after_material_girl_run(self):
storage = InMemoryStorage()
girl = Materializer(storage=storage)
girl.add_material(
'test',
lambda: 'woot'
)
girl.run()
value = girl.get('test')
expect(value).to_equal('woot')
def test_can_get_value_if_material_girl_not_run(self):
storage = InMemoryStorage()
girl = Materializer(storage=storage)
girl.add_material(
'test',
lambda: 'woot'
)
value = girl.get('test')
expect(value).to_equal('woot')
@patch('logging.info')
def test_can_lock_key(self, logging_info_mock):
storage = Mock(
store=Mock(),
acquire_lock=Mock(),
release_lock=Mock()
)
girl = Materializer(storage=storage)
girl.add_material(
'test1',
lambda: 'woot1'
)
girl.add_material(
'test2',
lambda: 'woot2'
)
girl.run()
expect(storage.store.call_count).to_equal(2)
expect(storage.acquire_lock.call_count).to_equal(2)
expect(storage.release_lock.call_count).to_equal(2)
expect(logging_info_mock.call_count).to_equal(10)
@patch('logging.info')
def test_can_skip_locked_key(self, logging_info_mock):
storage = Mock(
store=Mock(),
acquire_lock=Mock(return_value=None),
release_lock=Mock()
)
girl = Materializer(storage=storage)
girl.add_material(
'test1',
lambda: 'woot1'
)
girl.add_material(
'test2',
lambda: 'woot2'
)
girl.run()
expect(storage.store.call_count).to_equal(0)
expect(storage.acquire_lock.call_count).to_equal(2)
expect(storage.release_lock.call_count).to_equal(0)
expect(logging_info_mock.call_count).to_equal(4)
def test_can_lock_key_without_timeout(self):
storage = Mock(store=Mock(), acquire_lock=Mock())
girl = Materializer(storage=storage)
girl.add_material(
'test1',
lambda: 'woot'
)
girl.add_material(
'test2',
lambda: 'woot'
)
girl.run()
expect(storage.store.call_count).to_equal(2)
expect(storage.acquire_lock.call_count).to_equal(2)
storage.acquire_lock.assert_has_calls([
call(
'test1',
timeout=None
), call(
'test2',
timeout=None
)
])
def test_can_lock_key_with_timeout(self):
storage = Mock(store=Mock(), acquire_lock=Mock())
girl = Materializer(storage=storage)
girl.add_material(
'test1',
lambda: 'woot',
lock_timeout=1
)
girl.add_material(
'test2',
lambda: 'woot',
lock_timeout=2
)
girl.run()
expect(storage.store.call_count).to_equal(2)
expect(storage.acquire_lock.call_count).to_equal(2)
storage.acquire_lock.assert_has_calls([
call(
'test1',
timeout=1
), call(
'test2',
timeout=2
)
])
def test_can_miss_the_cache(self):
storage = Mock(retrieve=Mock(return_value=None))
girl = Materializer(storage=storage, load_on_cachemiss=False)
girl.add_material('test', lambda: 'woot')
girl.run()
value = girl.get('test')
expect(value).to_be_null()
expect(storage.acquire_lock.call_count).to_equal(1)
storage.store.assert_called_once_with(
'test',
'woot',
expiration=10,
grace_period=0
)
|
|
#!/usr/bin/python
from neutrino import nparser, token, ast, data
import unittest
ar = lambda *e: ast.Array(e)
df = lambda i, v, b: ast.LocalDeclaration(i, False, v, b)
lm = lambda s, b: ast.Lambda([ast.Method(s, b)])
lt = ast.Literal
nd = lambda n, v: ast.NamespaceDeclaration([], n, v)
md = lambda s, b: ast.MethodDeclaration(ast.Method(s, b))
fd = lambda s, b: ast.FunctionDeclaration(ast.Method(s, b))
fpm = lambda n, g, *t: ast.Parameter(n, t, g)
pm = lambda n, *t: fpm(n, ast.Guard.any(), *t)
sq = lambda *e: ast.Sequence(e)
qt = ast.Quote
im = ast.Import
eq = ast.Guard.eq
is_ = ast.Guard.is_
any = ast.Guard.any
ST = data._SUBJECT
SL = data._SELECTOR
TN = data._TRANSPORT
SN = data._SYNC
ix = data.Operation.infix
def ut(phase, *elements):
return ast.Module(None).add_element(*elements)
def mu(*phases):
result = ast.Module(None)
for (phase, elements) in phases:
result.add_element(*elements)
return result
def ls(*params):
prefix = [
fpm(nm(['self']), any(), ST),
fpm(None, eq(lt(data.Operation.call())), SL),
fpm(None, eq(lt(SN)), TN)
]
return ast.Signature(prefix + list(params), False, None)
def nm(names, phase=0):
if isinstance(names, list):
return data.Identifier(phase, data.Path(names))
else:
return data.Identifier(phase, data.Path([names]))
def id(names, phase=0):
name = nm(names, phase)
return ast.Variable(ident=name)
def bn(left, op, right):
return ast.Invocation([
ast.Argument(ST, left),
ast.Argument(SL, ast.Literal(data.Operation.infix(op))),
ast.Argument(TN, ast.Literal(SN)),
ast.Argument(0, right)
])
def cl(fun, *poss):
args = [
ast.Argument(ST, fun),
ast.Argument(SL, ast.Literal(data.Operation.call())),
ast.Argument(TN, ast.Literal(SN)),
]
for i in xrange(len(poss)):
args.append(ast.Argument(i, poss[i]))
return ast.Invocation(args)
def mt(fun, name, *poss):
args = [
ast.Argument(ST, fun),
ast.Argument(SL, name),
ast.Argument(TN, ast.Literal(SN)),
]
for i in xrange(len(poss)):
pos = poss[i]
if type(pos) == tuple:
args.append(ast.Argument(*pos))
else:
args.append(ast.Argument(i, pos))
return ast.Invocation(args)
class ParserTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def check_expression(self, input, expected):
found = nparser.Parser(token.tokenize(input), ast.Module("")).parse_expression(False)
# Convert the asts to strings because that's just infinitely easier to
# debug when assertions fail. Of course that requires that ast string
# conversion is sane, which it is.
self.assertEquals(str(expected), str(found))
def check_program(self, input, expected):
found = nparser.Parser(token.tokenize(input), ast.Module("")).parse_program()
self.assertEquals(unicode(expected), unicode(found))
def test_atomic_expressions(self):
test = self.check_expression
test('1', lt(1))
test('"foo"', lt('foo'))
test('$foo', id('foo'))
test('@foo', id('foo', -1))
test('@foo:bar', id(['foo', 'bar'], -1))
test('(1)', lt(1))
test('((($foo)))', id('foo'))
test('[]', ar())
test('[1]', ar(lt(1)))
test('[2, 3]', ar(lt(2), lt(3)))
test('[4, 5, 6]', ar(lt(4), lt(5), lt(6)))
test('[7, [8, [9]]]', ar(lt(7), ar(lt(8), ar(lt(9)))))
test('null', lt(None))
test('true', lt(True))
test('false', lt(False))
def test_calls(self):
test = self.check_expression
test('1 + 2', bn(lt(1), '+', lt(2)))
test('1 + 2 + 3', bn(bn(lt(1), '+', lt(2)), '+', lt(3)))
test('$a()', cl(id("a")))
test('$a()()', cl(cl(id("a"))))
test('$a(1)', cl(id("a"), lt(1)))
test('$a(1, 2)', cl(id("a"), lt(1), lt(2)))
test('$a(1, 2, 3)', cl(id("a"), lt(1), lt(2), lt(3)))
test('$a(1)(2)(3)', cl(cl(cl(id("a"), lt(1)), lt(2)), lt(3)))
def test_methods(self):
test = self.check_expression
test('$a.foo(1)', mt(id("a"), lt(ix("foo")), lt(1)))
test('$a.foo(1, 2)', mt(id("a"), lt(ix("foo")), lt(1), lt(2)))
test('$a.foo(1, 2, 3)', mt(id("a"), lt(ix("foo")), lt(1), lt(2), lt(3)))
test('$a.foo(x: 1)', mt(id("a"), lt(ix("foo")), ("x", lt(1))))
test('$a.foo(x: 1, y: 2)', mt(id("a"), lt(ix("foo")), ("x", lt(1)), ("y", lt(2))))
test('$a.foo(1: 1, 0: 2)', mt(id("a"), lt(ix("foo")), (1, lt(1)), (0, lt(2))))
def test_sequence(self):
test = self.check_expression
test('{}', lt(None))
test('{1;}', lt(1))
test('{1; 2;}', sq(lt(1), lt(2)))
test('{1; 2; 3;}', sq(lt(1), lt(2), lt(3)))
test('{1; 2; 3; 4;}', sq(lt(1), lt(2), lt(3), lt(4)))
test('{1; {2;} 3; 4;}', sq(lt(1), lt(2), lt(3), lt(4)))
def test_local_definitions(self):
test = self.check_expression
test('{ def $x := 4; $x; }', df(nm("x"), lt(4), id("x")))
test('{ def $x := 4; $x; $y; }', df(nm("x"), lt(4), sq(id("x"), id("y"))))
test('{ def $x := 4; }', df(nm("x"), lt(4), lt(None)))
test('{ $x; $y; def $x := 4; }', sq(id("x"), id("y"), df(nm("x"), lt(4), lt(None))))
def test_lambda(self):
test = self.check_expression
test('fn () => $x', lm(ls(), id("x")))
test('fn ($x) => $x', lm(ls(pm(nm("x"), 0)), id("x")))
test('fn ($x, $y, $z) => $x', lm(ls(pm(nm("x"), 0), pm(nm("y"), 1), pm(nm("z"), 2)), id("x")))
test('fn ($x, $y) => $x', lm(ls(pm(nm("x"), 0), pm(nm("y"), 1)), id("x")))
test('fn ($x, $y, $z) => $x', lm(ls(pm(nm("x"), 0), pm(nm("y"), 1), pm(nm("z"), 2)), id("x")))
test('fn => $x', lm(ls(), id("x")))
test('fn $x => $x', lm(ls(pm(nm("x"), 0)), id("x")))
test('fn $x { }', lm(ls(pm(nm("x"), 0)), lt(None)))
test('fn $x { $x; }', lm(ls(pm(nm("x"), 0)), id("x")))
test('fn { }', lm(ls(), lt(None)))
test('fn { $x; }', lm(ls(), id("x")))
test('fn () { }', lm(ls(), lt(None)))
test('fn ($x) { $x; }', lm(ls(pm(nm("x"), 0)), id("x")))
test('fn (x: $x) { }', lm(ls(pm(nm("x"), "x", 0)), lt(None)))
test('fn (y: $x) { }', lm(ls(pm(nm("x"), "y", 0)), lt(None)))
test('fn (x: $x, y: $y) { }', lm(ls(pm(nm("x"), "x", 0), pm(nm("y"), "y", 1)), lt(None)))
test('fn x: $x { }', lm(ls(pm(nm("x"), "x", 0)), lt(None)))
def test_program_toplevel_definition(self):
test = self.check_program
test('def $x := 5;', ut(0, nd(nm("x"), lt(5))))
test('', ut(0))
test('def $x := 5; def $y := 6;', ut(0, nd(nm("x"), lt(5)), nd(nm("y"), lt(6))))
test('def @x := 5;', mu(
(-1, [nd(nm("x", -1), lt(5))]),
(0, [])))
def test_program_imports(self):
test = self.check_program
test('import $foo;', ut(0, im(nm("foo"))))
test('import $foo:bar;', ut(0, im(nm(["foo", "bar"]))))
test('import @foo;', ut(-1, im(nm("foo", -1))))
test('import @@foo;', ut(-2, im(nm("foo", -2))))
def test_quote(self):
test = self.check_expression
test('@(1)', qt(-1, lt(1)))
test('@(1 + 2)', qt(-1, bn(lt(1), '+', lt(2))))
test('@@(1 + 2)', qt(-2, bn(lt(1), '+', lt(2))))
test('@@@(1 + 2)', qt(-3, bn(lt(1), '+', lt(2))))
test('@(1 + @(2))', qt(-1, bn(lt(1), '+', qt(-1, lt(2)))))
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=0)
unittest.main(testRunner=runner)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural GPU for Learning Algorithms."""
import math
import os
import random
import sys
import time
import matplotlib.animation as anim
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
import data_utils as data
import neural_gpu
tf.app.flags.DEFINE_float("lr", 0.001, "Learning rate.")
tf.app.flags.DEFINE_float("init_weight", 1.0, "Initial weights deviation.")
tf.app.flags.DEFINE_float("max_grad_norm", 1.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_float("cutoff", 1.2, "Cutoff at the gates.")
tf.app.flags.DEFINE_float("pull", 0.0005, "Starting pull of the relaxations.")
tf.app.flags.DEFINE_float("pull_incr", 1.2, "Increase pull by that much.")
tf.app.flags.DEFINE_float("curriculum_bound", 0.15, "Move curriculum < this.")
tf.app.flags.DEFINE_float("dropout", 0.15, "Dropout that much.")
tf.app.flags.DEFINE_float("grad_noise_scale", 0.0, "Gradient noise scale.")
tf.app.flags.DEFINE_integer("batch_size", 32, "Batch size.")
tf.app.flags.DEFINE_integer("low_batch_size", 16, "Low batch size.")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200, "Steps per epoch.")
tf.app.flags.DEFINE_integer("nmaps", 128, "Number of floats in each cell.")
tf.app.flags.DEFINE_integer("niclass", 33, "Number of classes (0 is padding).")
tf.app.flags.DEFINE_integer("noclass", 33, "Number of classes (0 is padding).")
tf.app.flags.DEFINE_integer("train_data_size", 5000, "Training examples/len.")
tf.app.flags.DEFINE_integer("max_length", 41, "Maximum length.")
tf.app.flags.DEFINE_integer("rx_step", 6, "Relax that many recursive steps.")
tf.app.flags.DEFINE_integer("random_seed", 125459, "Random seed.")
tf.app.flags.DEFINE_integer("nconvs", 2, "How many convolutions / 1 step.")
tf.app.flags.DEFINE_integer("kw", 3, "Kernel width.")
tf.app.flags.DEFINE_integer("kh", 3, "Kernel height.")
tf.app.flags.DEFINE_integer("height", 4, "Height.")
tf.app.flags.DEFINE_integer("forward_max", 401, "Maximum forward length.")
tf.app.flags.DEFINE_integer("jobid", -1, "Task id when running on borg.")
tf.app.flags.DEFINE_integer("nprint", 0, "How many test examples to print out.")
tf.app.flags.DEFINE_integer("mode", 0, "Mode: 0-train other-decode.")
tf.app.flags.DEFINE_bool("animate", False, "Whether to produce an animation.")
tf.app.flags.DEFINE_bool("quantize", False, "Whether to quantize variables.")
tf.app.flags.DEFINE_string("task", "rev", "Which task are we learning?")
tf.app.flags.DEFINE_string("train_dir", "/tmp/", "Directory to store models.")
tf.app.flags.DEFINE_string("ensemble", "", "Model paths for ensemble.")
FLAGS = tf.app.flags.FLAGS
EXTRA_EVAL = 12
def initialize(sess):
"""Initialize data and model."""
if FLAGS.jobid >= 0:
data.log_filename = os.path.join(FLAGS.train_dir, "log%d" % FLAGS.jobid)
data.print_out("NN ", newline=False)
# Set random seed.
seed = FLAGS.random_seed + max(0, FLAGS.jobid)
tf.set_random_seed(seed)
random.seed(seed)
np.random.seed(seed)
# Check data sizes.
assert data.bins
min_length = 3
max_length = min(FLAGS.max_length, data.bins[-1])
assert max_length + 1 > min_length
while len(data.bins) > 1 and data.bins[-2] > max_length + EXTRA_EVAL:
data.bins = data.bins[:-1]
assert data.bins[0] > FLAGS.rx_step
data.forward_max = max(FLAGS.forward_max, data.bins[-1])
nclass = min(FLAGS.niclass, FLAGS.noclass)
data_size = FLAGS.train_data_size if FLAGS.mode == 0 else 1000
# Initialize data for each task.
tasks = FLAGS.task.split("-")
for t in tasks:
for l in xrange(max_length + EXTRA_EVAL - 1):
data.init_data(t, l, data_size, nclass)
data.init_data(t, data.bins[-2], data_size, nclass)
data.init_data(t, data.bins[-1], data_size, nclass)
end_size = 4 * 1024 if FLAGS.mode > 0 else 1024
data.init_data(t, data.forward_max, end_size, nclass)
# Print out parameters.
curriculum = FLAGS.curriculum_bound
msg1 = ("layers %d kw %d h %d kh %d relax %d batch %d noise %.2f task %s"
% (FLAGS.nconvs, FLAGS.kw, FLAGS.height, FLAGS.kh, FLAGS.rx_step,
FLAGS.batch_size, FLAGS.grad_noise_scale, FLAGS.task))
msg2 = "data %d %s" % (FLAGS.train_data_size, msg1)
msg3 = ("cut %.2f pull %.3f lr %.2f iw %.2f cr %.2f nm %d d%.4f gn %.2f %s" %
(FLAGS.cutoff, FLAGS.pull_incr, FLAGS.lr, FLAGS.init_weight,
curriculum, FLAGS.nmaps, FLAGS.dropout, FLAGS.max_grad_norm, msg2))
data.print_out(msg3)
# Create checkpoint directory if it does not exist.
checkpoint_dir = os.path.join(FLAGS.train_dir, "neural_gpu%s"
% ("" if FLAGS.jobid < 0 else str(FLAGS.jobid)))
if not gfile.IsDirectory(checkpoint_dir):
data.print_out("Creating checkpoint directory %s." % checkpoint_dir)
gfile.MkDir(checkpoint_dir)
# Create model and initialize it.
tf.get_variable_scope().set_initializer(
tf.uniform_unit_scaling_initializer(factor=1.8 * FLAGS.init_weight))
model = neural_gpu.NeuralGPU(
FLAGS.nmaps, FLAGS.nmaps, FLAGS.niclass, FLAGS.noclass, FLAGS.dropout,
FLAGS.rx_step, FLAGS.max_grad_norm, FLAGS.cutoff, FLAGS.nconvs,
FLAGS.kw, FLAGS.kh, FLAGS.height, FLAGS.mode, FLAGS.lr,
FLAGS.pull, FLAGS.pull_incr, min_length + 3)
data.print_out("Created model.")
sess.run(tf.initialize_all_variables())
data.print_out("Initialized variables.")
# Load model from parameters if a checkpoint exists.
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and gfile.Exists(ckpt.model_checkpoint_path):
data.print_out("Reading model parameters from %s"
% ckpt.model_checkpoint_path)
model.saver.restore(sess, ckpt.model_checkpoint_path)
# Check if there are ensemble models and get their checkpoints.
ensemble = []
ensemble_dir_list = [d for d in FLAGS.ensemble.split(",") if d]
for ensemble_dir in ensemble_dir_list:
ckpt = tf.train.get_checkpoint_state(ensemble_dir)
if ckpt and gfile.Exists(ckpt.model_checkpoint_path):
data.print_out("Found ensemble model %s" % ckpt.model_checkpoint_path)
ensemble.append(ckpt.model_checkpoint_path)
# Return the model and needed variables.
return (model, min_length, max_length, checkpoint_dir, curriculum, ensemble)
def single_test(l, model, sess, task, nprint, batch_size, print_out=True,
offset=None, ensemble=None, get_steps=False):
"""Test model on test data of length l using the given session."""
inpt, target = data.get_batch(l, batch_size, False, task, offset)
_, res, _, steps = model.step(sess, inpt, target, False, get_steps=get_steps)
errors, total, seq_err = data.accuracy(inpt, res, target, batch_size, nprint)
seq_err = float(seq_err) / batch_size
if total > 0:
errors = float(errors) / total
if print_out:
data.print_out(" %s len %d errors %.2f sequence-errors %.2f"
% (task, l, 100*errors, 100*seq_err))
# Ensemble eval.
if ensemble:
results = []
for m in ensemble:
model.saver.restore(sess, m)
_, result, _, _ = model.step(sess, inpt, target, False)
m_errors, m_total, m_seq_err = data.accuracy(inpt, result, target,
batch_size, nprint)
m_seq_err = float(m_seq_err) / batch_size
if total > 0:
m_errors = float(m_errors) / m_total
data.print_out(" %s len %d m-errors %.2f m-sequence-errors %.2f"
% (task, l, 100*m_errors, 100*m_seq_err))
results.append(result)
ens = [sum(o) for o in zip(*results)]
errors, total, seq_err = data.accuracy(inpt, ens, target,
batch_size, nprint)
seq_err = float(seq_err) / batch_size
if total > 0:
errors = float(errors) / total
if print_out:
data.print_out(" %s len %d ens-errors %.2f ens-sequence-errors %.2f"
% (task, l, 100*errors, 100*seq_err))
return errors, seq_err, (steps, inpt, [np.argmax(o, axis=1) for o in res])
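# Note on the ensemble branch above (explanatory comment, not original code):
# each checkpoint listed in `ensemble` is restored in turn, its raw per-step
# outputs are collected, and `ens = [sum(o) for o in zip(*results)]` sums those
# outputs position by position before accuracy is recomputed. Assuming
# data.accuracy scores by the highest per-class output, this is a simple
# output-summing ensemble; dividing by len(ensemble) would not change the result.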
def multi_test(l, model, sess, task, nprint, batch_size, offset=None,
ensemble=None):
"""Run multiple tests at lower batch size to save memory."""
errors, seq_err = 0.0, 0.0
to_print = nprint
low_batch = FLAGS.low_batch_size
low_batch = min(low_batch, batch_size)
for mstep in xrange(batch_size / low_batch):
cur_offset = None if offset is None else offset + mstep * low_batch
err, sq_err, _ = single_test(l, model, sess, task, to_print, low_batch,
False, cur_offset, ensemble=ensemble)
to_print = max(0, to_print - low_batch)
errors += err
seq_err += sq_err
if FLAGS.mode > 0:
cur_errors = float(low_batch * errors) / ((mstep+1) * low_batch)
cur_seq_err = float(low_batch * seq_err) / ((mstep+1) * low_batch)
data.print_out(" %s multitest current errors %.2f sequence-errors %.2f"
% (task, 100*cur_errors, 100*cur_seq_err))
errors = float(low_batch) * float(errors) / batch_size
seq_err = float(low_batch) * float(seq_err) / batch_size
data.print_out(" %s len %d errors %.2f sequence-errors %.2f"
% (task, l, 100*errors, 100*seq_err))
return errors, seq_err
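# Worked example of the averaging in multi_test (illustrative numbers, using
# the default flags): with batch_size = 32 and low_batch_size = 16 there are
# two sub-batches; if they report error rates of 0.10 and 0.20, the running
# sum `errors` is 0.30 and the final value is 16 * 0.30 / 32 = 0.15, i.e. the
# mean over sub-batches.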
def train():
"""Train the model."""
batch_size = FLAGS.batch_size
tasks = FLAGS.task.split("-")
with tf.Session() as sess:
(model, min_length, max_length, checkpoint_dir,
curriculum, _) = initialize(sess)
quant_op = neural_gpu.quantize_weights_op(512, 8)
max_cur_length = min(min_length + 3, max_length)
prev_acc_perp = [1000000 for _ in xrange(3)]
prev_seq_err = 1.0
    # Main training loop.
while True:
global_step, pull, max_cur_length, learning_rate = sess.run(
[model.global_step, model.pull, model.cur_length, model.lr])
acc_loss, acc_total, acc_errors, acc_seq_err = 0.0, 0, 0, 0
acc_grad_norm, step_count, step_time = 0.0, 0, 0.0
for _ in xrange(FLAGS.steps_per_checkpoint):
global_step += 1
task = random.choice(tasks)
# Select the length for curriculum learning.
l = np.random.randint(max_cur_length - min_length + 1) + min_length
# Prefer longer stuff 60% of time.
if np.random.randint(100) < 60:
l1 = np.random.randint(max_cur_length - min_length+1) + min_length
l = max(l, l1)
# Mixed curriculum learning: in 25% of cases go to any larger length.
if np.random.randint(100) < 25:
l1 = np.random.randint(max_length - min_length + 1) + min_length
l = max(l, l1)
# Run a step and time it.
start_time = time.time()
inp, target = data.get_batch(l, batch_size, True, task)
noise_param = math.sqrt(math.pow(global_step, -0.55) *
prev_seq_err) * FLAGS.grad_noise_scale
loss, res, gnorm, _ = model.step(sess, inp, target, True, noise_param)
step_time += time.time() - start_time
acc_grad_norm += float(gnorm)
# Accumulate statistics only if we did not exceed curriculum length.
if l < max_cur_length + 1:
step_count += 1
acc_loss += loss
errors, total, seq_err = data.accuracy(inp, res, target,
batch_size, 0)
acc_total += total
acc_errors += errors
acc_seq_err += seq_err
# Normalize and print out accumulated statistics.
acc_loss /= step_count
step_time /= FLAGS.steps_per_checkpoint
acc_seq_err = float(acc_seq_err) / (step_count * batch_size)
prev_seq_err = max(0.0, acc_seq_err - 0.02) # No noise at error < 2%.
acc_errors = float(acc_errors) / acc_total if acc_total > 0 else 1.0
msg1 = "step %d step-time %.2f" % (global_step, step_time)
msg2 = "lr %.8f pull %.3f" % (learning_rate, pull)
msg3 = ("%s %s grad-norm %.8f"
% (msg1, msg2, acc_grad_norm / FLAGS.steps_per_checkpoint))
data.print_out("%s len %d ppx %.8f errors %.2f sequence-errors %.2f" %
(msg3, max_cur_length, data.safe_exp(acc_loss),
100*acc_errors, 100*acc_seq_err))
# If errors are below the curriculum threshold, move curriculum forward.
if curriculum > acc_seq_err:
if FLAGS.quantize:
# Quantize weights.
data.print_out(" Quantizing parameters.")
sess.run([quant_op])
# Increase current length (until the next with training data).
do_incr = True
while do_incr and max_cur_length < max_length:
sess.run(model.cur_length_incr_op)
for t in tasks:
if data.train_set[t]: do_incr = False
# Forget last perplexities if we're not yet at the end.
if max_cur_length < max_length:
prev_acc_perp.append(1000000)
# Either increase pull or, if it's large, average parameters.
if pull < 0.1:
sess.run(model.pull_incr_op)
else:
data.print_out(" Averaging parameters.")
sess.run(model.avg_op)
if acc_seq_err < (curriculum / 3.0):
sess.run(model.lr_decay_op)
# Lower learning rate if we're worse than the last 3 checkpoints.
acc_perp = data.safe_exp(acc_loss)
if acc_perp > max(prev_acc_perp[-3:]):
sess.run(model.lr_decay_op)
prev_acc_perp.append(acc_perp)
# Save checkpoint.
checkpoint_path = os.path.join(checkpoint_dir, "neural_gpu.ckpt")
model.saver.save(sess, checkpoint_path,
global_step=model.global_step)
# Run evaluation.
bound = data.bins[-1] + 1
for t in tasks:
l = min_length
while l < max_length + EXTRA_EVAL and l < bound:
_, seq_err, _ = single_test(l, model, sess, t,
FLAGS.nprint, batch_size)
l += 1
while l < bound + 1 and not data.test_set[t][l]:
l += 1
if seq_err < 0.05: # Run larger test if we're good enough.
_, seq_err = multi_test(data.forward_max, model, sess, t,
FLAGS.nprint, batch_size * 4)
if seq_err < 0.01: # Super-large test on 1-task large-forward models.
if data.forward_max > 4000 and len(tasks) == 1:
multi_test(data.forward_max, model, sess, tasks[0], FLAGS.nprint,
batch_size * 16, 0)
def animate(l, test_data, anim_size):
"""Create animation for the given data (hacky matplotlib use)."""
xf = 12 # Extra frames to slow down at start and end.
fps = 2 # Frames per step.
# Make the figure.
fig = plt.figure(figsize=(16, 9), facecolor="white")
ax = fig.add_axes([0, 0, 1, 1], frameon=False, zorder=2)
ax.set_xticks([i * 24-0.5 for i in xrange(4)])
ax.set_xticklabels([])
ax.set_yticks([i - 0.5 for i in xrange(l+1)])
ax.grid(which="major", axis="both", linestyle="-", color="black")
# We need text fields.
text_fields = []
text_size = 24*32/l
for y in xrange(l):
text_fields.append(ax.text(
11.25, y + 0.15, "", color="g", ha="center", va="center",
bbox={"facecolor": "b", "alpha": 0.01, "pad": 24 * text_size},
size=text_size - (4 * 32 / l), animated=True))
im = ax.imshow(np.zeros_like(test_data[0][0][0]), vmin=-1.0,
vmax=1.0, cmap="gray", aspect="auto", origin="upper",
interpolation="none", animated=True)
im.set_zorder(1)
# Main animation step.
def animation_update(frame_no, test_data, xf, im, text_fields):
"""Update an animation frame."""
steps, inpt, out_raw = test_data
length = len(steps)
batch = frame_no / (fps * (l+4*xf))
index = int((frame_no % (fps * (l+4*xf))) / fps)
# Cut output after first padding.
out = [out_raw[i][batch] for i in xrange(len(text_fields))]
if 0 in out:
i = out.index(0)
out = out[0:i] + [0 for _ in xrange(len(out) - i)]
# Show the state after the first frames.
if index >= 2*xf:
im.set_array(steps[min(length - 1, index - 2*xf)][batch])
for i, t in enumerate(text_fields):
if index - 2*xf < length:
t.set_text("")
else:
t.set_text(data.to_symbol(out[i]))
else:
for i, t in enumerate(text_fields):
t.set_text(data.to_symbol(inpt[i][batch]) if index < xf else "")
if index < xf:
im.set_array(np.zeros_like(steps[0][0]))
else:
im.set_array(steps[0][batch])
return im,
# Create the animation and save to mp4.
animation = anim.FuncAnimation(
fig, animation_update, blit=True, frames=(l+4*xf)*anim_size*fps,
interval=500/fps, fargs=(test_data, xf, im, text_fields))
animation.save("/tmp/neural_gpu.mp4", writer="mencoder", fps=4*fps, dpi=3*80)
def evaluate():
"""Evaluate an existing model."""
batch_size = FLAGS.batch_size
tasks = FLAGS.task.split("-")
with tf.Session() as sess:
model, min_length, max_length, _, _, ensemble = initialize(sess)
bound = data.bins[-1] + 1
for t in tasks:
l = min_length
while l < max_length + EXTRA_EVAL and l < bound:
_, seq_err, _ = single_test(l, model, sess, t, FLAGS.nprint,
batch_size, ensemble=ensemble)
l += 1
while l < bound + 1 and not data.test_set[t][l]:
l += 1
# Animate.
if FLAGS.animate:
anim_size = 2
_, _, test_data = single_test(l, model, sess, t, 0, anim_size,
get_steps=True)
animate(l, test_data, anim_size)
# More tests.
_, seq_err = multi_test(data.forward_max, model, sess, t, FLAGS.nprint,
batch_size * 4, ensemble=ensemble)
if seq_err < 0.01: # Super-test if we're very good and in large-test mode.
if data.forward_max > 4000 and len(tasks) == 1:
multi_test(data.forward_max, model, sess, tasks[0], FLAGS.nprint,
batch_size * 64, 0, ensemble=ensemble)
def interactive():
"""Interactively probe an existing model."""
with tf.Session() as sess:
model, _, _, _, _, _ = initialize(sess)
sys.stdout.write("Input to Neural GPU, e.g., 0 1. Use -1 for PAD.\n")
sys.stdout.write("> ")
sys.stdout.flush()
inpt = sys.stdin.readline()
while inpt:
ids = [data.to_id(s) for s in inpt.strip().split()]
inpt, target = data.get_batch(len(ids), 1, False, "",
preset=(ids, [0 for _ in ids]))
_, res, _, _ = model.step(sess, inpt, target, False)
res = [np.argmax(o, axis=1) for o in res]
res = [o for o in res[:len(ids)] if o > 0]
print " " + " ".join([data.to_symbol(output[0]) for output in res])
sys.stdout.write("> ")
sys.stdout.flush()
inpt = sys.stdin.readline()
def main(_):
if FLAGS.mode == 0:
train()
elif FLAGS.mode == 1:
evaluate()
else:
interactive()
if __name__ == "__main__":
tf.app.run()
|
|
import re
import time
import urllib
import urlparse
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.urlresolvers import reverse
from django.template import defaultfilters
from django.template.loader import render_to_string
from django.utils.encoding import smart_str
from django.utils.html import strip_tags
import jinja2
from markdown import markdown as python_markdown
from django_jinja import library
from product_details import product_details
from remo.base import utils
LINE_LIMIT = 75
FOLD_SEP = u'\r\n '
COUNTRIES_NAME_TO_CODE = {}
for code, name in product_details.get_regions('en').items():
name = name.lower()
COUNTRIES_NAME_TO_CODE[name] = code
# Yanking filters from Django.
library.filter(strip_tags)
library.filter(defaultfilters.timesince)
library.filter(defaultfilters.truncatewords)
library.filter(defaultfilters.pluralize)
@library.global_function
def thisyear():
"""The current year."""
return jinja2.Markup(datetime.today().year)
@library.global_function
def url(viewname, *args, **kwargs):
"""Helper for Django's ``reverse`` in templates."""
return reverse(viewname, args=args, kwargs=kwargs)
@library.filter
def urlparams(url_, hash=None, **query):
"""Add a fragment and/or query paramaters to a URL.
New query params will be appended to exising parameters, except duplicate
names, which will be replaced.
"""
url = urlparse.urlparse(url_)
fragment = hash if hash is not None else url.fragment
# Use dict(parse_qsl) so we don't get lists of values.
q = url.query
query_dict = dict(urlparse.parse_qsl(smart_str(q))) if q else {}
query_dict.update((k, v) for k, v in query.items())
query_string = _urlencode([(k, v) for k, v in query_dict.items()
if v is not None])
new = urlparse.ParseResult(url.scheme, url.netloc, url.path, url.params,
query_string, fragment)
return new.geturl()
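# Illustrative use of the filter above (values are made up, not from a real
# template): urlparams('/search?page=1', hash='results', page=2, q='foo')
# returns something like '/search?page=2&q=foo#results'; the existing `page`
# parameter is replaced, `q` is appended, and the fragment is set (parameter
# order may vary because the pairs come from a dict).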
def _urlencode(items):
"""A Unicode-safe URLencoder."""
try:
return urllib.urlencode(items)
except UnicodeEncodeError:
return urllib.urlencode([(k, smart_str(v)) for k, v in items])
@library.filter
def urlencode(txt):
"""Url encode a path."""
if isinstance(txt, unicode):
txt = txt.encode('utf-8')
return urllib.quote_plus(txt)
@library.global_function
def static(path):
return staticfiles_storage.url(path)
@library.filter
def markdown(text):
"""Return text rendered as Markdown."""
return jinja2.Markup(python_markdown(text))
@library.filter
def get_display_name(obj):
"""Return obj display_name if obj is User. Otherwise return None."""
if isinstance(obj, User):
return obj.userprofile.display_name
@library.filter
def format_datetime(obj, type=None):
"""Return datetime obj formatted."""
if type == 'full':
return obj.strftime('%d %B %Y %H:%M')
return obj.strftime('%Y-%m-%d %H:%M')
@library.filter
def format_datetime_iso(obj):
"""Return datetime obj ISO formatted."""
return obj.strftime('%Y-%m-%dT%H:%M:%S')
@library.filter
def format_datetime_unix(obj):
"""Return unix representation of obj."""
return time.mktime(obj.timetuple())
@library.filter
def format_datetime_utc(obj):
"""Return datetime object UTC formatted."""
return obj.strftime('%Y%m%dT%H%M%S')
@library.filter
def strftime(obj, style):
"""Return string of datetime object formatted with style."""
return obj.strftime(style)
@library.global_function
def get_static_map_url(width, height, lon, lat, zoom=4):
"""Return static map url."""
token = settings.MAPBOX_TOKEN
base_url = 'https://api.tiles.mapbox.com/v3/%(tok)s/'
marker_query = 'pin-m(%(lon)s,%(lat)s)/'
center_query = '%(lon)s,%(lat)s,%(zoom)s/%(width)sx%(height)s.png'
URL = base_url + marker_query + center_query
return URL % {'tok': token, 'width': width, 'height': height,
'lat': lat, 'lon': lon, 'zoom': zoom}
@library.global_function
def get_next_url(request):
"""Return next_url stored in session or Dashboard."""
if 'next_url' in request.session:
return request.session.pop('next_url')
elif request.get_full_path() == '/':
return reverse('dashboard')
return request.get_full_path()
@library.filter
def get_bugzilla_url(bug_id):
"""Return bugzilla url for bug_id."""
return u'https://bugzilla.mozilla.org/show_bug.cgi?id=%d' % bug_id
@library.global_function
def active(request, pattern):
"""Return 'active-nav' string when pattern matches request's full path."""
if re.match(pattern, request.get_full_path()):
return 'active-nav'
return None
@library.global_function
def field_with_attrs(bfield, **kwargs):
"""Allows templates to dynamically add html attributes to bound
fields from django forms.
Taken from bedrock.
"""
bfield.field.widget.attrs.update(kwargs)
return bfield
@library.global_function
def field_errors(field):
"""Return string with rendered template with field errors."""
return jinja2.Markup(render_to_string('form-error.jinja', {'field': field}))
@library.global_function
def get_full_name(user):
"""Return user's fullname bugzilla style."""
return u'%s :%s' % (user.get_full_name(), user.userprofile.display_name)
@library.global_function
def user_is_mozillian(user):
"""Check if a user belongs to Mozillians group."""
return user.groups.filter(name='Mozillians').exists()
@library.global_function
def user_is_rep(user):
"""Check if a user belongs to Rep group."""
return (user.groups.filter(name='Rep').exists() and
user.userprofile.registration_complete)
@library.global_function
def user_is_mentor(user):
"""Check if a user belongs to Mentor group."""
return user.groups.filter(name='Mentor').exists()
@library.global_function
def user_is_admin(user):
"""Check if a user belongs to Admin group."""
return user.groups.filter(name='Admin').exists()
@library.global_function
def user_is_council(user):
"""Check if a user belongs to Council group."""
return user.groups.filter(name='Council').exists()
@library.global_function
def user_is_alumni(user):
"""Check if a user belongs to Alumni group."""
return user.groups.filter(name='Alumni').exists()
@library.filter
def ical_escape_char(text):
"""Escape characters as defined in RFC5545.
Original code from https://github.com/collective/icalendar
Altered by John Giannelos <[email protected]>
"""
return (text.replace('\N', '\n')
.replace('\\', '\\\\')
.replace(';', r'\;')
.replace(',', r'\,')
.replace('\r\n', r'\n')
.replace('\n', r'\n'))
@library.filter
def ical_format_lines(text):
"""Make a string folded as defined in RFC5545.
Original code from https://github.com/collective/icalendar
Altered by John Giannelos <[email protected]>
"""
ret_line = u''
byte_count = 0
for char in text:
char_byte_len = len(char.encode('utf-8'))
byte_count += char_byte_len
if byte_count >= LINE_LIMIT:
ret_line += FOLD_SEP
byte_count = char_byte_len
ret_line += char
return ret_line
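# Folding sketch for the filter above (assumed input, for illustration only):
# with LINE_LIMIT = 75, a 100-character ASCII value such as
# ical_format_lines(u'X' * 100) comes back as the first 74 characters, then
# FOLD_SEP (CRLF plus a space, per RFC 5545), then the remaining 26 characters.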
@library.global_function
def get_attr(obj, value, default):
"""Add a gettatr helper in templates."""
return getattr(obj, value, default)
@library.filter
def absolutify(url):
"""Prepend the SITE_URL to the url."""
return utils.absolutify(url)
@library.global_function
def get_date_n_weeks_before(date, weeks=0):
"""Return the date X weeks before date."""
return date - timedelta(weeks=weeks)
@library.filter
def formset_errors_exist(formset):
for form in formset.values():
if form.errors:
return True
return False
@library.filter
def get_country_code(country_name):
"""Return country code from country name."""
return COUNTRIES_NAME_TO_CODE.get(country_name.lower(), '')
@library.filter
def nl2br(string):
"""Turn newlines into <br>."""
if not string:
return ''
return jinja2.Markup('<br>'.join(jinja2.escape(string).splitlines()))
@library.filter
def ifeq(a, b, text):
"""Return ``text`` if ``a == b``."""
return jinja2.Markup(text if a == b else '')
|
|
import sqlite3
import datetime
import math
import triangulate_events
import sys
sys.path.append('../')
import events
import settings
"""
@author(s): Nathan Heidt
This parses the meteor events and detects simultaneous events.
TODO:
- This is linked to work on the local database, when porting this over to
the server, link it to work with whatever local DB is there
CHANGELOG:
-
"""
databasePath = settings.databasePath
dbTable = 'events'
# Max delay (seconds) between meteor events before we section them
# This really only works with small sample sizes
maxDelay = 5.0
# The furthest apart (km) two observers can be and still see the same meteor
maxDistance = 100.0
# How long (seconds) apart can separate cameras see the same sample
maxTimeBetweenSamples = 5.0
def main():
meteorEvents = getAllEvents()
sectionedEvents = sectionMeteorEvents(meteorEvents)
matchedEvents = matchMeteorEvents(sectionedEvents)
triangulators = []
for eventSet in matchedEvents:
triangulator = triangulate_events.Triangulator(eventSet)
triangulators.append(triangulator)
for triangulator in triangulators:
print "meteor seen at: ", triangulator.closest_intersection()
def compareEvents(evt1, evt2):
"""
This takes two event lists and performs checks to see if they are possible
matches of the same meteor
"""
firstEvt1 = evt1[0]
firstEvt2 = evt2[0]
# check time first
    current_date = firstEvt1.date  # time of this event's first evt
most_recent_date = firstEvt2.date
if (current_date - most_recent_date).total_seconds() > maxTimeBetweenSamples:
return False
#TODO: encapsulate distance checks in the event class
# check distance between users
user1lat = firstEvt1.latitude
user1lon = firstEvt1.longitude
user2lat = firstEvt2.latitude
user2lon = firstEvt2.longitude
if distanceBetweenCoords(user1lat, user1lon, user2lat, user2lon) > maxDistance:
return False
# TODO check the skew line distance between the images
# need to extract the meteor location in image, the angle from center,
# the absolute angle from earth center, and then skew line intersection
return True
def matchMeteorEvents(sectionedEvents):
"""
This takes the sectioned events from sectionMeteorEvents and pairs
possible events together.
checks:
- if two users saw an event within some timeframe
- if those users are within some distance of each other
- if the skew line distance between their view is minimal
If the checks pass then it is considered to be the same meteor sighting
"""
#unroll the users events first [[evt1..],[evt2..],..]
unrolledEvents = []
for user in sectionedEvents:
for evt in sectionedEvents[user]:
unrolledEvents.append(evt)
#now sort by time of the first event
#TODO, maybe average the times then sort by that average instead
sortedEvents = sorted(unrolledEvents, key=lambda x: x[0].date)
#compile into sections based on checks
coincidentEvents = []
section = []
for evt in sortedEvents:
if(len(section) > 0):
if compareEvents(evt, section[0]) == False:
coincidentEvents.append(section)
section = []
section.append(evt)
else:
section.append(evt)
if len(section) > 0:
coincidentEvents.append(section)
return coincidentEvents
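# Sketch of the grouping above (timestamps are invented for illustration): with
# maxTimeBetweenSamples = 5.0, events whose first frames occur at t = 0.0 s,
# 1.2 s and 9.0 s end up in two coincident groups, [t=0.0, t=1.2] and [t=9.0],
# because 9.0 - 0.0 exceeds the 5 s window (assuming the distance check passes
# for the first pair).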
def sectionMeteorEvents(meteorEvents):
"""
This takes a list of meteor events and sections them into a dictionary
of lists where each key represents a user_key and each list represents
all the events for that user.
It then goes through user by user and splits up the single list into
    a list of event frames. For example, a single meteor event may take up
2 or more frames of images so they will be put together in a single
list.
Here is what this looks like:
{
'user_key_1' : [[evt1frame1,evt1frame2,...],[evt2frame1,...],...],
'user_key_2' : [[evt1frame1,evt1frame2,...],[evt2frame1,...],...],
...
}
Parameters
----------
meteorEvents : list of dicts
Essentially a list of the meteor events as described by the database
columns. The dictionary key is the name of each database column.
"""
# TODO: maybe this functionality can be in the Events class instead
# Create a dictionary where the key is the user_key and the value is a
# list of that users events
user_events = {}
for evt in meteorEvents:
user_events.setdefault(evt.user_key, [])
user_events[evt.user_key].append(evt)
# Sort each users events by time
for key in user_events:
user_events[key] = sorted(user_events[key], key=lambda k: k.date)
# Here we go through the meteor events, and if there is a sufficiently
    # large gap in time between two events, we can rule out the possibility
# of those events being related. There are better methods using CV,
# but this is fast and has very few false negatives
for key in user_events:
sectionedEvents = []
section = []
for evt in user_events[key]:
if(len(section) > 0):
current_date = evt.date
most_recent_date = section[-1].date
if (current_date - most_recent_date).total_seconds() > maxDelay:
sectionedEvents.append(section)
section = []
section.append(evt)
else:
section.append(evt)
if len(section) > 0:
sectionedEvents.append(section)
user_events[key] = sectionedEvents
# TODO: do the same as above, but with distance
return user_events
def distanceBetweenCoords(lat1, lon1, lat2, lon2):
"""
This uses the haversine formula to calculate the great-circle distance
between two points.
Parameters
----------
lat1 : float
The latitude of the first point
lon1 : float
The longitude of the first point
lat2 : float
The latitude of the second point
lon2 : float
The longitude of the second point
"""
    earthRadius = 6371.0  # earth's radius in km
phi1 = math.radians(lat1)
phi2 = math.radians(lat2)
deltaPhi = math.radians(lat2 - lat1)
deltaLambda = math.radians(lon2 - lon1)
a = math.sin(deltaPhi/2.0)**2 + \
math.cos(phi1)*math.cos(phi2)*(math.sin(deltaLambda/2.0)**2)
c = 2.0*math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = earthRadius*c
return d
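# Worked example (coordinates are illustrative, not real observations): two
# cameras at the same longitude, half a degree of latitude apart, e.g.
# distanceBetweenCoords(40.0, -105.0, 40.5, -105.0), are separated by roughly
# 0.5 * (pi / 180) * 6371 ~= 55.6 km, which is below maxDistance, so
# compareEvents() would not reject that pair on distance alone.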
def skewLineDistance(evt1, evt2):
"""
given two events compute the skew line distance between them
"""
pass
def eventFactory(cursor, row):
"""
This is a helper function to create a dictionary using the column names
from the database as the keys
"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
#fill an Event type with the dict here
evt = events.Event(d)
return evt
def getAllEvents():
"""
This gets all logged events from the database. At this point, we're not
worried about too many instances being returned.
"""
events = []
print("Fetching database tables")
conn = sqlite3.connect(databasePath)
conn.row_factory = eventFactory
c = conn.cursor()
for row in c.execute("SELECT * FROM %s" % dbTable):
events.append(row)
conn.close()
print("Found %d events." % len(events))
return events
if __name__ == "__main__":
main()
|
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining a color-based blob detector for camera images."""
from typing import Mapping, Optional, Tuple
from absl import logging
import cv2
from dmr_vision import detector
from dmr_vision import types
import numpy as np
class BlobDetector(detector.ImageDetector):
"""Color-based blob detector."""
def __init__(self,
color_ranges: Mapping[str, types.ColorRange],
scale: float = (1. / 6.),
min_area: int = 230,
mask_points: Optional[types.MaskPoints] = None,
visualize: bool = False,
toolkit: bool = False):
"""Constructs a `BlobDetector` instance.
Args:
color_ranges: A mapping between a given blob name and the range of YUV
color used to segment it from an image.
      scale: Image rescaling factor. Used to increase the frame rate, at the
        cost of reducing the precision of the blob barycenter and contour.
min_area: The minimum area the detected blob must have.
      mask_points: (u, v) coordinates defining closed regions of interest in
the image where the blob detector will not look for blobs.
visualize: Whether to output a visualization of the detected blob or not.
toolkit: Whether to display a YUV GUI toolkit for parameter tuning.
        Enabling this implicitly sets `visualize = True`.
"""
self._color_ranges = color_ranges
self._scale = np.array(scale)
self._min_area = min_area
self._mask_points = mask_points if mask_points is not None else ()
self._visualize = visualize
self._mask = None
self._toolkit = toolkit
if self._toolkit:
self._visualize = True
self._window_name = "UV toolkit"
self._window_size = (800, 1000)
cv2.namedWindow(
self._window_name,
cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)
cv2.resizeWindow(self._window_name, self._window_size)
self._trackbar_scale = 1000
num_colors = len(self._color_ranges.keys())
if num_colors > 1:
cv2.createTrackbar("Color selector", self._window_name, 0,
len(self._color_ranges.keys()) - 1,
self._callback_change_color)
cv2.createTrackbar("Subsampling", self._window_name, 5, 10,
lambda x: None)
cv2.setTrackbarMin("Subsampling", self._window_name, 1)
self._u_range_trackbar = CreateRangeTrackbar(self._window_name, "U min",
"U max", self._color_ranges,
"U", self._trackbar_scale)
self._v_range_trackbar = CreateRangeTrackbar(self._window_name, "V min",
"V max", self._color_ranges,
"V", self._trackbar_scale)
self._callback_change_color(0)
def __del__(self):
if self._toolkit:
cv2.destroyAllWindows()
def __call__(self,
image: np.ndarray) -> Tuple[types.Centers, types.Detections]:
"""Finds color blobs in the image.
Args:
image: the input image.
Returns:
A dictionary mapping a blob name with
- the (u, v) coordinate of its barycenter, if found;
- `None`, otherwise;
and a dictionary mapping a blob name with
- its contour superimposed on the input image;
- `None`, if `BlobDetector` is run with `visualize == False`.
"""
# Preprocess the image.
image = self._preprocess(image)
# Convert the image to YUV.
yuv_image = cv2.cvtColor(image.astype(np.float32) / 255., cv2.COLOR_RGB2YUV)
# Find blobs.
blob_centers = {}
blob_visualizations = {}
for name, color_range in self._color_ranges.items():
blob = self._find_blob(yuv_image, color_range)
blob_centers[name] = blob.center * (1. / self._scale) if blob else None
blob_visualizations[name] = (
self._draw_blobs(image, blob) if self._visualize else None)
if self._toolkit:
self._update_gui_toolkit(yuv_image, image)
return blob_centers, blob_visualizations
def _preprocess(self, image: np.ndarray) -> np.ndarray:
"""Preprocesses an image for color-based blob detection."""
# Resize the image to make all other operations faster.
size = np.round(image.shape[:2] * self._scale).astype(np.int32)
resized = cv2.resize(image, (size[1], size[0]))
if self._mask is None:
self._setup_mask(resized)
# Denoise the image.
denoised = cv2.fastNlMeansDenoisingColored(
src=resized, h=7, hColor=7, templateWindowSize=3, searchWindowSize=5)
return cv2.multiply(denoised, self._mask)
def _setup_mask(self, image: np.ndarray) -> None:
"""Initialises an image mask to explude pixels from blob detection."""
self._mask = np.ones(image.shape, image.dtype)
for mask_points in self._mask_points:
cv2.fillPoly(self._mask, np.int32([mask_points * self._scale]), 0)
def _find_blob(self, yuv_image: np.ndarray,
color_range: types.ColorRange) -> Optional[types.Blob]:
"""Find the largest blob matching the YUV color range.
Args:
yuv_image: An image in YUV color space.
color_range: The YUV color range used for segmentation.
Returns:
If found, the (u, v) coordinate of the barycenter and the contour of the
segmented blob. Otherwise returns `None`.
"""
# Threshold the image in YUV color space.
lower = color_range.lower
upper = color_range.upper
mask = cv2.inRange(yuv_image.copy(), lower, upper)
# Find contours.
_, contours, _ = cv2.findContours(
image=mask, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)
if not contours:
return None
# Find the largest contour.
max_area_contour = max(contours, key=cv2.contourArea)
# If the blob's area is too small, ignore it.
correction_factor = np.square(1. / self._scale)
normalized_area = cv2.contourArea(max_area_contour) * correction_factor
if normalized_area < self._min_area:
return None
# Compute the centroid.
moments = cv2.moments(max_area_contour)
if moments["m00"] == 0:
return None
cx, cy = moments["m10"] / moments["m00"], moments["m01"] / moments["m00"]
return types.Blob(center=np.array([cx, cy]), contour=max_area_contour)
def _draw_blobs(self, image: np.ndarray, blob: types.Blob) -> np.ndarray:
"""Draws the controuer of the detected blobs."""
frame = image.copy()
if blob:
# Draw center.
cv2.drawMarker(
img=frame,
position=(int(blob.center[0]), int(blob.center[1])),
color=(255, 0, 0),
markerType=cv2.MARKER_CROSS,
markerSize=7,
thickness=1,
line_type=cv2.LINE_AA)
# Draw contours.
cv2.drawContours(
image=frame,
contours=[blob.contour],
contourIdx=0,
color=(0, 0, 255),
thickness=1)
return frame
def _callback_change_color(self, color_index: int) -> None:
"""Callback for YUV GUI toolkit trackbar.
Reads current trackbar value and selects the associated color.
    The association between index and color is implementation dependent, i.e.
    it follows the insertion order of the `color_ranges` dictionary.
Args:
color_index: The current value of the trackbar. Passed automatically.
"""
colors = list(self._color_ranges.keys())
selected_color = colors[color_index]
min_upper = self._color_ranges[selected_color]
lower = min_upper.lower
upper = min_upper.upper
self._u_range_trackbar.set_trackbar_pos(lower[1], upper[1])
self._v_range_trackbar.set_trackbar_pos(lower[2], upper[2])
cv2.setWindowTitle(self._window_name,
self._window_name + " - Color: " + selected_color)
def _update_gui_toolkit(self, image_yuv: np.ndarray,
image_rgb: np.ndarray) -> None:
"""Updates the YUV GUI toolkit.
Creates and shows the UV representation of the current image.
Args:
image_yuv: The current image in YUV color space.
image_rgb: The current image in RGB color space.
"""
subsample = cv2.getTrackbarPos("Subsampling", self._window_name)
img_u = image_yuv[0::subsample, 0::subsample, 1]
img_v = 1.0 - image_yuv[0::subsample, 0::subsample, 2]
pixel_color = image_rgb[0::subsample, 0::subsample, :]
pixel_color = pixel_color.reshape(np.prod(img_u.shape[0:2]), -1)
img_u = img_u.ravel()
img_v = img_v.ravel()
fig_size = 300
fig = np.full(shape=(fig_size, fig_size, 3), fill_value=255, dtype=np.uint8)
cv2.arrowedLine(
img=fig,
pt1=(0, fig_size),
pt2=(fig_size, fig_size),
color=(0, 0, 0),
thickness=2,
tipLength=0.03)
cv2.arrowedLine(
img=fig,
pt1=(0, fig_size),
pt2=(0, 0),
color=(0, 0, 0),
thickness=2,
tipLength=0.03)
cv2.putText(
img=fig,
text="U",
org=(int(0.94 * fig_size), int(0.97 * fig_size)),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,
color=(0, 0, 0),
thickness=2)
cv2.putText(
img=fig,
text="V",
org=(int(0.03 * fig_size), int(0.06 * fig_size)),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,
color=(0, 0, 0),
thickness=2)
for i in range(img_u.size):
color = tuple(int(p) for p in pixel_color[i, ::-1])
position = (int(img_u[i] * fig_size), int(img_v[i] * fig_size))
cv2.drawMarker(
img=fig,
position=position,
color=color,
markerType=cv2.MARKER_SQUARE,
markerSize=3,
thickness=2)
u_min, u_max = self._u_range_trackbar.get_trackbar_pos()
u_min = int(u_min * fig_size)
u_max = int(u_max * fig_size)
v_min, v_max = self._v_range_trackbar.get_trackbar_pos()
v_min = int((1.0 - v_min) * fig_size)
v_max = int((1.0 - v_max) * fig_size)
cv2.line(
img=fig,
pt1=(u_min, v_max),
pt2=(u_min, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_max, v_max),
pt2=(u_max, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_min, v_min),
pt2=(u_max, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_min, v_max),
pt2=(u_max, v_max),
color=(0, 0, 0),
thickness=2)
cv2.imshow(self._window_name, fig)
cv2.waitKey(1)
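# Illustrative usage sketch (the names below are assumptions, not part of this
# module). Assuming `types.ColorRange` holds `lower`/`upper` YUV bounds as
# numpy arrays:
#   ranges = {"ball": types.ColorRange(lower=np.array([y_lo, u_lo, v_lo]),
#                                      upper=np.array([y_hi, u_hi, v_hi]))}
#   blob_detector = BlobDetector(color_ranges=ranges, visualize=True)
#   centers, overlays = blob_detector(rgb_image)  # rgb_image: HxWx3 RGB array
# centers["ball"] is the (u, v) barycenter at the original image resolution,
# or None if no sufficiently large blob was found.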
class CreateRangeTrackbar:
"""Class to create and control, on an OpenCV GUI, two trackbars representing a range of values."""
def __init__(self,
window_name: str,
trackbar_name_lower: str,
trackbar_name_upper: str,
color_ranges: Mapping[str, types.ColorRange],
color_code: str,
trackbar_scale: int = 1000):
"""Initializes the class.
Args:
window_name: Name of the window that will be used as a parent of the
created trackbar.
trackbar_name_lower: The name of the trackbar implementing the lower bound
of the range.
trackbar_name_upper: The name of the trackbar implementing the upper bound
of the range.
color_ranges: A mapping between a given blob name and the range of YUV
color used to segment it from an image.
color_code: The color code to change in `color_ranges`. Shall be "U" or
"V".
trackbar_scale: The trackbar scale to recover the real value from the
current trackbar position.
"""
self._window_name = window_name
self._trackbar_name_lower = trackbar_name_lower
self._trackbar_name_upper = trackbar_name_upper
self._color_ranges = color_ranges
self._color_code = color_code
self._trackbar_scale = trackbar_scale
self._trackbar_reset = False
# pylint: disable=g-long-lambda
cv2.createTrackbar(
self._trackbar_name_lower, self._window_name, 0,
self._trackbar_scale, lambda x: self._callback_update_threshold(
"lower", "lower", self._color_code, x))
cv2.createTrackbar(
self._trackbar_name_upper, self._window_name, 0,
self._trackbar_scale, lambda x: self._callback_update_threshold(
"upper", "upper", self._color_code, x))
# pylint: enable=g-long-lambda
def set_trackbar_pos(self, lower_value: float, upper_value: float) -> None:
"""Sets the trackbars to specific values."""
if lower_value > upper_value:
logging.error(
"Wrong values for setting range trackbars. Lower value "
"must be less than upper value. Provided lower: %d. "
"Provided upper: %d.", lower_value, upper_value)
return
# To change the trackbar values avoiding the consistency check enforced by
# the callback to implement a range of values with two sliders, we set the
# variable self._trackbar_reset to `True` and then bring it back to
# `False`.
self._trackbar_reset = True
cv2.setTrackbarPos(self._trackbar_name_lower, self._window_name,
int(lower_value * self._trackbar_scale))
cv2.setTrackbarPos(self._trackbar_name_upper, self._window_name,
int(upper_value * self._trackbar_scale))
self._trackbar_reset = False
def get_trackbar_pos(self, normalized: bool = True) -> Tuple[float, float]:
"""Gets the trackbars lower and upper values."""
lower = cv2.getTrackbarPos(self._trackbar_name_lower, self._window_name)
upper = cv2.getTrackbarPos(self._trackbar_name_upper, self._window_name)
if normalized:
return lower / self._trackbar_scale, upper / self._trackbar_scale
else:
return lower, upper
def _callback_update_threshold(self, lower_or_upper: str, attribute: str,
color_code: str, value: int) -> None:
"""Callback for YUV GUI toolkit trackbar.
Reads current trackbar value and updates the associated U or V threshold.
This callback assumes that two trackbars, `trackbar_name_lower` and
`trackbar_name_upper`, form a range of values. As a consequence, when one
    of the two trackbars is moved, there is a consistency check that the range
    is valid (i.e. the lower value never exceeds the upper value).
Typical usage example:
To pass it to an OpenCV/Qt trackbar, use this function in a lambda
as follows:
cv2.createTrackbar("Trackbar lower", ..., lambda x:
class_variable._callback_update_threshold("lower", "lower", "U", x))
Args:
lower_or_upper: The behaviour of this callback for the range. Shall be
`lower` or `upper`.
attribute: The name of the threshold in `self._color_ranges` for the
current selected color.
color_code: The color code to change. Shall be "U" or "V".
value: The current value of the trackbar.
"""
if not self._trackbar_reset:
if lower_or_upper == "lower":
limiting_value = cv2.getTrackbarPos(self._trackbar_name_upper,
self._window_name)
if value > limiting_value:
cv2.setTrackbarPos(self._trackbar_name_lower, self._window_name,
limiting_value)
return
elif lower_or_upper == "upper":
limiting_value = cv2.getTrackbarPos(self._trackbar_name_lower,
self._window_name)
if value < limiting_value:
cv2.setTrackbarPos(self._trackbar_name_upper, self._window_name,
limiting_value)
return
selected_color_index = cv2.getTrackbarPos("Color selector",
self._window_name)
colors = list(self._color_ranges.keys())
selected_color = colors[selected_color_index]
updated_value = value / self._trackbar_scale
color_threshold = getattr(self._color_ranges[selected_color], attribute)
if color_code == "U":
color_threshold[1] = updated_value
elif color_code == "V":
color_threshold[2] = updated_value
else:
logging.error(
"Wrong trackbar name. No U/V color code correspondence."
"Provided: `%s`.", color_code)
return
|
|
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import IntegrityError, DatabaseError, ProgrammingError
import vmcatcher.databaseDefinition as model
import os
import re
import logging
import optparse
import smimeX509validation
from vmcatcher.__version__ import version
import vmcatcher
import urllib
import urllib2
import hashlib
import datetime
import uuid
from hepixvmitrust.vmitrustlib import VMimageListDecoder as VMimageListDecoder
from hepixvmitrust.vmitrustlib import time_format_definition as time_format_definition
from vmcatcher.vmcatcher_subscribe.stringsort import split_numeric_sort
from vmcatcher.launch import EventObj
from vmcatcher.vmcatcher_subscribe.msgcheck import fileView
from vmcatcher.listutils import pairsNnot
import vmcatcher.outputfacard
import retrieveFacard
import vmcatcher.queryby
try:
import simplejson as json
except:
import json
import vmcatcher.urimunge as urimunge
from vmcatcher.urimunge import uriNormalise,uriNormaliseAnonymous
# command line error codes.
# 10 failed to download image list.
# 11 failed to validate image list.
# 12 metadata and certificate don't match.
# 13 Endorser not authorised on subscription.
# 14 trust anchor missing
# 15 Database integrity error.
# 16 New version number is same as old version number.
# 17 New version number is less than old version number.
# 31 imagelist dc:identifier invalid.
# 32 image dc:identifier invalid.
# 33 imagelist dc:date:created invalid.
# 34 image has missing parameters in the message.
# 35 image has missing parameters in the message.
# 36 image is not registered with subscription.
# 37 Message was not valid JSON.
# 38 Message JSON was not valid to build image list.
# 39 Creation of ImageDefinition reference failed.
PY2 = sys.version_info[0] < 3
if PY2:
    text_type = unicode  # noqa
else:
    text_type = str
class db_actions(object):
def __init__(self,session):
self.session = session
self.log = logging.getLogger("db_actions")
def endorser_get(self,metadata):
return self.session.query(model.Endorser).\
filter(model.Endorser.id==model.EndorserPrincible.id).\
filter(model.EndorserPrincible.hv_dn==metadata[u'hv:dn']).\
filter(model.EndorserPrincible.hv_ca==metadata[u'hv:ca'])
def endorser_create(self,metadata):
hv_dn_str = metadata[u'hv:dn']
gotquery = self.session.query(model.Endorser).\
filter(model.Endorser.id==model.EndorserPrincible.id).\
filter(model.EndorserPrincible.hv_dn==metadata[u'hv:dn'])
if gotquery.count() != 0:
return gotquery
newlist = model.Endorser(metadata)
self.session.add(newlist)
self.session.commit()
new_endorser = model.EndorserPrincible(newlist.id,metadata)
self.session.add(new_endorser)
self.session.commit()
return self.endorser_get(metadata)
def subscription_get(self,metadata):
subscriptionlist = self.session.query(model.Subscription).\
filter(model.Subscription.uri==metadata[u'hv:uri'])
return subscriptionlist
def subscription_create(self,metadata,authorised):
#self.log.debug( "subscription_create called with=%s" % json.dumps(metadata.keys(),sort_keys=True, indent=4))
subscription_query = self.subscription_get(metadata)
if subscription_query.count() > 0:
return subscription_query
endorser_list = self.endorser_get(metadata)
if endorser_list.count() == 0:
return subscription_query
endorser = endorser_list.one()
endorserId = int(endorser.id)
new_subscription = model.Subscription(metadata)
# We will make the new subscription enabled by default
new_subscription.authorised = True
self.session.add(new_subscription)
self.session.commit()
new_auth = model.SubscriptionAuth(new_subscription.id,endorser.id,authorised)
self.session.add(new_auth)
try:
self.session.commit()
except IntegrityError as expt:
self.log.error("Database integrity error '%s' while subscribing to '%s'." % (expt.args, metadata))
self.log.debug(expt.params)
self.session.rollback()
return self.subscription_get(metadata)
def ImageDefinition_get(self,subscriptionKey,metadata):
subscriptionlist = self.session.query(model.ImageDefinition).\
filter(model.ImageDefinition.identifier == metadata[u'dc:identifier'])
return subscriptionlist
def ImageDefinition_create(self,subscriptionKey,metadata):
ImageDefinitionQuery = self.ImageDefinition_get(subscriptionKey,metadata)
if ImageDefinitionQuery.count() > 0:
return ImageDefinitionQuery
newlist = model.ImageDefinition(subscriptionKey,metadata)
self.session.add(newlist)
self.session.commit()
ImageDefinitionQuery = self.ImageDefinition_get(subscriptionKey,metadata)
return ImageDefinitionQuery
def ImageDefinition_list(self,subscriptionKey):
imagedefList = self.session.query(model.ImageDefinition).\
filter(model.ImageDefinition.subscription==subscriptionKey)
return imagedefList
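# Usage sketch for db_actions (the engine URL and metadata values below are
# assumptions, not part of this module):
#   engine = create_engine("sqlite:///vmcatcher.db")
#   model.init(engine)
#   session = sessionmaker(bind=engine)()
#   db = db_actions(session)
#   subs = db.subscription_get({u'hv:uri': u'https://example.org/image.list'})
#   if subs.count() == 0:
#       ...  # not subscribed yet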
class db_controler(object):
def __init__(self,dboptions,dblog = False):
self.log = logging.getLogger("db_controler")
self.engine = create_engine(dboptions, echo=dblog)
model.init(self.engine)
self.SessionFactory = sessionmaker(bind=self.engine)
self.anchor = None
self.factory_selector = None
#self.factory_view = None
# Set all callbacks to empty
self.callbackEventImageNew = None
self.selectors_available = ['sub_uuid', 'sub_uri']
self.selector_curent = None
self._outputter = vmcatcher.outputfacard.outputFacade()
def _retiver_uri(self,metadata):
retriever = retrieveFacard.retrieveFacard()
if "uri" in metadata.keys():
if metadata["uri"] != None:
if len(metadata["uri"]) > 0:
uri = metadata["uri"]
retriever.uri = uri
if "userName" in metadata.keys():
if metadata["userName"] != None:
if len(metadata["userName"]) > 0:
userName = metadata["userName"]
retriever.username = userName
if "password" in metadata.keys():
if metadata["password"] != None:
if len(metadata["password"]) > 0:
password = metadata["password"]
retriever.password = password
if "anchor" in metadata.keys():
if metadata["anchor"] != None:
if len(metadata["anchor"]) > 0:
anchor = metadata["anchor"]
retriever.trustanchor = anchor
if "filename" in metadata.keys():
if metadata["filename"] != None:
if len(metadata["filename"]) > 0:
filename = metadata["filename"]
retriever.uri = filename
if "protocol" in metadata.keys():
if metadata["protocol"] != None:
if len(metadata["protocol"]) > 0:
protocol = metadata["protocol"]
retriever.protocol = protocol
self.log.debug("protocol=%s" % (protocol))
if "server" in metadata.keys():
if metadata["server"] != None:
if len(metadata["server"]) > 0:
server = metadata["server"]
retriever.server = server
self.log.debug("server=%s" % (server))
resultDict = retriever.requestAsString()
if resultDict == None:
return {'code' : 800}
resultDict['uri'] = uriNormaliseAnonymous(retriever.uri)
return resultDict
def set_selector(self,selector_string):
self.selector_curent = None
if not selector_string in self.selectors_available:
self.log.warning("Invalid selector string set:%s" % (selector_string))
return False
if selector_string == 'sub_uuid':
self.selector_curent = vmcatcher.queryby.query_subscriptions_by_identifier
elif selector_string == 'sub_uri':
self.selector_curent = vmcatcher.queryby.query_subscriptions_by_uri
return True
def setup_trust_anchor(self,directory):
self.anchor = smimeX509validation.LoadDirChainOfTrust(directory)
def setup_view_format(self,format):
self._outputter.format = format
def sessions_list(self):
Session = self.SessionFactory()
self._outputter.fpOutput = sys.stdout
self._outputter.saSession = Session
self._outputter.x509anchor = self.anchor
self._outputter.list_vmcatcher_subscribe()
#view = self.factory_view(sys.stdout,Session,self.anchor)
#view.subscriptions_lister()
return True
def subscriptions_delete(self,subscriptions_selected):
foundOne = False
Session = self.SessionFactory()
db = db_actions(Session)
for selection_item in subscriptions_selected:
query_subscription = self.selector_curent(Session, selection_item)
for a_sub in query_subscription:
                # should not need this code, but do it anyway; maybe a bug in sqlalchemy or, more likely, my db definition
query_image_def_linked = db.ImageDefinition_list(a_sub.id)
for image_def_linked in query_image_def_linked:
Session.delete(image_def_linked)
Session.delete(a_sub)
foundOne = True
Session.commit()
return foundOne
def subscriptions_subscribe(self,inmetadata):
#self.log.debug("subscriptions_subscribe called with %s" % (inmetadata))
metadata = {}
if 'autoEndorse' in inmetadata:
metadata["autoEndorse"] = inmetadata['autoEndorse']
urls_selected = inmetadata['subscription_url_list']
if "userName" in inmetadata:
userName = inmetadata['userName']
metadata["userName"] = userName
password = None
if "password" in inmetadata:
metadata["password"] = inmetadata['password']
trustAnchor = self.anchor
if "trustAnchor" in inmetadata:
trustAnchor = inmetadata['trustAnchor']
metadata["trustAnchor"] = trustAnchor
rc = True
Session = self.SessionFactory()
db = db_actions(Session)
for uri in urls_selected:
mungedUri = urimunge.setUri(uri)
#self.log.error("mungedUri=%s" % (json.dumps(mungedUri,sort_keys=True, indent=4)))
newmetatdata = dict(mungedUri)
newmetatdata["filename"] = uri
newmetatdata["trustAnchor"] = self.anchor
newmetatdata.update(metadata)
if not self.subscribe_file(Session,newmetatdata):
self.log.error("subscriptions subscribe failed for %s" % (uri))
rc = False
continue
if "imagelist_newimage" in inmetadata:
slectorUri = urimunge.getUriAnonymous(newmetatdata)
self.set_selector("sub_uri")
if not self.subscriptions_imagelist_newimage_set([slectorUri],inmetadata["imagelist_newimage"]):
self.log.error("setting subscriptions update policy failed for %s" % (uri))
rc = False
continue
if inmetadata["imagelist_newimage"] == 3:
# Now we have new images so make all images subscribed.
allImages = Session.query(model.ImageDefinition).\
filter(model.ImageDefinition.subscription ==model.Subscription.id).\
filter(model.Subscription.uri == slectorUri)
for image in allImages:
image.cache = 1
Session.add(image)
Session.commit()
return rc
def subscriptions_info(self,subscriptions_selected,outputfiles):
pairs, extra_selectors ,extra_paths = pairsNnot(subscriptions_selected,outputfiles)
for item in extra_selectors:
pairs.append([item,None])
errorhappened = False
Session = self.SessionFactory()
for pair in pairs:
selector_filter = pair[0]
output_file_name = pair[1]
output_fileptr = sys.stdout
if output_file_name != None:
output_fileptr = open(output_file_name,'w+')
output_fileptr.flush()
query_subscription = self.selector_curent(Session,selector_filter)
if query_subscription.count() == 0:
self.log.warning("Selections '%s' does not match any known subscriptions." % (selector_filter))
continue
firstSubscription = query_subscription.first()
self._outputter.fpOutput = output_fileptr
self._outputter.saSession = Session
self._outputter.x509anchor = self.anchor
for item in query_subscription:
self._outputter.display_subscription(item)
if output_file_name != None:
output_fileptr.close()
def setEventObj(self,obj):
self.eventObj = obj
def subscribe_file(self,Session,inmetadata):
metadata_retriver = {}
metadata = {}
autoEndorse = False
if 'autoEndorse' in inmetadata:
if inmetadata["autoEndorse"] == True:
autoEndorse = inmetadata["autoEndorse"]
if 'filename' in inmetadata:
metadata["uri"] = inmetadata["filename"]
if 'trustAnchor' in inmetadata:
metadata["trustAnchor"] = inmetadata["trustAnchor"]
else:
metadata[u'il.transfer.protocol:trustAnchor'] = self.anchor
if 'userName' in inmetadata:
metadata["userName"] = inmetadata["userName"]
metadata[u'il.transfer.protocol:userName'] = inmetadata["userName"]
elif 'username' in inmetadata:
metadata["userName"] = inmetadata["username"]
metadata[u'il.transfer.protocol:userName'] = inmetadata["username"]
if 'password' in inmetadata:
metadata["password"] = inmetadata["password"]
metadata[u'il.transfer.protocol:password'] = inmetadata["password"]
#print inmetadata.keys()
if 'protocol' in inmetadata:
metadata["protocol"] = inmetadata["protocol"]
metadata[u'il.transfer.protocol'] = inmetadata["protocol"]
resultDict = self._retiver_uri(inmetadata)
rc = resultDict['code']
if rc != 0:
if 'error' in resultDict:
self.log.error("%s, while retrieving %s" % (['error'],metadata["uri"]))
self.log.debug(resultDict)
else:
self.log.error("Download of uri '%s' failed." % (metadata["uri"]))
if rc > 255:
return rc
else:
return 10
smimeProcessor = smimeX509validation.smimeX509validation(metadata["trustAnchor"])
try:
smimeProcessor.Process(resultDict['responce'])
except smimeX509validation.truststore.TrustStoreError as expt:
self.log.error("Validate text '%s' produced error '%s'" % (metadata["uri"], expt))
self.log.debug("Downloaded=%s" % (resultDict['responce']))
return False
except smimeX509validation.smimeX509ValidationError as expt:
self.log.error("Validate text '%s' produced error '%s'" % (metadata["uri"], expt))
self.log.debug("Downloaded=%s" % (resultDict['responce']))
return False
if not smimeProcessor.verified:
self.log.error("Failed to verify text '%s'" % (resultDict['uri']))
return False
jsontext = json.loads(smimeProcessor.InputDaraStringIO.getvalue())
if jsontext == None:
self.log.error("Message down loaded from '%s' was not valid JSON." % (resultDict['uri']))
self.log.debug("Downloaded=" % (jsontext))
return False
vmilist = VMimageListDecoder(jsontext)
if vmilist == None:
self.log.error("Failed to decode the json as an image list Object for '%s'." % (resultDict['uri']))
return False
if 'userName' in inmetadata:
metadata["userName"] = inmetadata["userName"]
metadata[u'il.transfer.protocol:userName'] = inmetadata["userName"]
if 'password' in inmetadata:
metadata["password"] = inmetadata["password"]
metadata[u'il.transfer.protocol:password'] = inmetadata["password"]
metadata.update(vmilist.metadata)
metadata.update(vmilist.endorser.metadata)
if u'dc:identifier' not in metadata.keys():
self.log.error('list dc:identifier not found')
return False
if metadata[u'hv:dn'] != smimeProcessor.InputCertMetaDataList[0]['subject']:
self.log.error('Endorser DN does not match signature')
return False
if metadata[u'hv:ca'] != smimeProcessor.InputCertMetaDataList[0]['issuer']:
self.log.error('list hv:ca does not match signature')
return False
#if uriNormaliseAnonymous(metadata[u'hv:uri']) != uriNormaliseAnonymous(resultDict["uri"]):
# self.log.warning('list hv:uri does not match subscription uri')
# self.log.info('hv:uri=%s' % (metadata[u'hv:uri']))
# self.log.info('subscription uri=%s' % (resultDict['uri']))
db = db_actions(Session)
endorser_list = db.endorser_get(metadata)
if endorser_list.count() == 0:
if not autoEndorse:
self.log.error("Endorser '%s':'%s' was not found in database." % (metadata[u'hv:dn'],metadata[u'hv:ca']))
self.log.info("Use '--auto-endorse' to add endorser '%s':'%s' to subscription database." % (metadata[u'hv:dn'],metadata[u'hv:ca']))
return False
else:
# We can create an endorser.
newmetadata = dict(metadata)
newmetadata[u'dc:identifier'] = text_type(uuid.uuid4())
endorser_list = db.endorser_create(newmetadata)
self.log.warning("Endorser '%s':'%s' added to database." % (metadata[u'hv:dn'],metadata[u'hv:ca']))
if endorser_list.count() == 0:
self.log.error('Failed to create an authorised endorser in Database.')
return False
subscription_query = db.subscription_create(metadata,True)
if subscription_query.count() != 1:
self.log.error('Creation of Subscription reference failed.')
return False
subscription = subscription_query.one()
subscriptionKey = int(subscription.id)
failedToCreateImages = []
for imageReference in vmilist.images:
# Now we create image definitions
metadata = {}
metadata.update(imageReference.metadata)
metadata['cache'] = 0
ImageDefinition_query = db.ImageDefinition_create(subscriptionKey,metadata)
if ImageDefinition_query.count() != 1:
self.log.error('Creation of ImageDefinition reference failed.')
failedToCreateImages.append(imageReference)
continue
if len(failedToCreateImages) > 0:
return False
return True
def subscript_update_image(self,Session,subscription,imagelistref,imageObj):
subscriptionKey = subscription.id
ProcessingSubscriptionUuid = subscription.identifier
if not u'dc:identifier' in imageObj.metadata.keys():
self.log.error('Image had no ID so ignored')
# Error code - imagelist dc:identifier invalid.
return 31
db = db_actions(Session)
imageDefQuery = db.ImageDefinition_get(subscriptionKey,imageObj.metadata)
if imageDefQuery.count() != 1:
if self.callbackEventImageNew != None:
# Trigger an event for new image.
self.callbackEventImageNew(imageObj.metadata)
# Now check the update mode.
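# Hedged note (inferred from this method and subscriptions_subscribe above):
# updateMode appears to act as a bitmask -- bit value 1 allows new image
# definitions to be created for the subscription, bit value 2 marks newly
# created definitions as cached (the '& 2' test below), and mode 3 enables both.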
if (subscription.updateMode & 1 != 1):
# We should not create the image reference
self.log.info("ImageId '%s' refused by subscription '%s'" %
(imageObj.metadata[u'dc:identifier'],ProcessingSubscriptionUuid))
# Error code - image dc:identifier invalid.
return 32
else:
# We should create the image reference
metadata = {}
metadata.update(imageObj.metadata)
metadata['cache'] = 0
if (subscription.updateMode & 2 == 2):
metadata['cache'] = 1
ImageDefinition_query = db.ImageDefinition_create(subscriptionKey,metadata)
if ImageDefinition_query.count() != 1:
self.log.error('Finding ImageDefinition failed.')
return 39
imageDefQuery = db.ImageDefinition_get(subscriptionKey,imageObj.metadata)
ThisImageDef = imageDefQuery.one()
ThisImageDefId = int(ThisImageDef.id)
#print ("ThisImageDefId=%s" % (ThisImageDefId))
try:
imageinstance = model.ImageInstance(imagelistref,ThisImageDefId,imageObj.metadata)
except KeyError as expt:
self.log.error("missing parameters '%s'" % expt.message)
Session.rollback()
return 34
Session.add(imageinstance)
try:
Session.commit()
except IntegrityError as expt:
self.log.error("Database integrity error '%s' processing '%s'." % (expt.args, ProcessingSubscriptionUuid))
self.log.debug(expt.params)
Session.rollback()
return 0
# So now we have done the updating of the database and just need to update
# the latest image instance record in the database.
latestimageInstanceQuery = Session.query(model.ImageInstance).\
filter(model.ImageInstance.fkimagelistinstance == imagelistref).\
filter(model.ImageInstance.fkIdentifier == ThisImageDefId)
if latestimageInstanceQuery.count() != 1:
return 0
imageInstancelatest = latestimageInstanceQuery.one()
imageDefQuery = db.ImageDefinition_get(subscriptionKey,imageObj.metadata)
if imageDefQuery.count() != 1:
self.log.error("ImageId '%s' not accepted for subscription '%s'" %
(imageObj.metadata[u'dc:identifier'],ProcessingSubscriptionUuid))
return 36
ThisImageDef = imageDefQuery.one()
ThisImageDef.latest = imageInstancelatest.id
Session.add(ThisImageDef)
Session.commit()
return 0
def subscription_update(self,Session,subscription):
subscriptionKey = int(subscription.id)
ProcessingSubscriptionUuid = str(subscription.identifier)
self.log.info("Updating:%s" % (ProcessingSubscriptionUuid))
retriever = retrieveFacard.retrieveFacard()
retriever.uri = subscription.uri
resultDict = self._retiver_uri({"uri" : subscription.uri,
"trustAnchor" : subscription.trustAnchor,
"userName" : subscription.userName,
"password" : subscription.password,
})
rc = resultDict['code']
if rc != 0:
if 'error' in resultDict:
self.log.error("%s, while retrieving %s" % (resultDict['error'],retriever.uri))
else:
self.log.error("Download of uri '%s' failed." % (subscriptionKey))
if rc > 255:
return rc
else:
return 10
update_unprocessed = resultDict['responce']
#update_unprocessed = str(f.read())
# Now that we have the update, first check its hash
messagehash = hashlib.sha512(update_unprocessed).hexdigest()
now = datetime.datetime.utcnow()
metadataFV = {
u'hv:uri' : str(subscription.uri),
u'dc:identifier' : str(subscription.identifier),
}
#self.log.error("errr:%s" % (ProcessingSubscriptionUuid))
checker = fileView(self.anchor,update_unprocessed,metadataFV)
if checker.errorNo != 0:
self.log.error("Failed to verify subscription '%s' with URI '%s'" % (subscription.identifier,subscription.uri))
self.log.debug(update_unprocessed)
return checker.errorNo
if checker.Json == None:
return 14
metadata = checker.vmilist.metadata
metadata[u'data'] = update_unprocessed
metadata[u'data-hash'] = messagehash
if checker.errorNo != 0:
self.log.info('Message Expired:%s' % (ProcessingSubscriptionUuid))
metadata[u'expired'] = now
Session.commit()
# Now that we know the data, check the SubscriptionAuth
subq = Session.query(model.Subscription, model.SubscriptionAuth).\
filter(model.Endorser.id == model.EndorserPrincible.id).\
filter(model.EndorserPrincible.hv_dn == checker.subject).\
filter(model.EndorserPrincible.hv_ca == checker.issuer).\
filter(model.SubscriptionAuth.endorser == model.Endorser.id).\
filter(model.SubscriptionAuth.subscription == model.Subscription.id).\
filter(model.Subscription.id == subscription.id)
count = subq.count()
if count == 0:
self.log.error("Endorser subject='%s' issuer='%s' not authorised on subscription '%s'" % (checker.subject,checker.issuer,ProcessingSubscriptionUuid))
# Error code - Endorser not authorised on subscription.
return 13
if count != 1:
self.log.error('Database Error processing subq:%s' % (ProcessingSubscriptionUuid))
assert (False)
subscription, auth = subq.one()
# Compare the downloaded list against any previously stored image list instance
VersionCompare = 0
queryJunction = Session.query(model.ImageListInstance).\
filter(model.Subscription.imagelist_latest == model.ImageListInstance.id).\
filter(model.Subscription.id == subscription.id)
if queryJunction.count() == 0:
# we have no older version
self.log.info("First version of:%s" % (ProcessingSubscriptionUuid))
else:
if queryJunction.count() != 1:
self.log.error('Database Error processing queryJunction:%s' % (ProcessingSubscriptionUuid))
assert (False)
imageList = queryJunction.one()
if imageList.data_hash == messagehash:
self.log.debug('Same version:%s' % (ProcessingSubscriptionUuid))
if now > imageList.expires:
self.log.info("Image list '%s' has expired on: '%s'" % (ProcessingSubscriptionUuid,imageList.expires))
if imageList.expired == None:
imageList.expired = now
Session.commit()
# We now know imageList is not too old.
if ((imageList.expired != None) and (checker.errorNo == 0)):
# we have expired previously but now it looks good.
self.log.info('imageList Validated:%s' % (ProcessingSubscriptionUuid))
imageList.expired = None
Session.commit()
if ((imageList.expired == None) and (checker.errorNo != 0)):
# should expire.
self.log.info('imageList Expired:%s' % (ProcessingSubscriptionUuid))
imageList.expired = now
Session.commit()
return 0
messageVersion = checker.Json[u'hv:imagelist'][u'hv:version']
self.log.debug('Downloaded version:%s' % (messageVersion))
VersionCompare = split_numeric_sort(imageList.version,messageVersion)
if VersionCompare == 0:
self.log.warning('Downloaded list for "%s": version "%s" has the same version number as the old version "%s".' % (ProcessingSubscriptionUuid,messageVersion, imageList.version))
#return 16 # 16 New version number is same as old version number.
if VersionCompare < 0:
self.log.error('Downloaded list for "%s": version "%s" has a lower version number than the old version "%s".' % (ProcessingSubscriptionUuid,messageVersion, imageList.version))
return 17 # 17 New version number is less than old version number.
metadata[u'hv:uri'] = uriNormaliseAnonymous(metadata[u'hv:uri'])
imagelist = model.ImageListInstance(auth.id,metadata)
Session.add(imagelist)
try:
Session.commit()
except IntegrityError as expt:
self.log.error("Database integrity error '%s' processing '%s'." % (expt.args,ProcessingSubscriptionUuid))
self.log.debug(expt.params)
Session.rollback()
# Error code - Database integrity error.
return 15
imagelistref = int(imagelist.id)
# Now make a global return number
globalRc = 0
for imageObj in checker.vmilist.images:
# Now update each Image
thisRc = self.subscript_update_image(Session,subscription,imagelistref,imageObj)
if thisRc != 0:
globalRc = thisRc
if subscription.imagelist_latest != None:
oldimagelist_q = Session.query(model.ImageListInstance).\
filter(model.ImageListInstance.id == subscription.imagelist_latest)
for imagelist in oldimagelist_q:
imagelist.authorised = False
Session.add(imagelist)
subscription.updated = datetime.datetime.utcnow()
subscription.uri = metadata[u'hv:uri']
subscription.imagelist_latest = imagelistref
Session.add(subscription)
Session.commit()
return globalRc
def subscriptions_update(self):
if self.anchor == None:
self.log.warning("No enabled certificates, check your x509 dir.")
return 12
Session = self.SessionFactory()
db = db_actions(Session)
rc = 0
subscriptionlist = Session.query(model.Subscription).all()
for subscription in subscriptionlist:
thisRc = self.subscription_update(Session,subscription)
if thisRc != 0:
rc = thisRc
return rc
def subscriptions_image_list(self,subscriptions_selected,outputfiles):
pairs, extra_selectors ,extra_paths = pairsNnot(subscriptions_selected,outputfiles)
for item in extra_selectors:
pairs.append([item,None])
errorhappened = False
Session = self.SessionFactory()
for pair in pairs:
selector_filter = pair[0]
output_file_name = pair[1]
output_fileptr = sys.stdout
if output_file_name != None:
output_fileptr = open(output_file_name,'w+')
output_fileptr.flush()
query_subscription = self.selector_curent(Session,selector_filter)
if query_subscription.count() == 0:
self.log.warning("Selections '%s' does not match any known subscriptions." % (selector_filter))
continue
self._outputter.fpOutput = output_fileptr
self._outputter.saSession = Session
self._outputter.x509anchor = self.anchor
for item in query_subscription:
self._outputter.display_subscription(item)
query_image_def = Session.query(model.ImageDefinition).\
filter(model.ImageDefinition.subscription==item.id)
for imagedef in query_image_def:
self._outputter.display_image_def(imagedef)
if output_file_name != None:
output_fileptr.close()
def subscriptions_image_accept(self,subscriptions,images):
pairs, extra_subscriptions ,extra_images = pairsNnot(subscriptions,images)
if len(extra_subscriptions) > 0:
self.log.error('Not enough images selected')
return False
if len(extra_images) > 0:
self.log.error('Not enough subscriptions selected')
return False
Session = self.SessionFactory()
errors = 0
for sub_uuid, image_uuid in pairs:
image_known = Session.query(model.ImageDefinition.identifier,model.Subscription.identifier).\
filter(model.ImageDefinition.identifier == image_uuid).\
filter(model.ImageDefinition.subscription == model.Subscription.id)
if image_known.count() > 0:
iident, sident = image_known.one()
self.log.error("Subscription '%s' has image '%s' already." % (sident,iident))
errors = errors + 1
continue
sub_known = Session.query(model.Subscription).\
filter(model.Subscription.identifier == sub_uuid)
if sub_known.count() == 0:
self.log.error("Subscription '%s' is unknown." % (sub_uuid))
errors = errors + 1
return False
subscription = sub_known.one()
subscription.imagelist_latest = None
key = subscription.id
metadata = { 'dc:identifier' : image_uuid,
'cache' : 0}
newlist = model.ImageDefinition(key,metadata)
Session.add(newlist)
Session.add(subscription)
Session.commit()
#self.log.info("Subscription '%s' include image '%s'." % (sub_uuid,image_uuid))
if errors != 0:
return False
return True
def subscriptions_image_refuse(self,subscriptions,images):
pairs, extra_subscriptions ,extra_images = pairsNnot(subscriptions,images)
if len(extra_subscriptions) > 0:
self.log.error('Not enough images selected')
return False
if len(extra_images) > 0:
self.log.error('Not enough subscriptions selected')
return False
Session = self.SessionFactory()
errors = 0
for sub_uuid, image_uuid in pairs:
image_known = Session.query(model.Subscription,model.ImageDefinition).\
filter(model.ImageDefinition.identifier == image_uuid).\
filter(model.ImageDefinition.subscription == model.Subscription.id).\
filter(model.Subscription.identifier == sub_uuid)
if image_known.count() == 0:
self.log.error("Subscription '%s' already refuses image '%s'." % (sub_uuid,image_uuid))
errors = errors + 1
continue
subscription ,imageDef = image_known.one()
imageInstance_known = Session.query(model.ImageInstance).\
filter(model.ImageInstance.fkIdentifier == imageDef.id)
for instance in imageInstance_known:
Session.delete(instance)
subscription.imagelist_latest = None
Session.delete(imageDef)
Session.add(subscription)
Session.commit()
if errors != 0:
return False
return True
def subscriptions_trustanchor_set(self,subscriptions, trustAnchor):
errorhappened = False
Session = self.SessionFactory()
for subscription_filter in subscriptions:
query_subscription = self.selector_curent(Session,subscription_filter)
if query_subscription.count() == 0:
self.log.warning("Selection '%s' does not match any known subscriptions." % (subscription_filter))
errorhappened = True
continue
firstSubscription = query_subscription.first()
firstSubscription.trustAnchor = trustAnchor
Session.add(firstSubscription)
Session.commit()
if errorhappened:
return False
return True
def subscriptions_username_set(self,subscriptions, username):
errorhappened = False
Session = self.SessionFactory()
for subscription_filter in subscriptions:
query_subscription = self.selector_curent(Session,subscription_filter)
if query_subscription.count() == 0:
self.log.warning("Selections '%s' does not match any known subscriptions." % (subscription_filter))
errorhappened = True
continue
firstSubscription = query_subscription.first()
firstSubscription.userName = username
Session.add(firstSubscription)
Session.commit()
if errorhappened:
return False
return True
def subscriptions_password_set(self,subscriptions, password):
errorhappened = False
Session = self.SessionFactory()
for subscription_filter in subscriptions:
query_subscription = self.selector_curent(Session,subscription_filter)
if query_subscription.count() == 0:
self.log.warning("Selections '%s' does not match any known subscriptions." % (subscription_filter))
errorhappened = True
continue
firstSubscription = query_subscription.first()
firstSubscription.password = password
Session.add(firstSubscription)
Session.commit()
if errorhappened:
return False
return True
def subscriptions_imagelist_newimage_set(self, subscriptions, imagelist_newimage):
errorhappened = False
Session = self.SessionFactory()
for subscription_filter in subscriptions:
query_subscription = self.selector_curent(Session,subscription_filter)
if query_subscription.count() == 0:
self.log.warning("Selections '%s' does not match any known subscriptions." % (subscription_filter))
errorhappened = True
continue
firstSubscription = query_subscription.first()
firstSubscription.updateMode = imagelist_newimage
# clear cache of image list if one exists
if firstSubscription.imagelist_latest != None:
imagelist_inst_qry = Session.query(model.ImageListInstance).\
filter(model.ImageListInstance.id == firstSubscription.imagelist_latest).\
filter(model.ImageListInstance.id == model.Subscription.imagelist_latest)
il_instance = imagelist_inst_qry.one_or_none()
if il_instance == None:
continue
il_instance.data_hash = "reload cache"
Session.add(il_instance)
Session.add(firstSubscription)
Session.commit()
if errorhappened:
return False
return True
def subscriptions_update_selected(self, subscriptions):
errorhappened = False
Session = self.SessionFactory()
for subscription_filter in subscriptions:
query_subscription = self.selector_curent(Session,subscription_filter)
if query_subscription.count() == 0:
self.log.warning("Selections '%s' does not match any known subscriptions." % (subscription_filter))
errorhappened = True
continue
firstSubscription = query_subscription.first()
thisRc = self.subscription_update(Session,firstSubscription)
if thisRc != 0:
errorhappened = True
if errorhappened:
return False
return True
|
|
# -*- coding: utf-8 -*-
import math
import logging
from tahiti.app_auth import requires_auth, requires_permission
from flask import request, current_app, g as flask_globals
from flask_restful import Resource
from sqlalchemy import or_
from http import HTTPStatus
from marshmallow.exceptions import ValidationError
from tahiti.schema import *
from tahiti.util import translate_validation
from flask_babel import gettext
log = logging.getLogger(__name__)
# region Protected
# endregion
class OperationSubsetListApi(Resource):
""" REST API for listing class OperationSubset """
def __init__(self):
self.human_name = gettext('OperationSubset')
@requires_auth
def get(self):
if request.args.get('fields'):
only = [f.strip() for f in request.args.get('fields').split(',')]
else:
only = ('id', ) if request.args.get(
'simple', 'false') == 'true' else None
operation_subsets = OperationSubset.query
page = request.args.get('page') or '1'
if page is not None and page.isdigit():
page_size = int(request.args.get('size', 20))
page = int(page)
pagination = operation_subsets.paginate(page, page_size, True)
result = {
'data': OperationSubsetListResponseSchema(
many=True, only=only).dump(pagination.items),
'pagination': {
'page': page, 'size': page_size,
'total': pagination.total,
'pages': int(math.ceil(1.0 * pagination.total / page_size))}
}
else:
result = {
'data': OperationSubsetListResponseSchema(
many=True, only=only).dump(
operation_subsets.all())}
if log.isEnabledFor(logging.DEBUG):
log.debug(gettext('Listing %(name)s', name=self.human_name))
return result
@requires_auth
@requires_permission('ADMINISTRATOR',)
def post(self):
result = {'status': 'ERROR',
'message': gettext("Missing json in the request body")}
return_code = HTTPStatus.BAD_REQUEST
if request.json is not None:
request_schema = OperationSubsetCreateRequestSchema()
response_schema = OperationSubsetItemResponseSchema()
operation_subset = request_schema.load(request.json)
try:
if log.isEnabledFor(logging.DEBUG):
log.debug(gettext('Adding %s'), self.human_name)
db.session.add(operation_subset)
db.session.commit()
result = response_schema.dump(operation_subset)
return_code = HTTPStatus.CREATED
except ValidationError as e:
result= {
'status': 'ERROR',
'message': gettext('Invalid data for %(name)s.',
name=self.human_name),
'errors': translate_validation(e.messages)
}
except Exception as e:
result = {'status': 'ERROR',
'message': gettext("Internal error")}
return_code = HTTPStatus.INTERNAL_SERVER_ERROR
if current_app.debug:
result['debug_detail'] = str(e)
log.exception(e)
db.session.rollback()
return result, return_code
class OperationSubsetDetailApi(Resource):
""" REST API for a single instance of class OperationSubset """
def __init__(self):
self.human_name = gettext('OperationSubset')
@requires_auth
def get(self, operation_subset_id):
if log.isEnabledFor(logging.DEBUG):
log.debug(gettext('Retrieving %s (id=%s)'), self.human_name,
operation_subset_id)
operation_subset = OperationSubset.query.get(operation_subset_id)
return_code = HTTPStatus.OK
if operation_subset is not None:
result = {
'status': 'OK',
'data': [OperationSubsetItemResponseSchema().dump(
operation_subset)]
}
else:
return_code = HTTPStatus.NOT_FOUND
result = {
'status': 'ERROR',
'message': gettext(
'%(name)s not found (id=%(id)s)',
name=self.human_name, id=operation_subset_id)
}
return result, return_code
@requires_auth
@requires_permission('ADMINISTRATOR',)
def delete(self, operation_subset_id):
return_code = HTTPStatus.NO_CONTENT
if log.isEnabledFor(logging.DEBUG):
log.debug(gettext('Deleting %s (id=%s)'), self.human_name,
operation_subset_id)
operation_subset = OperationSubset.query.get(operation_subset_id)
if operation_subset is not None:
try:
db.session.delete(operation_subset)
db.session.commit()
result = {
'status': 'OK',
'message': gettext('%(name)s deleted with success!',
name=self.human_name)
}
except Exception as e:
result = {'status': 'ERROR',
'message': gettext("Internal error")}
return_code = HTTPStatus.INTERNAL_SERVER_ERROR
if current_app.debug:
result['debug_detail'] = str(e)
db.session.rollback()
else:
return_code = HTTPStatus.NOT_FOUND
result = {
'status': 'ERROR',
'message': gettext('%(name)s not found (id=%(id)s).',
name=self.human_name, id=operation_subset_id)
}
return result, return_code
@requires_auth
@requires_permission('ADMINISTRATOR',)
def patch(self, operation_subset_id):
result = {'status': 'ERROR', 'message': gettext('Insufficient data.')}
return_code = HTTPStatus.NOT_FOUND
if log.isEnabledFor(logging.DEBUG):
log.debug(gettext('Updating %s (id=%s)'), self.human_name,
operation_subset_id)
if request.json:
request_schema = partial_schema_factory(
OperationSubsetCreateRequestSchema)
# Ignore missing fields to allow partial updates
operation_subset = request_schema.load(request.json, partial=True)
response_schema = OperationSubsetItemResponseSchema()
try:
operation_subset.id = operation_subset_id
operation_subset = db.session.merge(operation_subset)
db.session.commit()
if operation_subset is not None:
return_code = HTTPStatus.OK
result = {
'status': 'OK',
'message': gettext(
'%(n)s (id=%(id)s) was updated with success!',
n=self.human_name,
id=operation_subset_id),
'data': [response_schema.dump(
operation_subset)]
}
except ValidationError as e:
result= {
'status': 'ERROR',
'message': gettext('Invalid data for %(name)s (id=%(id)s)',
name=self.human_name,
id=operation_subset_id),
'errors': translate_validation(e.messages)
}
except Exception as e:
result = {'status': 'ERROR',
'message': gettext("Internal error")}
return_code = HTTPStatus.INTERNAL_SERVER_ERROR
if current_app.debug:
result['debug_detail'] = str(e)
db.session.rollback()
return result, return_code
|
|
#! /usr/bin/env python
"""Test script for the bsddb C module by Roger E. Masse
Adapted to unittest format and expanded scope by Raymond Hettinger
"""
import os, sys
import copy
import bsddb
import dbhash # Just so we know it's imported
import unittest
from test import test_support
from sets import Set
class TestBSDDB(unittest.TestCase):
def setUp(self):
self.f = self.openmethod[0](self.fname, 'c')
self.d = dict(q='Guido', w='van', e='Rossum', r='invented', t='Python', y='')
for k, v in self.d.iteritems():
self.f[k] = v
def tearDown(self):
self.f.sync()
self.f.close()
if self.fname is None:
return
try:
os.remove(self.fname)
except os.error:
pass
def test_getitem(self):
for k, v in self.d.iteritems():
self.assertEqual(self.f[k], v)
def test_len(self):
self.assertEqual(len(self.f), len(self.d))
def test_change(self):
self.f['r'] = 'discovered'
self.assertEqual(self.f['r'], 'discovered')
self.assert_('r' in self.f.keys())
self.assert_('discovered' in self.f.values())
def test_close_and_reopen(self):
if self.fname is None:
# if we're using an in-memory only db, we can't reopen it
# so finish here.
return
self.f.close()
self.f = self.openmethod[0](self.fname, 'w')
for k, v in self.d.iteritems():
self.assertEqual(self.f[k], v)
def assertSetEquals(self, seqn1, seqn2):
self.assertEqual(Set(seqn1), Set(seqn2))
def test_mapping_iteration_methods(self):
f = self.f
d = self.d
self.assertSetEquals(d, f)
self.assertSetEquals(d.keys(), f.keys())
self.assertSetEquals(d.values(), f.values())
self.assertSetEquals(d.items(), f.items())
self.assertSetEquals(d.iterkeys(), f.iterkeys())
self.assertSetEquals(d.itervalues(), f.itervalues())
self.assertSetEquals(d.iteritems(), f.iteritems())
def test_iter_while_modifying_values(self):
if not hasattr(self.f, '__iter__'):
return
di = iter(self.d)
while 1:
try:
key = di.next()
self.d[key] = 'modified '+key
except StopIteration:
break
# it should behave the same as a dict. modifying values
# of existing keys should not break iteration. (adding
# or removing keys should)
fi = iter(self.f)
while 1:
try:
key = fi.next()
self.f[key] = 'modified '+key
except StopIteration:
break
self.test_mapping_iteration_methods()
def test_iteritems_while_modifying_values(self):
if not hasattr(self.f, 'iteritems'):
return
di = self.d.iteritems()
while 1:
try:
k, v = di.next()
self.d[k] = 'modified '+v
except StopIteration:
break
# it should behave the same as a dict. modifying values
# of existing keys should not break iteration. (adding
# or removing keys should)
fi = self.f.iteritems()
while 1:
try:
k, v = fi.next()
self.f[k] = 'modified '+v
except StopIteration:
break
self.test_mapping_iteration_methods()
def test_first_next_looping(self):
items = [self.f.first()]
for i in xrange(1, len(self.f)):
items.append(self.f.next())
self.assertSetEquals(items, self.d.items())
def test_previous_last_looping(self):
items = [self.f.last()]
for i in xrange(1, len(self.f)):
items.append(self.f.previous())
self.assertSetEquals(items, self.d.items())
def test_set_location(self):
self.assertEqual(self.f.set_location('e'), ('e', self.d['e']))
def test_contains(self):
for k in self.d:
self.assert_(k in self.f)
self.assert_('not here' not in self.f)
def test_has_key(self):
for k in self.d:
self.assert_(self.f.has_key(k))
self.assert_(not self.f.has_key('not here'))
def test_clear(self):
self.f.clear()
self.assertEqual(len(self.f), 0)
def test__no_deadlock_first(self, debug=0):
# do this so that testers can see what function we're in in
# verbose mode when we deadlock.
sys.stdout.flush()
# in pybsddb's _DBWithCursor this causes an internal DBCursor
# object is created. Other test_ methods in this class could
# inadvertently cause the deadlock but an explicit test is needed.
if debug: print "A"
k,v = self.f.first()
if debug: print "B", k
self.f[k] = "deadlock. do not pass go. do not collect $200."
if debug: print "C"
# if the bsddb implementation leaves the DBCursor open during
# the database write and locking+threading support is enabled
# the cursor's read lock will deadlock the write lock request..
# test the iterator interface (if present)
if hasattr(self.f, 'iteritems'):
if debug: print "D"
i = self.f.iteritems()
k,v = i.next()
if debug: print "E"
self.f[k] = "please don't deadlock"
if debug: print "F"
while 1:
try:
k,v = i.next()
except StopIteration:
break
if debug: print "F2"
i = iter(self.f)
if debug: print "G"
while i:
try:
if debug: print "H"
k = i.next()
if debug: print "I"
self.f[k] = "deadlocks-r-us"
if debug: print "J"
except StopIteration:
i = None
if debug: print "K"
# test the legacy cursor interface mixed with writes
self.assert_(self.f.first()[0] in self.d)
k = self.f.next()[0]
self.assert_(k in self.d)
self.f[k] = "be gone with ye deadlocks"
self.assert_(self.f[k], "be gone with ye deadlocks")
def test_for_cursor_memleak(self):
if not hasattr(self.f, 'iteritems'):
return
# do the bsddb._DBWithCursor _iter_mixin internals leak cursors?
nc1 = len(self.f._cursor_refs)
# create iterator
i = self.f.iteritems()
nc2 = len(self.f._cursor_refs)
# use the iterator (should run to the first yield, creating the cursor)
k, v = i.next()
nc3 = len(self.f._cursor_refs)
# destroy the iterator; this should cause the weakref callback
# to remove the cursor object from self.f._cursor_refs
del i
nc4 = len(self.f._cursor_refs)
self.assertEqual(nc1, nc2)
self.assertEqual(nc1, nc4)
self.assert_(nc3 == nc1+1)
def test_popitem(self):
k, v = self.f.popitem()
self.assert_(k in self.d)
self.assert_(v in self.d.values())
self.assert_(k not in self.f)
self.assertEqual(len(self.d)-1, len(self.f))
def test_pop(self):
k = 'w'
v = self.f.pop(k)
self.assertEqual(v, self.d[k])
self.assert_(k not in self.f)
self.assert_(v not in self.f.values())
self.assertEqual(len(self.d)-1, len(self.f))
def test_get(self):
self.assertEqual(self.f.get('NotHere'), None)
self.assertEqual(self.f.get('NotHere', 'Default'), 'Default')
self.assertEqual(self.f.get('q', 'Default'), self.d['q'])
def test_setdefault(self):
self.assertEqual(self.f.setdefault('new', 'dog'), 'dog')
self.assertEqual(self.f.setdefault('r', 'cat'), self.d['r'])
def test_update(self):
new = dict(y='life', u='of', i='brian')
self.f.update(new)
self.d.update(new)
for k, v in self.d.iteritems():
self.assertEqual(self.f[k], v)
def test_keyordering(self):
if self.openmethod[0] is not bsddb.btopen:
return
keys = self.d.keys()
keys.sort()
self.assertEqual(self.f.first()[0], keys[0])
self.assertEqual(self.f.next()[0], keys[1])
self.assertEqual(self.f.last()[0], keys[-1])
self.assertEqual(self.f.previous()[0], keys[-2])
self.assertEqual(list(self.f), keys)
class TestBTree(TestBSDDB):
fname = test_support.TESTFN
openmethod = [bsddb.btopen]
class TestBTree_InMemory(TestBSDDB):
fname = None
openmethod = [bsddb.btopen]
class TestHashTable(TestBSDDB):
fname = test_support.TESTFN
openmethod = [bsddb.hashopen]
class TestHashTable_InMemory(TestBSDDB):
fname = None
openmethod = [bsddb.hashopen]
## # (bsddb.rnopen,'Record Numbers'), 'put' for RECNO for bsddb 1.85
## # appears broken... at least on
## # Solaris Intel - rmasse 1/97
def test_main(verbose=None):
test_support.run_unittest(
TestBTree,
TestHashTable,
TestBTree_InMemory,
TestHashTable_InMemory,
)
if __name__ == "__main__":
test_main(verbose=True)
|
|
""" Here we access geojson held in a textfile - it is then used to perform the
following tasks reqd for the GIS programming assignment.....
Specifically, we want to do the following:
Create a single polygon from the Union of all the polygons.
Compute the centroid of the single polygon.
Extract the points that lie within the single polygon.
Compute a convex hull and centroid for the extracted points
Compute the distance between the centroid of the single polygon and the centroid of the points that lie within the single polygon.
Create a representation of the line joining the two centroids
Geocode both centroids and add their names to the appropriate point as an attribute
Create shapefiles to store the results of the above. Bear in mind that a shapefile contains a single geometry type and is a set of thematically related features. Therefore you will need to create shapefiles as follows:
Combined polygon from Union
Points that lie within Combined Polygon
Convex hull of the points from above
Both centroids. Each should have an attribute to hold its name returned from the geocoding process.
Linestring representing the distance between the centroids
"""
from tkinter import *
from tkinter import ttk
from collections import defaultdict
from tkinter import messagebox
from shapely.ops import cascaded_union
import shapely.geometry as geometry
from descartes import PolygonPatch
import matplotlib.pyplot as plt
import fiona
from fiona.crs import from_epsg
import json
import os
def main():
scriptDir = os.path.dirname(__file__)
op_data = os.path.normpath(os.path.join(scriptDir, "op_data"))
if not os.path.exists(op_data):
os.mkdir(op_data)
root = Tk()
LoadingGUI(root)
root.mainloop()
class MyShape:
#todo add methods to reproject, perform geometric functions etc.
def __init__(self, geojson_obj, feature_id):
from shapely import geometry
self.crs = geojson_obj['crs']
self.type = geojson_obj['type']
self.bbox = geojson_obj['bbox']
# create a dict of {name: (geom, properties)} for each feature in the dataset
self.features = {f['properties'][feature_id]:(geometry.asShape(f['geometry']),f['properties'])
for f in geojson_obj['features']}
class MicksGis:
"""
This class constructs the GIS GUI.
We pass in the collection of MyShape objects (see the note on the expected
structure of `datasets` immediately after this docstring).
"""
def __init__(self, master, datasets):
with open("provinces.txt",'r') as f2:
prov_str = f2.read()
prov_polygons = json.loads(prov_str)
provs = []
for f in prov_polygons['features']:
provs.append(geometry.asShape(f['geometry']))
self.bg = cascaded_union(provs)
self.master = master
self.datasets = datasets
self.current_dataset = ""
self.op_counter = 0
self.op_stack = {}
self.operation = 'N' # this holds a value to tell which operation is currently in progress
# M = Merge, I = Intermediate, G = Geocode, N = None
self.master.title("SIMPLE GIS")
# Set Button style
s = ttk.Style()
s.configure('Wait.TButton',foreground = 'red', state = 'disabled')
s.configure('Go.TButton', foreground = 'green', state = 'active')
# Declaring variables
self.cb_datasets_source = []
self.cb_datasets_source = [d for d in self.datasets]
self.cb_op_data_source = []
self.lb_features_source = StringVar()
self.lb_feature_data_source = StringVar()
self.dialog_text = StringVar()
self.dialog_text.set('Messages will appear here.')
# widget declarations
self.frm_mainframe = ttk.Frame(self.master,
)
self.lbl_message = ttk.Label(self.master,
font = ('Helvetica', 16),
foreground = 'blue',
textvariable = self.dialog_text)
# Set up frames
self.frm_data_pane_top = ttk.LabelFrame(self.frm_mainframe,
text = 'Dataset Explorer',
width = 40)
self.frm_data_pane_middle = ttk.LabelFrame(self.frm_mainframe,
text = 'Feature Explorer',
width = 40)
self.frm_data_pane_bottom = ttk.LabelFrame(self.frm_mainframe,
text = 'Feature Data',
width = 40)
self.frm_functions = ttk.LabelFrame(self.frm_mainframe,
text = 'Functions')
#Set up widgets
# Data selection and viewing
self.lbl_ip_data = ttk.Label(self.frm_data_pane_top,
text = 'Input Data:')
self.cb_datasets = ttk.Combobox(self.frm_data_pane_top,
height = 5,
values = self.cb_datasets_source,
width = 40)
self.lbl_op_data = ttk.Label(self.frm_data_pane_top,
text = 'Output Data:')
self.cb_op_data = ttk.Combobox(self.frm_data_pane_top,
height = 5,
values = self.cb_op_data_source,
width = 40,
state = 'disabled')
self.lb_features = Listbox(self.frm_data_pane_middle,
height = 10,
listvariable = self.lb_features_source,
width = 40,
state = 'disabled')
self.lb_feature_data = Listbox(self.frm_data_pane_bottom,
height = 10,
listvariable = self.lb_feature_data_source,
width = 40)
# Functions
self.btn_feature_display = ttk.Button(self.frm_data_pane_middle,
text = 'DISPLAY SELECTED',
style = 'Wait.TButton',
command = lambda: self.display(feature_name =
self.lb_features.get(
self.lb_features.curselection())))
self.btn_confirm = ttk.Button(self.frm_data_pane_middle,
text = 'CONFIRM SELECTED',
style = 'Wait.TButton',
state = 'disabled',
command = lambda: self.confirm(self.lb_features.curselection()))
self.btn_merge_polygons = ttk.Button(self.frm_functions,
width = 20,
cursor = 'hand1',
text = 'MERGE',
style = 'Wait.TButton',
command = self.merge_polys)
self.btn_points_within_poly = ttk.Button(self.frm_functions,
width = 20,
cursor = 'hand1',
text = 'Ps in POLY',
style = 'Wait.TButton',
command = self.points_within_poly)
self.btn_centroid = ttk.Button(self.frm_functions,
width = 20,
cursor = 'hand1',
text = 'CENTROID',
style = 'Wait.TButton',
command = self.centroid)
self.btn_make_shp = ttk.Button(self.frm_functions,
width = 20,
cursor = 'hand1',
text = 'MAKE .SHP',
style = 'Wait.TButton',
command = self.make_shp)
self.btn_geocode = ttk.Button(self.frm_functions,
width = 20,
cursor = 'hand1',
text = 'GEOCODE',
style = 'Wait.TButton',
command = self.geocode)
# widget placement
self.lbl_message.grid(row = 0, column = 0)
self.frm_mainframe.grid(row = 1, column = 0)
self.frm_data_pane_top.grid(row = 0, column = 0, sticky = 'w')
self.lbl_ip_data.grid(row = 0, column = 0, sticky = 'new')
self.cb_datasets.grid(row = 0, column = 1, sticky = 'ew')
self.lbl_op_data.grid(row = 0, column = 2, sticky = 'nw')
self.cb_op_data.grid(row = 0, column = 3, sticky = 'new')
self.frm_data_pane_middle.grid(row = 1, column = 0, sticky = 'ew')
self.lb_features.grid(row = 0, column = 0, sticky = 'ew')
self.btn_feature_display.grid(row = 1, column = 0, sticky = 'ew')
self.btn_confirm.grid(row = 2, column = 0, sticky = 'ew')
self.frm_data_pane_bottom.grid(row = 2, column = 0, sticky = 'ew')
self.lb_feature_data.grid(row = 0, column = 0, sticky = 'ew')
self.frm_functions.grid(row = 3, column = 0,
columnspan = 1)
self.btn_merge_polygons.grid(row = 0, column = 0)
self.btn_points_within_poly.grid(row = 0, column = 1)
self.btn_centroid.grid(row = 0, column = 2)
self.btn_make_shp.grid(row = 0, column = 3)
self.btn_geocode.grid(row = 0, column = 4)
# event handling
_ = self.cb_datasets.bind("<<ComboboxSelected>>", self.dataset_cb_newselection)
_ = self.lb_features.bind("<<ListboxSelect>>", self.feature_lb_newselection)
_ = self.frm_functions.bind("<<Button1>>", self.check_op_stack)
# functions
def check_op_stack(self, event=None):
if self.op_stack:
self.cb_op_data.configure(state = 'normal')
def display(self, feature_name = None, *args):
# allows the function to be used from two call paths: when a feature_name is
# passed it displays data from the current dataset, otherwise it displays a
# newly created geometry passed in via *args
if feature_name:
geom = self.datasets[self.current_dataset].features[feature_name][0]
if geom.geom_type not in ('Polygon', 'MultiPolygon'):
self.dialog_text.set('This geometry is invalid. Please use a different dataset')
return
geom = cascaded_union(geom) #to dissolve multipolygons
geom_bdry = geom.boundary
minx, miny, maxx, maxy = self.bg.bounds
w, h = maxx - minx, maxy - miny
fig = plt.figure(1, figsize = (5, 5), dpi = 180, frameon = False)
ax = fig.add_subplot(111)
ax.set_xlim(minx,maxx)
ax.set_ylim(miny,maxy)
for poly in self.bg:
bg_patch = PolygonPatch(poly, fc = 'lightsage', ec = 'k', alpha = 1)
ax.add_patch(bg_patch)
if geom.geom_type == 'MultiPolygon':
for poly in geom:
patch = PolygonPatch(poly, fc= 'teal', ec='navy', alpha = 0.5)
ax.add_patch(patch)
else:
patch = PolygonPatch(geom, fc='teal', ec='navy', alpha = 0.5)
ax.add_patch(patch)
plt.show()
else:
geom = args[0]
name = args[1]
geom = cascaded_union(geom) #to dissolve multipolygons
minx, miny, maxx, maxy = self.bg.bounds
w, h = maxx - minx, maxy - miny
fig = plt.figure(1, figsize = (5, 5), dpi = 180, frameon = False)
ax = fig.add_subplot(111)
ax.set_xlim(minx,maxx)
ax.set_ylim(miny,maxy)
for poly in self.bg:
bg_patch = PolygonPatch(poly, fc = 'lightsage', ec = 'k', alpha = 1)
ax.add_patch(bg_patch)
if geom.geom_type == 'MultiPolygon':
for poly in geom:
patch = PolygonPatch(poly, fc= 'teal', ec='navy', alpha = 0.5)
ax.add_patch(patch)
else:
patch = PolygonPatch(geom, fc='teal', ec='navy', alpha = 0.5)
ax.add_patch(patch)
plt.title(name)
plt.show()
def dataset_cb_newselection(self, event):
self.lb_feature_data_source.set([]) # wipe the feature data source
self.current_dataset = event.widget.get()
self.dialog_text.set("You have chosen - " + self.current_dataset.capitalize())
self.update_feature_explorer(self.current_dataset)
def update_feature_explorer(self, dataset_name):
item_list = list(self.datasets[dataset_name].features.keys())
self.lb_features_source.set(item_list)
self.lb_features.configure(state = 'normal')
def feature_lb_newselection(self, event):
owner = event.widget
if self.operation != 'N':
pass
else:
self.value_of_combo = owner.get(owner.curselection())
self.dialog_text.set("You have chosen - " + self.value_of_combo.capitalize())
self.update_feature_data_explorer(self.value_of_combo)
def update_feature_data_explorer(self, feature_name):
properties = self.datasets[self.current_dataset].features[feature_name][1]
op_list = ["{} : {}".format(k,v) for k, v in properties.items()]
self.lb_feature_data_source.set(op_list)
self.lb_feature_data.configure(state = 'normal')
def confirm(self, data_lines): # this acts as a confirm for selection of data, and returns to
# origin function with the data selected
if self.operation == 'M':
items = [self.lb_features.get(i) for i in data_lines]
data = [self.datasets[self.current_dataset].features[feature_name][0]
for feature_name in items]
self.merge_polys(data, items)
#elif
def merge_polys(self, data = None, *args):
# allows the feature listbox to become enabled for multiple selections
# and waits for items to be selected and confirmed
if data == None:
self.lb_feature_data_source.set([])
self.btn_feature_display.configure(state = 'disabled')
self.lb_features.configure(selectmode = 'multiple')
self.operation = 'M'
self.btn_confirm.configure(state = 'normal')
self.dialog_text.set('Please confirm when you have selected your items')
pass
else: # this is the return from the confirm button
merged_geom = cascaded_union(data)
name = 'merged' + str(self.op_counter)
self.display(None, merged_geom, name)
self.make_merged_shp(merged_geom, name = args[0]) # this makes a shapefile
self.btn_confirm.configure(state = 'disabled')
self.lb_features.configure(selectmode = 'single')
self.btn_feature_display.configure(state = 'normal')
self.btn_confirm.configure(state = 'disabled')
self.points_within_poly(merged_geom)
self.centroid(merged_geom)
def points_within_poly(self, poly):
if 'dit:geonames_pop_5000' in self.datasets.keys():
self.current_dataset = 'dit:geonames_pop_5000'
elif 'dit:geonames_populated' in self.datasets.keys():
self.current_dataset = 'dit:geonames_populated'
else:
self.dialog_text.set('Please return to last GUI and pick a point dataset:')
return
points = self.datasets[self.current_dataset].features
print(len(points))
contained_points = {}
for k,v in points.items():
if poly.contains(v[0]):
contained_points[k] = v
# contained_points now maps feature name -> (geometry, properties) for the
# points that fall inside poly; nothing is returned or displayed here yet
def centroid(self, geom):
pass
def make_shp(self):
pass
def make_merged_shp(self, data, name, crs = None):
self.op_counter += 1
geom_type = data.geom_type
a_schema = {'geometry': geom_type,
'properties': {'name':'str'}
}
filename = 'merged' + str(self.op_counter) + ".shp"
path = os.path.join('op_data',filename)
obj_name = 'merged' + str(self.op_counter)
if not crs:
my_crs = self.datasets[self.current_dataset].crs
crs = from_epsg(my_crs['properties']['code'])
with fiona.open(path,
'w',
driver= 'ESRI Shapefile',
crs= crs,
schema= a_schema) as c:
c.write({
'properties':{'name':obj_name},
'geometry':geometry.mapping(data)})
def geocode(self):
pass
class LoadingGUI():
def __init__(self, master):
self.master = master
self.master.title("Dataset selection")
master.protocol("WM_DELETE_WINDOW", self.catch_destroy)
# Set Button style
s = ttk.Style()
s.configure('Wait.TButton',foreground = 'red', state = 'disabled')
s.configure('Go.TButton', foreground = 'green', state = 'active')
# Initialise variables here
self.base_params = {'host': "mf2.dit.ie:8080",
'layer': "cso:ctygeom",
'srs_code': 29902,
'properties': "",
'geom_field': "",
'filter_property': "",
'filter_values': ""} # dict to store the fetch params
self.param1 = StringVar()
self.param2 = StringVar()
self.param3 = StringVar()
self.param4 = StringVar()
self.param5 = StringVar()
self.param6 = StringVar()
self.param7 = StringVar()
self.params_list = [self.param1,
self.param2,
self.param3,
self.param4,
self.param5,
self.param6,
self.param7] # list to allow iterative assignment and retrieval of
# params
self.gj_stack = defaultdict(list) # gj_stack stores retrieved geojson objects, keyed by layer name
self.prop_list = StringVar()
self.prop_sample = StringVar()
self.prop_sample.set('Data values will appear here')
self.feature_property = StringVar()
self.out_stack = []
self.stack_text = []
self.lv_stack = StringVar()
# Initialise the widgets
self.mainframe = ttk.Frame(self.master)
self.label1 = ttk.Label(self.mainframe,
text = "THIS GUI SUPPORTS INTERACTION WITH\n"+
"A GEOSERVER.",
foreground = 'black',
relief = 'sunken',
font =('Helvetica', '12'),
justify = 'center',
anchor = 'center')
self.label2 = ttk.Label(self.mainframe,
text = "Please use buttons to select datasets or enter custom\n"
+ "parameters using the boxes on the left",
foreground = 'blue',
relief = 'sunken',
anchor = 'center')
self.entry_frame = ttk.LabelFrame(self.mainframe,
text = 'Enter parameters here:',
relief = 'sunken')
self.display_frame = ttk.LabelFrame(self.mainframe,
text = 'Current Parameters:',
relief = 'sunken',
width = 30)
self.button_frame = ttk.LabelFrame(self.mainframe,
text = 'Select one of the datasets\n' +
'by clicking the button',
relief = 'sunken')
self.geojson_nav_frame = ttk.LabelFrame(self.mainframe,
text = 'Please explore the gj_stack here',
relief = 'sunken')
self.entry1 = ttk.Entry(self.entry_frame,
textvariable = self.param1)
self.entry2 = ttk.Entry(self.entry_frame,
textvariable = self.param2)
self.entry3 = ttk.Entry(self.entry_frame,
textvariable = self.param3)
self.entry4 = ttk.Entry(self.entry_frame,
textvariable = self.param4)
self.entry5 = ttk.Entry(self.entry_frame,
textvariable = self.param5)
self.entry6 = ttk.Entry(self.entry_frame,
textvariable = self.param6)
self.entry7 = ttk.Entry(self.entry_frame,
textvariable = self.param7)
self.lbl_p1 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'host:')
self.lbl_p2 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'layer')
self.lbl_p3 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'spatial ref:')
self.lbl_p4 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'properties:')
self.lbl_p5 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'geom field:')
self.lbl_p6 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'filter field:')
self.lbl_p7 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'filter criteria:')
self.button_load_params = ttk.Button(self.entry_frame,
text = "^ Load ^",
command = self.load_params)
self.display1 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display2 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display3 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display4 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display5 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display6 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display7 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.button_County = ttk.Button(self.button_frame,
text = 'County Polygons',
command = self.county_polygons,
width = 30)
self.button_Towns = ttk.Button(self.button_frame,
text = 'Town Points',
command = self.town_points,
width = 30)
self.button_LargeTowns = ttk.Button(self.button_frame,
text = 'Large Town Points',
command = self.large_town_points,
width = 30)
self.button_EDs = ttk.Button(self.button_frame,
text = 'ED Polygons',
command = self.ed_polygons,
width = 30)
self.button_Provinces = ttk.Button(self.button_frame,
text = 'Province Polygons',
command = self.province_polygons,
width = 30)
self.button_SAs = ttk.Button(self.button_frame,
text = 'SA Polygons',
command = self.sa_polygons,
width = 30)
self.button_Fetch = ttk.Button(self.display_frame,
text = '^ FETCH ^',
width = 40,
command = self.fetch_geojson)
# Bottom half of GUI
self.l_frame = ttk.LabelFrame(self.mainframe,
text = 'Pick dataset and choose feature identifier')
self.r_frame = ttk.LabelFrame(self.mainframe,
text= 'View out_stack here, and send to GIS')
#todo add a labelframe to display the geojson obj metadata
self.cb_dataset = ttk.Combobox(self.l_frame)
self.lbl_properties = ttk.Label(self.l_frame,
text = 'Feature properties')
self.lb_properties = Listbox(self.l_frame,
exportselection = 0,
bd = 5,
width = 40,
selectmode = SINGLE,
listvariable = self.prop_list,
state = 'disabled'
)
self.lbl_example = ttk.Label(self.l_frame,
textvariable = self.feature_property,
foreground = 'red',
background = 'white',
relief = 'sunken',
anchor = 'center',
font =('Helvetica', '12'))
self.lb_stack = Listbox(self.r_frame,
exportselection = 0,
bd = 5,
width = 40,
selectmode = SINGLE,
state = 'disabled',
listvariable = self.lv_stack
)
self.button_confirm_send = ttk.Button(self.l_frame,
text = 'Confirm and Add to Stack',
command = self.confirm,
)
self.button_clear_stack = ttk.Button(self.r_frame,
text = 'Clear Stack',
command = self.clear_stack)
self.button_gis_open = ttk.Button(self.r_frame,
text = 'Open GIS with current Stack',
command = lambda: self.open_gis(self.out_stack))
self.info_text = ttk.Label(self.mainframe,
text = 'Information messages will appear here',
foreground = 'blue')
# Layout the GUI
self.mainframe.grid(row=0, column = 0)
self.label1.grid(row = 0, column = 0, columnspan = 4, sticky = 'ew')
self.entry_frame.grid(row = 2, column = 0, sticky = 'ns')
self.lbl_p1.grid(row = 0, column = 0, sticky = 'ew')
self.lbl_p2.grid(row = 1, column = 0, sticky = 'ew')
self.lbl_p3.grid(row = 2, column = 0, sticky = 'ew')
self.lbl_p4.grid(row = 3, column = 0, sticky = 'ew')
self.lbl_p5.grid(row = 4, column = 0, sticky = 'ew')
self.lbl_p6.grid(row = 5, column = 0, sticky = 'ew')
self.lbl_p7.grid(row = 6, column = 0, sticky = 'ew')
self.entry1.grid(row = 0, column = 1, sticky = 'ew')
self.entry2.grid(row = 1, column = 1, sticky = 'ew')
self.entry3.grid(row = 2, column = 1, sticky = 'ew')
self.entry4.grid(row = 3, column = 1, sticky = 'ew')
self.entry5.grid(row = 4, column = 1, sticky = 'ew')
self.entry6.grid(row = 5, column = 1, sticky = 'ew')
self.entry7.grid(row = 6, column = 1, sticky = 'ew')
self.button_load_params.grid(row = 7, column = 1, sticky = 'ew')
self.display_frame.grid(row = 2, column = 1, sticky = 'ns')
self.display1.grid(row = 0, sticky = 'ew')
self.display2.grid(row = 1, sticky = 'ew')
self.display3.grid(row = 2, sticky = 'ew')
self.display4.grid(row = 3, sticky = 'ew')
self.display5.grid(row = 4, sticky = 'ew')
self.display6.grid(row = 5, sticky = 'ew')
self.display7.grid(row = 6, sticky = 'ew')
self.button_Fetch.grid(row = 7, column = 0,
sticky = 'ew',
columnspan = 2)
for child, i in zip(self.display_frame.winfo_children(), self.params_list):
child.configure(text = i.get())
self.button_frame.grid(row = 2, column = 2, sticky = 'ns')
self.button_LargeTowns.grid(row = 0, sticky = 'ew')
self.button_County.grid(row = 1, sticky = 'ew')
self.button_EDs.grid(row = 2, sticky = 'ew')
self.button_Provinces.grid(row = 3, sticky = 'ew')
self.button_SAs.grid(row = 4, sticky = 'ew')
self.button_Towns.grid(row = 5, sticky = 'ew')
self.l_frame.grid(row = 3, column = 0, sticky = 'ew')
self.r_frame.grid(row = 3, column = 1,
sticky = 'ew',
columnspan = 2)
self.cb_dataset.grid(row = 1, sticky = 'ew')
self.lbl_properties.grid(row = 2)
self.lb_properties.grid(row = 3, sticky = 'sew')
self.button_confirm_send.grid(row = 4, column = 0, sticky ='ew')
self.lbl_example.grid(row = 3, column = 0, sticky = 'sew')
self.lb_stack.grid(row = 1, column = 0, sticky = 'sw')
self.button_gis_open.grid(row = 2, column = 0,
sticky = 'sew')
self.button_clear_stack.grid(row = 3, column = 0,
sticky = 'sew')
self.info_text.grid(row = 4, columnspan = 4)
# Event Management
self.cb_dataset.bind("<<ComboboxSelected>>", self.cb_dataset_selection)
self.lb_properties.bind("<<ListboxSelect>>", self.item_selection)
def clear_stack(self):
self.out_stack = []
self.stack_text = []
self.lv_stack.set('')
def item_selection(self, event):
owner = event.widget
item = owner.get(owner.curselection())
current_dataset = self.gj_stack[self.cb_dataset.get()]
item_str = str(current_dataset['features'][0]['properties'][item])
if len(item_str) > 30:
item_str = "{}{}".format(item_str[:25],'....')
self.feature_property.set(item_str)
def catch_destroy(self):
if messagebox.askokcancel("Quit", "Do you really want to quit?"):
self.master.destroy()
def cb_dataset_selection(self, event):
owner = event.widget
feature_props = self.gj_stack[owner.get()]['features'][0]['properties']
self.prop_list.set(list(feature_props))
self.lb_properties.configure(state = 'normal')
def confirm(self):
#todo add exception handling for no item selected
ds_name = self.cb_dataset.get()
current_dataset = self.gj_stack[ds_name]
if ds_name in [i[2] for i in self.out_stack]:
messagebox.showerror('Info','The dataset is already in the out_stack')
pass
else:
try:
feature_name = self.lb_properties.get(self.lb_properties.curselection())
self.out_stack.append([current_dataset, feature_name, ds_name])
#todo format the below string to align nicely
self.stack_text.append('Dataset: {} --\t\t Feature Name: {}'.format(ds_name,feature_name))
self.lv_stack.set(self.stack_text)
self.info_text.configure(text = 'Please examine the item.')
except TclError:
self.info_text.configure(text = 'There is no item selected.')
def open_gis(self, stack):
op_dict = defaultdict()
if stack:
for obj in stack:
op_dict[obj[2]] = MyShape(obj[0],obj[1])
self.new_window = Toplevel(self.master)
self.my_gis = MicksGis(self.new_window, op_dict)
else:
self.info_text.configure(text = 'Please highlight the feature name and send again:')
pass
def load_params(self):
for child, i in zip(self.display_frame.winfo_children(), self.params_list):
child.configure(text = i.get())
def county_polygons(self):
self.params_list[1].set('cso:ctygeom')
self.load_params()
def town_points(self):
self.params_list[1].set('dit:geonames_populated')
self.load_params()
def sa_polygons(self):
self.params_list[1].set('cso:sageom')
self.load_params()
def large_town_points(self):
self.params_list[1].set('dit:geonames_pop_5000')
self.load_params()
def province_polygons(self):
self.params_list[1].set('cso:prgeom')
self.load_params()
def ed_polygons(self):
self.params_list[1].set('cso:edgeom')
self.load_params()
def fetch_geojson(self):
#TODO Set styles to show when gj_stack is loading
try:
self.info_text.configure(text = 'LOADING DATA....',
foreground = 'red')
self.mainframe.update_idletasks()
btn = self.button_Fetch
btn.configure(style = 'Wait.TButton')
self.param1.set(self.base_params['host'])
self.param3.set(self.base_params['srs_code'])
self.param4.set(self.base_params['properties'])
self.param5.set(self.base_params['geom_field'])
self.param6.set(self.base_params['filter_property'])
self.param7.set(self.base_params['filter_values'])
self.base_params['host'] = self.param1.get()
self.base_params['layer'] = self.param2.get()
self.base_params['srs_code'] = self.param3.get()
self.base_params['properties'] = self.param4.get()
self.base_params['geom_field'] = self.param5.get()
self.base_params['filter_property'] = self.param6.get()
self.base_params['filter_values'] = self.param7.get()
gj = self.get_geojson(self.base_params)
# keep a stack of the fetched geojson objects, storing each layer only once
self.gj_stack[self.base_params['layer']] = gj
self.info_text.configure(text = 'Request Executed Successfully',
foreground = 'green')
self.cb_dataset['values'] = [i for i in self.gj_stack.keys()]
except Exception:
self.info_text.configure(text = 'Error With Request Parameters: Please Try Again',
foreground = 'red')
def get_geojson(self, params):
"""
This function accepts a dictionary of parameters and returns a GeoJSON
representation of the requested layer. The dictionary takes a format similar
to the following example:
{
"host": "mf2.dit.ie:8080",
"layer": "cso:ctygeom",
"srs_code": 29902,
"properties": ["countyname", ],
"geom_field": "geom",
"filter_property": "countyname",
"filter_values": ["Cork", "Kerry"]
}
You can filter the set of features returned by adjusting "filter_values". This is a list of values that must
be present in "filter_property". In the above example you'd get the counties of Cork and Kerry plus Cork City.
Similarly, you can filter the properties returned to reduce their number. If you use this feature, you'll need to
set "geom_field" to the name of the geometry field. Geoserver can give you this.
All values in the dictionary are optional except "host" and "layer".
:param Dictionary as above:
:return: Parsed GeoJSON or exception as appropriate
"""
import urllib.parse
import httplib2
import os, os.path
import json
import xml.etree.ElementTree as etree
#
# Check that the parameters exist and/or are sensible. Because the filter can contain some 'odd' characters such as '%'
# and single quotes, the filter text needs to be url encoded so that text like "countyname LIKE '%Cork%'" becomes
# "countyname%20LIKE%20%27%25Cork%25%27", which is safer for URLs.
#
if "host" not in params:
raise ValueError("Value for 'host' required")
if "layer" not in params:
raise ValueError("Value for 'layer' required")
if "srs_code" in params and params["srs_code"]:
srs_text = "&srsName=epsg:{}".format(params["srs_code"])
else:
srs_text = ""
if "properties" in params and params["properties"]:
item_string = ""
for item in params["properties"]:
item_string += str(item) + ","
if "geom_field" in params and params["geom_field"]:
item_string += str(params["geom_field"])
property_text = "&PROPERTYNAME={}".format(item_string)
else:
property_text = ""
if "filter_property" in params and params["filter_property"] and params["filter_values"]:
filter_text = "{filter_property} LIKE '%{filter_values}%'".format(filter_property=params["filter_property"], filter_values=params["filter_values"][0])
for item in range(1, len(params["filter_values"])):
filter_text += "OR {filter_property} LIKE '%{filter_values}%'".format(filter_property=params["filter_property"], filter_values=params["filter_values"][item])
filter_text = urllib.parse.quote(filter_text)
filter_text = "&CQL_FILTER=" + filter_text
else:
filter_text = ""
url = "http://{host}/geoserver/ows?" \
"service=WFS&version=1.0.0&" \
"request=GetFeature&" \
"typeName={layer}&" \
"outputFormat=json".format(host=params["host"], layer=params["layer"])
url += srs_text
url += property_text
url += filter_text
#
# Make a directory to hold downloads so that we don't have to repeatedly download them later, i.e. if they already
# exist we get them from a local directory. This directory is called ".httpcache".
#
scriptDir = 'C:\\Python34'
cacheDir = os.path.join(scriptDir, ".httpcache")
if not os.path.exists(cacheDir):
os.mkdir(cacheDir)
#
# Go to the web and attempt to get the resource
#
try:
h = httplib2.Http(cacheDir)
response_headers, response = h.request(url)
response = response.decode()
#
# Geoserver only sends valid data in the requested format, in our case GeoJSON, so if we get a response back in
# XML format we know that we have an error. We do minimal parsing on the XML to extract the error text and raise
# an exception based on it.
#
if response[:5] == "<?xml":
response = etree.fromstring(response)
xml_error = ""
for element in response:
xml_error += element.text
raise Exception(xml_error)
else:
return json.loads(response)
except httplib2.HttpLib2Error as e:
print(e)
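# A tiny standalone illustration of the CQL filter URL-encoding step described
# in get_geojson above; it uses only the standard library, and the example
# county value is an assumption.
def _example_cql_filter_encoding():
    import urllib.parse
    filter_text = "countyname LIKE '%Cork%'"
    # Spaces, quotes and '%' are percent-encoded so the filter is URL-safe.
    return urllib.parse.quote(filter_text)  # "countyname%20LIKE%20%27%25Cork%25%27"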
if __name__ == '__main__':
main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST manipulation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
class CleanCopier(object):
"""NodeTransformer-like visitor that copies an AST."""
def __init__(self, preserve_annos):
super(CleanCopier, self).__init__()
self.preserve_annos = preserve_annos
def copy(self, node):
"""Returns a deep copy of node (excluding some fields, see copy_clean)."""
if isinstance(node, list):
return [self.copy(n) for n in node]
elif isinstance(node, tuple):
return tuple(self.copy(n) for n in node)
elif not isinstance(node, (gast.AST, ast.AST)):
# Assuming everything that's not an AST, list or tuple is a value type
# and may simply be assigned.
return node
assert isinstance(node, (gast.AST, ast.AST))
new_fields = {}
for f in node._fields:
if not f.startswith('__') and hasattr(node, f):
new_fields[f] = self.copy(getattr(node, f))
new_node = type(node)(**new_fields)
if self.preserve_annos:
for k in self.preserve_annos:
anno.copyanno(node, new_node, k)
return new_node
def copy_clean(node, preserve_annos=None):
"""Creates a deep copy of an AST.
The copy will not include fields that are prefixed by '__', with the
exception of user-specified annotations.
Args:
node: ast.AST
preserve_annos: Optional[Set[Hashable]], annotation keys to include in the
copy
Returns:
ast.AST
"""
return CleanCopier(preserve_annos).copy(node)
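# A minimal usage sketch of copy_clean (an illustration only, not part of the
# AutoGraph API surface); it relies on copy_clean accepting plain ast.AST nodes
# as well as gast nodes.
def _example_copy_clean():
  tree = ast.parse('x = y + 1')
  duplicate = copy_clean(tree)
  # The copy is structurally equivalent but is a distinct object graph.
  return duplicate is not tree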
class SymbolRenamer(gast.NodeTransformer):
"""Transformer that can rename symbols to a simple names."""
def __init__(self, name_map):
self.name_map = name_map
def _process_name_node(self, node):
qn = anno.getanno(node, anno.Basic.QN)
if qn in self.name_map:
new_node = gast.Name(
str(self.name_map[qn]),
ctx=node.ctx,
annotation=None,
type_comment=None)
# All annotations get carried over.
for k in anno.keys(node):
anno.copyanno(node, new_node, k)
return new_node
return self.generic_visit(node)
def _process_list_of_strings(self, names):
for i in range(len(names)):
qn = qual_names.QN(names[i])
if qn in self.name_map:
names[i] = str(self.name_map[qn])
return names
def visit_Nonlocal(self, node):
node.names = self._process_list_of_strings(node.names)
return node
def visit_Global(self, node):
node.names = self._process_list_of_strings(node.names)
return node
def visit_Name(self, node):
return self._process_name_node(node)
def visit_Attribute(self, node):
if anno.hasanno(node, anno.Basic.QN):
return self._process_name_node(node)
# Renaming attributes is not supported.
return self.generic_visit(node)
def visit_FunctionDef(self, node):
qn = qual_names.QN(node.name)
if qn in self.name_map:
node.name = str(self.name_map[qn])
return self.generic_visit(node)
def rename_symbols(node, name_map):
"""Renames symbols in an AST. Requires qual_names annotations."""
renamer = SymbolRenamer(name_map)
if isinstance(node, list):
return [renamer.visit(n) for n in node]
elif isinstance(node, tuple):
return tuple(renamer.visit(n) for n in node)
return renamer.visit(node)
def keywords_to_dict(keywords):
"""Converts a list of ast.keyword objects to a dict."""
keys = []
values = []
for kw in keywords:
keys.append(gast.Constant(kw.arg, kind=None))
values.append(kw.value)
return gast.Dict(keys=keys, values=values)
class PatternMatcher(gast.NodeVisitor):
"""Matches a node against a pattern represented by a node."""
def __init__(self, pattern):
self.pattern = pattern
self.pattern_stack = []
self.matches = True
def compare_and_visit(self, node, pattern):
self.pattern_stack.append(self.pattern)
self.pattern = pattern
self.generic_visit(node)
self.pattern = self.pattern_stack.pop()
def no_match(self):
self.matches = False
return False
def is_wildcard(self, p):
if isinstance(p, (list, tuple)) and len(p) == 1:
p, = p
if isinstance(p, gast.Name) and p.id == '_':
return True
if p == '_':
return True
return False
def generic_visit(self, node):
if not self.matches:
return
pattern = self.pattern
for f in node._fields:
if f.startswith('__'):
continue
if not hasattr(node, f):
if hasattr(pattern, f) and getattr(pattern, f):
return self.no_match()
else:
continue
if not hasattr(pattern, f):
return self.no_match()
v = getattr(node, f)
p = getattr(pattern, f)
if self.is_wildcard(p):
continue
if isinstance(v, (list, tuple)):
if not isinstance(p, (list, tuple)) or len(v) != len(p):
return self.no_match()
for v_item, p_item in zip(v, p):
self.compare_and_visit(v_item, p_item)
elif isinstance(v, (gast.AST, ast.AST)):
if not isinstance(v, type(p)) and not isinstance(p, type(v)):
return self.no_match()
self.compare_and_visit(v, p)
else:
# Assume everything else is a value type.
if v != p:
return self.no_match()
def matches(node, pattern):
"""Basic pattern matcher for AST.
The pattern may contain wildcards represented by the symbol '_'. A node
matches a pattern if for every node in the tree, either there is a node of
the same type in pattern, or a Name node with id='_'.
Args:
node: ast.AST
pattern: ast.AST
Returns:
bool
"""
if isinstance(pattern, str):
pattern = parser.parse_str(pattern)
matcher = PatternMatcher(pattern)
matcher.visit(node)
return matcher.matches
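# A small illustration of the wildcard matching described above. It assumes a
# gast version that provides gast.parse; the expressions used are arbitrary.
def _example_matches():
  node = gast.parse('f(1)').body[0].value
  pattern = gast.parse('f(_)').body[0].value
  # '_' acts as a wildcard, so a call with any single argument matches.
  return matches(node, pattern)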
# TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
def apply_to_single_assignments(targets, values, apply_fn):
"""Applies a function to each individual assignment.
This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
It tries to break down the unpacking if possible. In effect, it has the same
effect as passing the assigned values in SSA form to apply_fn.
Examples:
The following will result in apply_fn(a, c), apply_fn(b, d):
a, b = c, d
The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):
a, b = c
The following will result in apply_fn(a, (b, c)):
a = b, c
It uses the visitor pattern to allow subclasses to process single
assignments individually.
Args:
targets: Union[List[ast.AST], Tuple[ast.AST, ...], ast.AST]; should be
used with the targets field of an ast.Assign node
values: ast.AST
apply_fn: Callable[[ast.AST, ast.AST], None], called with the
respective nodes of each single assignment
"""
if not isinstance(targets, (list, tuple)):
targets = (targets,)
for target in targets:
if isinstance(target, (gast.Tuple, gast.List)):
for i in range(len(target.elts)):
target_el = target.elts[i]
if isinstance(values, (gast.Tuple, gast.List)):
value_el = values.elts[i]
else:
idx = parser.parse_expression(str(i))
value_el = gast.Subscript(values, gast.Index(idx), ctx=gast.Load())
apply_to_single_assignments(target_el, value_el, apply_fn)
else:
apply_fn(target, values)
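# A minimal sketch of how an unpacked assignment is broken down (illustrative
# only; assumes a gast version that provides gast.parse).
def _example_apply_to_single_assignments():
  assign = gast.parse('a, b = c, d').body[0]
  pairs = []
  apply_to_single_assignments(
      assign.targets, assign.value,
      lambda target, value: pairs.append((target.id, value.id)))
  return pairs  # [('a', 'c'), ('b', 'd')]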
def parallel_walk(node, other):
"""Walks two ASTs in parallel.
The two trees must have identical structure.
Args:
node: Union[ast.AST, Iterable[ast.AST]]
other: Union[ast.AST, Iterable[ast.AST]]
Yields:
Tuple[ast.AST, ast.AST]
Raises:
ValueError: if the two trees don't have identical structure.
"""
if isinstance(node, (list, tuple)):
node_stack = list(node)
else:
node_stack = [node]
if isinstance(other, (list, tuple)):
other_stack = list(other)
else:
other_stack = [other]
while node_stack and other_stack:
assert len(node_stack) == len(other_stack)
n = node_stack.pop()
o = other_stack.pop()
if ((not isinstance(n, (ast.AST, gast.AST, str)) and n is not None) or
        (not isinstance(o, (ast.AST, gast.AST, str)) and o is not None) or
n.__class__.__name__ != o.__class__.__name__):
raise ValueError('inconsistent nodes: {} ({}) and {} ({})'.format(
n, n.__class__.__name__, o, o.__class__.__name__))
yield n, o
if isinstance(n, str):
assert isinstance(o, str), 'The check above should have ensured this'
continue
if n is None:
assert o is None, 'The check above should have ensured this'
continue
for f in n._fields:
n_child = getattr(n, f, None)
o_child = getattr(o, f, None)
if f.startswith('__') or n_child is None or o_child is None:
continue
if isinstance(n_child, (list, tuple)):
if (not isinstance(o_child, (list, tuple)) or
len(n_child) != len(o_child)):
raise ValueError(
'inconsistent values for field {}: {} and {}'.format(
f, n_child, o_child))
node_stack.extend(n_child)
other_stack.extend(o_child)
elif isinstance(n_child, (gast.AST, ast.AST)):
node_stack.append(n_child)
other_stack.append(o_child)
elif n_child != o_child:
raise ValueError(
'inconsistent values for field {}: {} and {}'.format(
f, n_child, o_child))
|
|
# -*- coding: utf-8 -*-
'''
Installation of Composer Packages
=================================
These states manage the installed packages for Composer for PHP. Note that
composer must either be installed and accessible via a bin directory, or you
can pass the location of composer in the state.
.. code-block:: yaml
get-composer:
cmd.run:
- name: 'CURL=`which curl`; $CURL -sS https://getcomposer.org/installer | php'
- unless: test -f /usr/local/bin/composer
- cwd: /root/
install-composer:
cmd.wait:
- name: mv /root/composer.phar /usr/local/bin/composer
- cwd: /root/
- watch:
- cmd: get-composer
/path/to/project:
composer.installed:
- no_dev: true
- require:
- cmd: install-composer
# Without composer installed in your PATH
# Note: composer.phar must be executable for state to work properly
/path/to/project:
composer.installed:
- composer: /path/to/composer.phar
- php: /usr/local/bin/php
- no_dev: true
'''
from __future__ import absolute_import
# Import salt libs
from salt.exceptions import SaltException
def __virtual__():
'''
Only load if the composer module is available in __salt__
'''
return 'composer.install' in __salt__
def installed(name,
composer=None,
php=None,
user=None,
prefer_source=None,
prefer_dist=None,
no_scripts=None,
no_plugins=None,
optimize=None,
no_dev=None,
quiet=False,
composer_home='/root',
always_check=True):
'''
Verify that the correct versions of composer dependencies are present.
dir
Directory location of the composer.json file.
composer
Location of the composer.phar file. If not set composer will
just execute "composer" as if it is installed globally.
(i.e. /path/to/composer.phar)
php
Location of the php executable to use with composer.
(i.e. /usr/bin/php)
user
Which system user to run composer as.
.. versionadded:: 2014.1.4
prefer_source
--prefer-source option of composer.
prefer_dist
--prefer-dist option of composer.
no_scripts
--no-scripts option of composer.
no_plugins
--no-plugins option of composer.
optimize
--optimize-autoloader option of composer. Recommended for production.
no_dev
--no-dev option for composer. Recommended for production.
quiet
--quiet option for composer. Whether or not to return output from composer.
composer_home
$COMPOSER_HOME environment variable
always_check
If True, _always_ run `composer install` in the directory. This is the
default behavior. If False, only run `composer install` if there is no
vendor directory present.
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
did_install = __salt__['composer.did_composer_install'](name)
# Check if composer.lock exists, if so we already ran `composer install`
# and we don't need to do it again
if always_check is False and did_install:
ret['result'] = True
ret['comment'] = 'Composer already installed this directory'
return ret
# The state of the system does need to be changed. Check if we're running
# in ``test=true`` mode.
if __opts__['test'] is True:
if did_install is True:
install_status = ""
else:
install_status = "not "
ret['comment'] = 'The state of "{0}" will be changed.'.format(name)
ret['changes'] = {
'old': 'composer install has {0}been run in {1}'.format(install_status, name),
'new': 'composer install will be run in {0}'.format(name)
}
ret['result'] = None
return ret
try:
call = __salt__['composer.install'](
name,
composer=composer,
php=php,
runas=user,
prefer_source=prefer_source,
prefer_dist=prefer_dist,
no_scripts=no_scripts,
no_plugins=no_plugins,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
composer_home=composer_home
)
except (SaltException) as err:
ret['result'] = False
ret['comment'] = 'Error executing composer in \'{0!r}\': {1!r}'.format(name, err)
return ret
# If composer retcode != 0 then an exception was thrown and we dealt with it.
# Any other case is success, regardless of what composer decides to output.
ret['result'] = True
if quiet is True:
ret['comment'] = 'Composer install completed successfully, output silenced by quiet flag'
else:
ret['comment'] = 'Composer install completed successfully'
ret['changes'] = {
'stderr': call['stderr'],
'stdout': call['stdout']
}
return ret
def update(name,
composer=None,
php=None,
user=None,
prefer_source=None,
prefer_dist=None,
no_scripts=None,
no_plugins=None,
optimize=None,
no_dev=None,
quiet=False,
composer_home='/root'):
'''
Composer update the directory to ensure we have the latest versions
of all project dependencies.
dir
Directory location of the composer.json file.
composer
Location of the composer.phar file. If not set composer will
just execute "composer" as if it is installed globally.
(i.e. /path/to/composer.phar)
php
Location of the php executable to use with composer.
(i.e. /usr/bin/php)
user
Which system user to run composer as.
.. versionadded:: 2014.1.4
prefer_source
--prefer-source option of composer.
prefer_dist
--prefer-dist option of composer.
no_scripts
--no-scripts option of composer.
no_plugins
--no-plugins option of composer.
optimize
--optimize-autoloader option of composer. Recommended for production.
no_dev
--no-dev option for composer. Recommended for production.
quiet
--quiet option for composer. Whether or not to return output from composer.
composer_home
$COMPOSER_HOME environment variable
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
# Check if composer.lock exists, if so we already ran `composer install`
is_installed = __salt__['composer.did_composer_install'](name)
if is_installed:
old_status = "composer install has been run in {0}".format(name)
else:
old_status = "composer install has not yet been run in {0}".format(name)
# The state of the system does need to be changed. Check if we're running
# in ``test=true`` mode.
if __opts__['test'] is True:
ret['comment'] = 'The state of "{0}" will be changed.'.format(name)
ret['changes'] = {
'old': old_status,
'new': 'composer install/update will be run in {0}'.format(name)
}
ret['result'] = None
return ret
try:
call = __salt__['composer.update'](
name,
composer=composer,
php=php,
runas=user,
prefer_source=prefer_source,
prefer_dist=prefer_dist,
no_scripts=no_scripts,
no_plugins=no_plugins,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
composer_home=composer_home
)
except (SaltException) as err:
ret['result'] = False
ret['comment'] = 'Error executing composer in \'{0!r}\': {1!r}'.format(name, err)
return ret
# If composer retcode != 0 then an exception was thrown and we dealt with it.
# Any other case is success, regardless of what composer decides to output.
ret['result'] = True
if quiet is True:
ret['comment'] = 'Composer update completed successfully, output silenced by quiet flag'
else:
ret['comment'] = 'Composer update completed successfully'
ret['changes'] = {
'stderr': call['stderr'],
'stdout': call['stdout']
}
return ret
|
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, MutableSequence, Optional, Tuple, Union
import torch
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.utilities import _TPU_AVAILABLE
from pytorch_lightning.utilities.exceptions import MisconfigurationException
def determine_root_gpu_device(gpus: List[int]) -> Optional[int]:
"""
Args:
gpus: non-empty list of ints representing which gpus to use
Returns:
designated root GPU device id
Raises:
TypeError:
If ``gpus`` is not a list
AssertionError:
If GPU list is empty
"""
if gpus is None:
return None
if not isinstance(gpus, list):
raise TypeError("gpus should be a list")
assert len(gpus) > 0, "gpus should be a non empty list"
# set root gpu
root_gpu = gpus[0]
return root_gpu
def parse_gpu_ids(gpus: Optional[Union[int, str, List[int]]]) -> Optional[List[int]]:
"""
Parses the GPU ids given in the format as accepted by the
:class:`~pytorch_lightning.trainer.Trainer`.
Args:
gpus: An int -1 or string '-1' indicates that all available GPUs should be used.
A list of unique ints or a string containing a list of comma separated unique integers
indicates specific GPUs to use.
An int 0 means that no GPUs should be used.
Any int N > 0 indicates that GPUs [0..N) should be used.
Returns:
a list of gpus to be used or ``None`` if no GPUs were requested
If no GPUs are available but the ``gpus`` argument requests them, a
MisconfigurationException is raised.
"""
# Check that gpus param is None, Int, String or List
_check_data_type(gpus)
# Handle the case when no gpus are requested
if gpus is None or (isinstance(gpus, int) and gpus == 0) or str(gpus).strip() == "0":
return None
# We know user requested GPUs therefore if some of the
# requested GPUs are not available an exception is thrown.
gpus = _normalize_parse_gpu_string_input(gpus)
gpus = _normalize_parse_gpu_input_to_list(gpus)
if not gpus:
raise MisconfigurationException("GPUs requested but none are available.")
if TorchElasticEnvironment.is_using_torchelastic() and len(gpus) != 1 and len(_get_all_available_gpus()) == 1:
# omit sanity check on torchelastic as by default shows one visible GPU per process
return gpus
# Check that gpus are unique. Duplicate gpus are not supported by the backend.
_check_unique(gpus)
return _sanitize_gpu_ids(gpus)
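# A hedged, CPU-safe illustration of the parsing rules documented above: the
# inputs below short-circuit before any CUDA query, so no GPU is required.
def _example_parse_gpu_ids():
    assert parse_gpu_ids(None) is None   # no GPUs requested
    assert parse_gpu_ids(0) is None      # explicit "use no GPUs"
    assert parse_gpu_ids("0") is None    # string form of the same request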
def parse_tpu_cores(tpu_cores: Union[int, str, List]) -> Optional[Union[int, List[int]]]:
"""
Parses the tpu_cores given in the format as accepted by the
:class:`~pytorch_lightning.trainer.Trainer`.
Args:
tpu_cores: An int 1 or string '1' indicates that 1 core with multi-processing should be used
An int 8 or string '8' indicates that all 8 cores with multi-processing should be used
A list of ints or a string containing a list of comma separated integers
indicates the specific TPU core to use.
Returns:
a list of tpu_cores to be used or ``None`` if no TPU cores were requested
Raises:
MisconfigurationException:
If TPU cores aren't 1 or 8 cores, or no TPU devices are found
"""
_check_data_type(tpu_cores)
if isinstance(tpu_cores, str):
tpu_cores = _parse_tpu_cores_str(tpu_cores.strip())
if not _tpu_cores_valid(tpu_cores):
raise MisconfigurationException("`tpu_cores` can only be 1, 8 or [<1-8>]")
if tpu_cores is not None and not _TPU_AVAILABLE:
raise MisconfigurationException("No TPU devices were found.")
return tpu_cores
def _normalize_parse_gpu_string_input(s: Union[int, str, List[int]]) -> Union[int, List[int]]:
if not isinstance(s, str):
return s
if s == "-1":
return -1
if "," in s:
return [int(x.strip()) for x in s.split(",") if len(x) > 0]
return int(s.strip())
def _sanitize_gpu_ids(gpus: List[int]) -> List[int]:
"""
Checks that each of the GPUs in the list is actually available.
Raises a MisconfigurationException if any of the GPUs is not available.
Args:
gpus: list of ints corresponding to GPU indices
Returns:
unmodified gpus variable
Raises:
MisconfigurationException:
If machine has fewer available GPUs than requested.
"""
all_available_gpus = _get_all_available_gpus()
for gpu in gpus:
if gpu not in all_available_gpus:
raise MisconfigurationException(
f"You requested GPUs: {gpus}\n But your machine only has: {all_available_gpus}"
)
return gpus
def _normalize_parse_gpu_input_to_list(gpus: Union[int, List[int], Tuple[int, ...]]) -> Optional[List[int]]:
assert gpus is not None
if isinstance(gpus, (MutableSequence, tuple)):
return list(gpus)
# must be an int
if not gpus: # gpus==0
return None
if gpus == -1:
return _get_all_available_gpus()
return list(range(gpus))
def _get_all_available_gpus() -> List[int]:
"""
Returns:
a list of all available gpus
"""
return list(range(torch.cuda.device_count()))
def _check_unique(device_ids: List[int]) -> None:
"""
Checks that the device_ids are unique.
Args:
device_ids: list of ints corresponding to gpus indices
Raises:
MisconfigurationException:
If ``device_ids`` of GPUs aren't unique
"""
if len(device_ids) != len(set(device_ids)):
raise MisconfigurationException("Device ID's (GPU) must be unique.")
def _check_data_type(device_ids: Any) -> None:
"""
Checks that the device_ids argument is one of: None, Int, String or List.
Raises a MisconfigurationException otherwise.
Args:
device_ids: gpus/tpu_cores parameter as passed to the Trainer
Raises:
MisconfigurationException:
If ``device_ids`` of GPU/TPUs aren't ``int``, ``str``, sequence of ``int`` or ``None``
"""
if device_ids is not None and (
not isinstance(device_ids, (int, str, MutableSequence, tuple)) or isinstance(device_ids, bool)
):
raise MisconfigurationException("Device ID's (GPU/TPU) must be int, string or sequence of ints or None.")
def _tpu_cores_valid(tpu_cores: Any) -> bool:
# allow 1 or 8 cores
if tpu_cores in (1, 8, None):
return True
# allow picking 1 of 8 indexes
if isinstance(tpu_cores, (list, tuple, set)):
has_1_tpu_idx = len(tpu_cores) == 1
is_valid_tpu_idx = 1 <= list(tpu_cores)[0] <= 8
is_valid_tpu_core_choice = has_1_tpu_idx and is_valid_tpu_idx
return is_valid_tpu_core_choice
return False
def _parse_tpu_cores_str(tpu_cores: str) -> Union[int, List[int]]:
if tpu_cores in ("1", "8"):
return int(tpu_cores)
return [int(x.strip()) for x in tpu_cores.split(",") if len(x) > 0]
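# A small illustration of the string formats accepted for tpu_cores; this only
# exercises the pure string-parsing helper and touches no TPU hardware.
def _example_parse_tpu_cores_str():
    assert _parse_tpu_cores_str("8") == 8    # all eight cores
    assert _parse_tpu_cores_str("5") == [5]  # a single specific core index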
|
|
# Copyright (c) 2018 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This module implements the representation of final build information.
"""
from nr.stream import stream
import collections
import hashlib
import json
import nr.fs as path
import re
import sys
import warnings
import proplib from './proplib'
def split_filetags(tags, set_cls=frozenset):
if isinstance(tags, str):
tags = (x.strip() for x in tags.split(','))
return set_cls(tags)
class TaggedFile:
"""
Represents a file attached with zero or more tags.
This class interns all tag strings.
"""
def __init__(self, name, tags=()):
self._name = name
self._tags = set(sys.intern(x) for x in tags)
def __repr__(self):
return 'TaggedFile(name={!r}, tags={{{!r}}})'.format(self.name, ','.join(self.tags))
def has_tag(self, tag):
return tag in self._tags
def add_tags(self, tags):
self._tags |= set(sys.intern(x) for x in tags)
@property
def name(self):
return self._name
@property
def tags(self):
return set(self._tags)
def matches(self, tags):
tags = split_filetags(tags)
for tag in tags:
if not tag: continue
if tag[0] == '!':
result = tag[1:] not in self._tags
else:
result = tag in self._tags
if not result:
return False
return True
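# A small illustration of file-tag matching, including the '!' negation syntax
# (local throwaway objects; the filename and tags are assumptions).
def _example_tagged_file_matching():
  tf = TaggedFile('main.c', ['in', 'c'])
  # Matches because 'in' is present and 'out' is absent.
  return tf.matches('in,!out') and not tf.matches('out')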
class FileSet:
"""
Represents a collection of #TaggedFile objects. Additionally, the #FileSet
may contain additional variables with lists of strings as values. These may
not be useful in all contexts.
"""
def __init__(self):
self._files = collections.OrderedDict()
def __repr__(self):
v = ('{!r}: {!r}'.format(k, v.tags) for k, v in self._files.items())
return 'FileSet({{{0}}})'.format(', '.join(v))
def __getitem__(self, name):
name = path.canonical(name)
return self._files[name.lower()]
def __delitem__(self, name):
name = path.canonical(name)
del self._files[name]
def __iter__(self):
return iter(self._files.values())
def name(self):
return self._name
def add(self, names, tags=()):
if isinstance(names, str):
names = [names]
result = []
for name in names:
name = path.canonical(name)
# We build the hash table using the case-insensitive canonical name.
name_lower = name.lower()
obj = self._files.get(name_lower)
if obj is None:
obj = TaggedFile(name, tags)
self._files[name_lower] = obj
else:
obj.add_tags(tags)
result.append(obj)
return result
def tagged(self, tags):
tags = split_filetags(tags)
for tf in self._files.values():
if tf.matches(tags):
yield tf.name
def to_json(self):
return {x.name: sorted(x.tags) for x in self._files.values()}
@classmethod
def from_json(cls, data):
obj = cls()
for key, tags in data.items():
obj.add(key, tags)
return obj
class ActionVariables:
"""
A container for variables that must be lists of strings. Used in an
action's build step together with a #FileSet.
"""
def __init__(self):
self._variables = collections.OrderedDict()
def __repr__(self):
v = ('{!r}: {!r}'.format(k, v) for k, v in self._variables.items())
return 'ActionVariables({{{}}})'.format(', '.join(v))
def __getitem__(self, key):
return self._variables[key]
def __setitem__(self, key, value):
if isinstance(value, str):
value = [value]
if not isinstance(value, (list, tuple)):
raise TypeError('expected str,list/tuple, got {}'.format(type(value).__name__))
if not all(isinstance(x, str) for x in value):
raise TypeError('expected item to be str')
self._variables[key] = list(value)
def __delitem__(self, key):
del self._variables[key]
def __contains__(self, key):
return key in self._variables
def get(self, key, default=None):
return self._variables.get(key, default)
def to_json(self):
return self._variables
@classmethod
def from_json(cls, data):
obj = cls()
obj._variables.update(data)
return obj
class BuildSet:
def __init__(self, name, files=None, vars=None):
self.name = name
self.files = files or FileSet()
self.vars = vars or ActionVariables()
def __repr__(self):
return 'BuildSet(name={!r}, files={!r}, vars={!r})'.format(
self.name, self.files, self.vars)
def to_json(self):
return {'name': self.name, 'files': self.files.to_json(), 'vars': self.vars.to_json()}
@classmethod
def from_json(cls, data):
return cls(data['name'], FileSet.from_json(data['files']),
ActionVariables.from_json(data['vars']))
def subst(self, cmd):
"""
Substitute all variables references in the list of strings *cmd* with the
files or variables in this buildset.
"""
expr = re.compile(r'^(.*)(?:\$\{([^\}]+)\}|\$(\w+))(.*)$')
result = []
for string in cmd:
match = expr.match(string)
if match:
px, sx = match.group(1), match.group(4)
tags = match.group(2) or match.group(3)
if '&' in tags:
msg = 'legacy tag syntax using `&` character in buildset {!r}: {!r}'
warnings.warn(msg.format(self.name, tags))
tags = set(tags.split('&'))
else:
tags = split_filetags(tags)
files = list(self.files.tagged(tags))
result += [px + x + sx for x in files]
if len(tags) == 1 and next(iter(tags)) in self.vars:
result += [px+x+sx for x in self.vars[next(iter(tags))]]
else:
result.append(string)
return result
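# A hedged sketch of the ${tag} expansion performed by BuildSet.subst()
# (illustrative only; the file names are assumptions and are canonicalized by
# FileSet.add(), so the expanded arguments come back as canonical paths).
def _example_buildset_subst():
  bs = BuildSet('example')
  bs.files.add('main.c', ['in'])
  bs.files.add('main.o', ['out'])
  # Expands to something like ['gcc', '-c', '<cwd>/main.c', '-o', '<cwd>/main.o'].
  return bs.subst(['gcc', '-c', '${in}', '-o', '${out}'])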
class ActionSet:
"""
Represents a collection of #Action objects and a set of file tag strings.
"""
def __init__(self, tags):
self.tags = split_filetags(tags, set)
self.actions = set()
self._files = None
def __repr__(self):
return 'ActionSet(tags="{}", actions={!r})'.format(
','.join(self.tags), self.actions)
def __iter__(self):
return iter(self.actions)
def __or__(self, other):
if not isinstance(other, ActionSet):
raise NotImplementedError
res = ActionSet(self.tags | other.tags)
res.actions = self.actions | other.actions
return res
def __ior__(self, other):
if not isinstance(other, ActionSet):
raise NotImplementedError
getstate = lambda: (len(self.tags), len(self.actions))
state = getstate()
self.tags |= other.tags
self.actions |= other.actions
if state != getstate():
self.reset_cache()
return self
@property
def files(self):
"""
Returns a list of files that matched the tags in the #ActionSet. This
list of files is cached. This cache is only cleared when new actions or
tags are added to the set.
If you add tags or actions manually to the #ActionSet, make sure to call
the #reset_cache() method.
"""
if self._files is None:
self._files = list(stream.concat(
x.all_files_tagged(self.tags) for x in self.actions))
return self._files
def reset_cache(self):
"""
Resets the #files cache of the #ActionSet. This must be called when
manually modifying the ActionSet's #tags or #actions.
"""
self._files = None
def tag(self, add_tags):
"""
Add a tag to all the files matched by this #ActionSet.
"""
add_tags = split_filetags(add_tags)
for action in self.actions:
for build in action.builds:
for filename in build.files.tagged(self.tags):
build.files.add(filename, add_tags)
def add(self, action):
"""
Add an action to the set. This invalidates the #files cache.
"""
getstate = lambda: len(self.actions)
state = getstate()
self.actions.add(action)
if state != getstate():
self.reset_cache()
class Action:
"""
Represents an action that translates a set of input files to a set of
output files. Actions can be used to execute the *commands* multiple
times for different sets of input/output files. Every action needs at
least one set of input/output files.
# Variables
Every file in an action has a tag associated with it. Files can then be
accessed by filtering with these tags. To reference files that have a tag,
use the `$tag` or `${tag}` syntax inside the command list. Multiple tags
can be separated by `&` characters. Files need all tags specified in a
variable to match, eg. `${out&dll}` will be expanded to all files that are
tagged as `out` and `dll`.
Note that an argument in the command-list will be multiplied for every
file matching the variable, eg. an argument "-I${include}" may expand to
multiple arguments like `-Iinclude`, `-Ivendor/optional/include` if there
are multiple files with the tag `include`.
Note that only files tagged with `in` and `out` will be considered mandatory
input and output files for an action. Additionally, the tag `optional` may
be combined with any of the two tags to specify an optional input or output
file.
# Parameters
target (Target):
The #Target that this action is generated for. Note that in a session
where the build graph is loaded from a file and the build modules have
not been executed, this may be a proxy target with no valid properties.
name (str):
The name of the action inside the target.
commands (list of list of str):
A list of commands to execute in order. The strings in this list may
contain variables are described above.
deps (list of Action):
A list of actions that need to be executed before this action. This
relationship should also roughly be represented by the input and output
files of the actions.
cwd (str):
The directory to execute the action in.
environ (dict of (str, str)):
An environment dictionary that will be merged on top of the current
environment before running the commands in the action.
explicit (bool):
If #True, this action must be explicitly specified to be built or
required by another action to be run.
syncio (bool):
#True if the action needs to be run with the original stdin/stdout/stderr
attached.
deps_prefix (str):
A string that represents the prefix of for lines in the output of the
command(s) that represent additional dependencies to the action (eg.
headers in the case of C/C++). Can not be mixed with *depfile*.
depfile (str):
A filename that is produced by the command(s) which lists additional
dependencies of the action. The file must be formatted like a Makefile.
Can not be mixed with *deps_prefix*.
# Members
builds (list of BuildSet):
A list of files this action depends on or produces and variables. Both
are available for variable expansion in the *commands* strings.
"""
def __init__(self, target, name, deps, commands, cwd=None, environ=None,
explicit=False, syncio=False, deps_prefix=None, depfile=None):
assert isinstance(target, str)
deps = proplib.List[proplib.InstanceOf[Action]]().coerce('deps', deps)
if deps_prefix and depfile:
raise TypeError('deps_prefix and depfile parameters can not be mixed')
self.target = target
self.name = name
self.deps = deps
self.commands = commands
self.cwd = cwd
self.environ = environ
self.explicit = explicit
self.syncio = syncio
self.deps_prefix = deps_prefix
self.depfile = depfile
self.builds = []
def __repr__(self):
return 'Action({!r} with {} buildsets)'.format(
self.identifier(), len(self.builds))
def identifier(self):
return '{}:{}'.format(self.target, self.name)
def add_buildset(self, buildset=None, name=None):
if buildset is not None:
assert isinstance(buildset, BuildSet)
self.builds.append(buildset)
else:
buildset = BuildSet(name=name)
self.builds.append(buildset)
return buildset
def all_files_tagged(self, tags):
tags = split_filetags(tags)
files = []
for build in self.builds:
files += build.files.tagged(tags)
return files
def has_files_tagged(self, tags):
tags = split_filetags(tags)
for build in self.builds:
for file in build.files:
if file.matches(tags):
return True
return False
def to_json(self):
return {
'target': self.target,
'name': self.name,
'deps': [x.identifier() for x in self.deps],
'commands': self.commands,
'cwd': self.cwd,
'environ': self.environ,
'explicit': self.explicit,
'syncio': self.syncio,
'deps_prefix': self.deps_prefix,
'depfile': self.depfile,
'builds': [x.to_json() for x in self.builds]
}
@classmethod
def from_json(cls, data):
builds = data.pop('builds')
action = cls(**data)
action.builds = [BuildSet.from_json(x) for x in builds]
return action
class BuildGraph:
"""
This class represents the build graph that is built from #Action#s after
all targets have been handled.
"""
def __init__(self):
self._mtime = sys.maxsize
self._actions = {}
self._selected = set()
# This will be used during deserialization to produce fake #Module
# objects to associate the #Action#s with.
self._modules = {}
def __getitem__(self, action_name):
return self._actions[action_name]
def __iter__(self):
return iter(self._actions.keys())
def actions(self):
return self._actions.values()
def add_action(self, action):
self._actions[action.identifier()] = action
def add_actions(self, actions):
for action in actions:
self._actions[action.identifier()] = action
def resolve(self, action_name):
"""
Returns a list of actions matching the specified *action_name*. If a
target does not provide the specified action exactly, the target's layers
are searched and a list of the matching actions is returned.
Never returns an empty list, instead raises a #ValueError if there was
no matching action(s).
"""
if action_name in self._actions:
return [self._actions[action_name]]
target, action = action_name.partition(':')[::2]
target += '/'
actions = [x for x in self._actions.values() if x.target.startswith(target)]
if action:
actions = [x for x in actions if x.name == action]
else:
actions = [x for x in actions if not x.explicit]
if not actions:
raise ValueError('no actions matching {!r}'.format(action_name))
return actions
def select(self, action_name):
self._selected |= set(x.identifier() for x in self.resolve(action_name))
def selected(self):
return (self[x] for x in self._selected)
def to_json(self):
root = {}
root['actions'] = {a.identifier(): a.to_json() for a in self._actions.values()}
return root
def from_json(self, root):
deps = {}
for action in root['actions'].values():
action_deps = action.pop('deps')
action['deps'] = []
action = Action.from_json(action)
self._actions[action.identifier()] = action
deps[action.identifier()] = action_deps
# Re-establish links between actions.
for action in self._actions.values():
for dep in deps[action.identifier()]:
action.deps.append(self._actions[dep])
def set_mtime(self, mtime):
self._mtime = mtime
def mtime(self):
return self._mtime
def hash(self, action):
hasher = hashlib.md5()
writer = type('Namespace', (object,), {})()
writer.write = lambda x: hasher.update(x.encode('utf8'))
json.dump(action.to_json(), writer, sort_keys=True)
return hasher.hexdigest()[:12]
class BuildBackend:
def __init__(self, context, args):
self.context = context
self.args = args
def export(self):
raise NotImplementedError
def clean(self, recursive):
raise NotImplementedError
def build(self, verbose):
raise NotImplementedError
|
|
#!/usr/bin/env python3
import sys
import shutil
import os
import fnmatch
import unittest
import argparse
import time
import threading
import signal
import psutil
import re
import multiprocessing
from multiprocessing import Process, Pipe, cpu_count
from multiprocessing.queues import Queue
from multiprocessing.managers import BaseManager
import framework
from framework import VppTestRunner, running_extended_tests, VppTestCase, \
get_testcase_doc_name, get_test_description, PASS, FAIL, ERROR, SKIP, \
TEST_RUN
from debug import spawn_gdb
from log import get_parallel_logger, double_line_delim, RED, YELLOW, GREEN, \
colorize, single_line_delim
from discover_tests import discover_tests
from subprocess import check_output, CalledProcessError
from util import check_core_path, get_core_path, is_core_present
# timeout which controls how long the child has to finish after seeing
# a core dump in the test temporary directory. If this is exceeded, the parent
# assumes that the child process is stuck (e.g. waiting for a shm mutex, which
# will never get unlocked) and kills the child
core_timeout = 3
min_req_shm = 536870912 # min 512MB shm required
# 128MB per extra process
shm_per_process = 134217728
class StreamQueue(Queue):
def write(self, msg):
self.put(msg)
def flush(self):
sys.__stdout__.flush()
sys.__stderr__.flush()
def fileno(self):
return self._writer.fileno()
class StreamQueueManager(BaseManager):
pass
StreamQueueManager.register('StreamQueue', StreamQueue)
class TestResult(dict):
def __init__(self, testcase_suite, testcases_by_id=None):
super(TestResult, self).__init__()
self[PASS] = []
self[FAIL] = []
self[ERROR] = []
self[SKIP] = []
self[TEST_RUN] = []
self.crashed = False
self.testcase_suite = testcase_suite
self.testcases = [testcase for testcase in testcase_suite]
self.testcases_by_id = testcases_by_id
def was_successful(self):
return 0 == len(self[FAIL]) == len(self[ERROR]) \
and len(self[PASS] + self[SKIP]) \
== self.testcase_suite.countTestCases() == len(self[TEST_RUN])
def no_tests_run(self):
return 0 == len(self[TEST_RUN])
def process_result(self, test_id, result):
self[result].append(test_id)
def suite_from_failed(self):
rerun_ids = set([])
for testcase in self.testcase_suite:
tc_id = testcase.id()
if tc_id not in self[PASS] and tc_id not in self[SKIP]:
rerun_ids.add(tc_id)
if rerun_ids:
return suite_from_failed(self.testcase_suite, rerun_ids)
def get_testcase_names(self, test_id):
# could be tearDownClass (test_ipsec_esp.TestIpsecEsp1)
setup_teardown_match = re.match(
r'((tearDownClass)|(setUpClass)) \((.+\..+)\)', test_id)
if setup_teardown_match:
test_name, _, _, testcase_name = setup_teardown_match.groups()
if len(testcase_name.split('.')) == 2:
for key in self.testcases_by_id.keys():
if key.startswith(testcase_name):
testcase_name = key
break
testcase_name = self._get_testcase_doc_name(testcase_name)
else:
test_name = self._get_test_description(test_id)
testcase_name = self._get_testcase_doc_name(test_id)
return testcase_name, test_name
def _get_test_description(self, test_id):
if test_id in self.testcases_by_id:
desc = get_test_description(descriptions,
self.testcases_by_id[test_id])
else:
desc = test_id
return desc
def _get_testcase_doc_name(self, test_id):
if test_id in self.testcases_by_id:
doc_name = get_testcase_doc_name(self.testcases_by_id[test_id])
else:
doc_name = test_id
return doc_name
def test_runner_wrapper(suite, keep_alive_pipe, stdouterr_queue,
finished_pipe, result_pipe, logger):
sys.stdout = stdouterr_queue
sys.stderr = stdouterr_queue
VppTestCase.parallel_handler = logger.handlers[0]
result = VppTestRunner(keep_alive_pipe=keep_alive_pipe,
descriptions=descriptions,
verbosity=verbose,
result_pipe=result_pipe,
failfast=failfast,
print_summary=False).run(suite)
finished_pipe.send(result.wasSuccessful())
finished_pipe.close()
keep_alive_pipe.close()
class TestCaseWrapper(object):
def __init__(self, testcase_suite, manager):
self.keep_alive_parent_end, self.keep_alive_child_end = Pipe(
duplex=False)
self.finished_parent_end, self.finished_child_end = Pipe(duplex=False)
self.result_parent_end, self.result_child_end = Pipe(duplex=False)
self.testcase_suite = testcase_suite
if sys.version[0] == '2':
self.stdouterr_queue = manager.StreamQueue()
else:
from multiprocessing import get_context
self.stdouterr_queue = manager.StreamQueue(ctx=get_context())
self.logger = get_parallel_logger(self.stdouterr_queue)
self.child = Process(target=test_runner_wrapper,
args=(testcase_suite,
self.keep_alive_child_end,
self.stdouterr_queue,
self.finished_child_end,
self.result_child_end,
self.logger)
)
self.child.start()
self.last_test_temp_dir = None
self.last_test_vpp_binary = None
self._last_test = None
self.last_test_id = None
self.vpp_pid = None
self.last_heard = time.time()
self.core_detected_at = None
self.testcases_by_id = {}
self.testclasess_with_core = {}
for testcase in self.testcase_suite:
self.testcases_by_id[testcase.id()] = testcase
self.result = TestResult(testcase_suite, self.testcases_by_id)
@property
def last_test(self):
return self._last_test
@last_test.setter
def last_test(self, test_id):
self.last_test_id = test_id
if test_id in self.testcases_by_id:
testcase = self.testcases_by_id[test_id]
self._last_test = testcase.shortDescription()
if not self._last_test:
self._last_test = str(testcase)
else:
self._last_test = test_id
def add_testclass_with_core(self):
if self.last_test_id in self.testcases_by_id:
test = self.testcases_by_id[self.last_test_id]
class_name = unittest.util.strclass(test.__class__)
test_name = "'{}' ({})".format(get_test_description(descriptions,
test),
self.last_test_id)
else:
test_name = self.last_test_id
class_name = re.match(r'((tearDownClass)|(setUpClass)) '
r'\((.+\..+)\)', test_name).groups()[3]
if class_name not in self.testclasess_with_core:
self.testclasess_with_core[class_name] = (
test_name,
self.last_test_vpp_binary,
self.last_test_temp_dir)
def close_pipes(self):
self.keep_alive_child_end.close()
self.finished_child_end.close()
self.result_child_end.close()
self.keep_alive_parent_end.close()
self.finished_parent_end.close()
self.result_parent_end.close()
def was_successful(self):
return self.result.was_successful()
def stdouterr_reader_wrapper(unread_testcases, finished_unread_testcases,
read_testcases):
read_testcase = None
while read_testcases.is_set() or unread_testcases:
if finished_unread_testcases:
read_testcase = finished_unread_testcases.pop()
unread_testcases.remove(read_testcase)
elif unread_testcases:
read_testcase = unread_testcases.pop()
if read_testcase:
data = ''
while data is not None:
sys.stdout.write(data)
data = read_testcase.stdouterr_queue.get()
read_testcase.stdouterr_queue.close()
finished_unread_testcases.discard(read_testcase)
read_testcase = None
def handle_failed_suite(logger, last_test_temp_dir, vpp_pid):
if last_test_temp_dir:
# Need to create link in case of a timeout or core dump without failure
lttd = os.path.basename(last_test_temp_dir)
failed_dir = os.getenv('FAILED_DIR')
link_path = '%s%s-FAILED' % (failed_dir, lttd)
if not os.path.exists(link_path):
os.symlink(last_test_temp_dir, link_path)
logger.error("Symlink to failed testcase directory: %s -> %s"
% (link_path, lttd))
# Report core existence
core_path = get_core_path(last_test_temp_dir)
if os.path.exists(core_path):
logger.error(
"Core-file exists in test temporary directory: %s!" %
core_path)
check_core_path(logger, core_path)
logger.debug("Running 'file %s':" % core_path)
try:
info = check_output(["file", core_path])
logger.debug(info)
except CalledProcessError as e:
logger.error("Subprocess returned with return code "
"while running `file' utility on core-file "
"returned: "
"rc=%s", e.returncode)
except OSError as e:
logger.error("Subprocess returned with OS error while "
"running 'file' utility "
"on core-file: "
"(%s) %s", e.errno, e.strerror)
except Exception as e:
logger.exception("Unexpected error running `file' utility "
"on core-file")
if vpp_pid:
# Copy api post mortem
api_post_mortem_path = "/tmp/api_post_mortem.%d" % vpp_pid
if os.path.isfile(api_post_mortem_path):
logger.error("Copying api_post_mortem.%d to %s" %
(vpp_pid, last_test_temp_dir))
shutil.copy2(api_post_mortem_path, last_test_temp_dir)
def check_and_handle_core(vpp_binary, tempdir, core_crash_test):
if is_core_present(tempdir):
if debug_core:
print('VPP core detected in %s. Last test running was %s' %
(tempdir, core_crash_test))
print(single_line_delim)
spawn_gdb(vpp_binary, get_core_path(tempdir))
print(single_line_delim)
elif compress_core:
print("Compressing core-file in test directory `%s'" % tempdir)
os.system("gzip %s" % get_core_path(tempdir))
def handle_cores(failed_testcases):
for failed_testcase in failed_testcases:
tcs_with_core = failed_testcase.testclasess_with_core
if tcs_with_core:
for test, vpp_binary, tempdir in tcs_with_core.values():
check_and_handle_core(vpp_binary, tempdir, test)
def process_finished_testsuite(wrapped_testcase_suite,
finished_testcase_suites,
failed_wrapped_testcases,
results):
results.append(wrapped_testcase_suite.result)
finished_testcase_suites.add(wrapped_testcase_suite)
stop_run = False
if failfast and not wrapped_testcase_suite.was_successful():
stop_run = True
if not wrapped_testcase_suite.was_successful():
failed_wrapped_testcases.add(wrapped_testcase_suite)
handle_failed_suite(wrapped_testcase_suite.logger,
wrapped_testcase_suite.last_test_temp_dir,
wrapped_testcase_suite.vpp_pid)
return stop_run
def run_forked(testcase_suites):
wrapped_testcase_suites = set()
# suites are unhashable, need to use list
results = []
unread_testcases = set()
finished_unread_testcases = set()
manager = StreamQueueManager()
manager.start()
for i in range(concurrent_tests):
if testcase_suites:
wrapped_testcase_suite = TestCaseWrapper(testcase_suites.pop(0),
manager)
wrapped_testcase_suites.add(wrapped_testcase_suite)
unread_testcases.add(wrapped_testcase_suite)
else:
break
read_from_testcases = threading.Event()
read_from_testcases.set()
stdouterr_thread = threading.Thread(target=stdouterr_reader_wrapper,
args=(unread_testcases,
finished_unread_testcases,
read_from_testcases))
stdouterr_thread.start()
failed_wrapped_testcases = set()
stop_run = False
try:
while wrapped_testcase_suites:
finished_testcase_suites = set()
for wrapped_testcase_suite in wrapped_testcase_suites:
while wrapped_testcase_suite.result_parent_end.poll():
wrapped_testcase_suite.result.process_result(
*wrapped_testcase_suite.result_parent_end.recv())
wrapped_testcase_suite.last_heard = time.time()
while wrapped_testcase_suite.keep_alive_parent_end.poll():
wrapped_testcase_suite.last_test, \
wrapped_testcase_suite.last_test_vpp_binary, \
wrapped_testcase_suite.last_test_temp_dir, \
wrapped_testcase_suite.vpp_pid = \
wrapped_testcase_suite.keep_alive_parent_end.recv()
wrapped_testcase_suite.last_heard = time.time()
if wrapped_testcase_suite.finished_parent_end.poll():
wrapped_testcase_suite.finished_parent_end.recv()
wrapped_testcase_suite.last_heard = time.time()
stop_run = process_finished_testsuite(
wrapped_testcase_suite,
finished_testcase_suites,
failed_wrapped_testcases,
results) or stop_run
continue
fail = False
if wrapped_testcase_suite.last_heard + test_timeout < \
time.time():
fail = True
wrapped_testcase_suite.logger.critical(
"Child test runner process timed out "
"(last test running was `%s' in `%s')!" %
(wrapped_testcase_suite.last_test,
wrapped_testcase_suite.last_test_temp_dir))
elif not wrapped_testcase_suite.child.is_alive():
fail = True
wrapped_testcase_suite.logger.critical(
"Child test runner process unexpectedly died "
"(last test running was `%s' in `%s')!" %
(wrapped_testcase_suite.last_test,
wrapped_testcase_suite.last_test_temp_dir))
elif wrapped_testcase_suite.last_test_temp_dir and \
wrapped_testcase_suite.last_test_vpp_binary:
if is_core_present(
wrapped_testcase_suite.last_test_temp_dir):
wrapped_testcase_suite.add_testclass_with_core()
if wrapped_testcase_suite.core_detected_at is None:
wrapped_testcase_suite.core_detected_at = \
time.time()
elif wrapped_testcase_suite.core_detected_at + \
core_timeout < time.time():
wrapped_testcase_suite.logger.critical(
"Child test runner process unresponsive and "
"core-file exists in test temporary directory "
"(last test running was `%s' in `%s')!" %
(wrapped_testcase_suite.last_test,
wrapped_testcase_suite.last_test_temp_dir))
fail = True
if fail:
wrapped_testcase_suite.child.terminate()
try:
# terminating the child process tends to leave orphan
# VPP processes around
if wrapped_testcase_suite.vpp_pid:
os.kill(wrapped_testcase_suite.vpp_pid,
signal.SIGTERM)
except OSError:
# already dead
pass
wrapped_testcase_suite.result.crashed = True
wrapped_testcase_suite.result.process_result(
wrapped_testcase_suite.last_test_id, ERROR)
stop_run = process_finished_testsuite(
wrapped_testcase_suite,
finished_testcase_suites,
failed_wrapped_testcases,
results) or stop_run
for finished_testcase in finished_testcase_suites:
# Somewhat surprisingly, the join below may
# time out, even if the client signaled that
# it finished - so we note it just in case.
join_start = time.time()
finished_testcase.child.join(test_finished_join_timeout)
join_end = time.time()
if join_end - join_start >= test_finished_join_timeout:
finished_testcase.logger.error(
"Timeout joining finished test: %s (pid %d)" %
(finished_testcase.last_test,
finished_testcase.child.pid))
finished_testcase.close_pipes()
wrapped_testcase_suites.remove(finished_testcase)
finished_unread_testcases.add(finished_testcase)
finished_testcase.stdouterr_queue.put(None)
if stop_run:
while testcase_suites:
results.append(TestResult(testcase_suites.pop(0)))
elif testcase_suites:
new_testcase = TestCaseWrapper(testcase_suites.pop(0),
manager)
wrapped_testcase_suites.add(new_testcase)
unread_testcases.add(new_testcase)
time.sleep(0.1)
except Exception:
for wrapped_testcase_suite in wrapped_testcase_suites:
wrapped_testcase_suite.child.terminate()
wrapped_testcase_suite.stdouterr_queue.put(None)
raise
finally:
read_from_testcases.clear()
stdouterr_thread.join(test_timeout)
manager.shutdown()
handle_cores(failed_wrapped_testcases)
return results
class SplitToSuitesCallback:
def __init__(self, filter_callback):
self.suites = {}
self.suite_name = 'default'
self.filter_callback = filter_callback
self.filtered = unittest.TestSuite()
def __call__(self, file_name, cls, method):
test_method = cls(method)
if self.filter_callback(file_name, cls.__name__, method):
self.suite_name = file_name + cls.__name__
if self.suite_name not in self.suites:
self.suites[self.suite_name] = unittest.TestSuite()
self.suites[self.suite_name].addTest(test_method)
else:
self.filtered.addTest(test_method)
test_option = "TEST"
def parse_test_option():
f = os.getenv(test_option, None)
filter_file_name = None
filter_class_name = None
filter_func_name = None
if f:
if '.' in f:
parts = f.split('.')
if len(parts) > 3:
raise Exception("Unrecognized %s option: %s" %
(test_option, f))
if len(parts) > 2:
if parts[2] not in ('*', ''):
filter_func_name = parts[2]
if parts[1] not in ('*', ''):
filter_class_name = parts[1]
if parts[0] not in ('*', ''):
if parts[0].startswith('test_'):
filter_file_name = parts[0]
else:
filter_file_name = 'test_%s' % parts[0]
else:
if f.startswith('test_'):
filter_file_name = f
else:
filter_file_name = 'test_%s' % f
if filter_file_name:
filter_file_name = '%s.py' % filter_file_name
return filter_file_name, filter_class_name, filter_func_name
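# Illustrative sketch (not part of the original runner): how a few TEST
# values would be parsed by parse_test_option() above. The test names are
# hypothetical examples of the file.class.function convention it implements.
#
#   TEST=ip4                    -> ('test_ip4.py', None, None)
#   TEST=ip4.TestIPv4           -> ('test_ip4.py', 'TestIPv4', None)
#   TEST=ip4.TestIPv4.test_icmp -> ('test_ip4.py', 'TestIPv4', 'test_icmp')
#   TEST=ip4.*.test_icmp        -> ('test_ip4.py', None, 'test_icmp')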
def filter_tests(tests, filter_cb):
result = unittest.suite.TestSuite()
for t in tests:
if isinstance(t, unittest.suite.TestSuite):
# this is a bunch of tests, recursively filter...
x = filter_tests(t, filter_cb)
if x.countTestCases() > 0:
result.addTest(x)
elif isinstance(t, unittest.TestCase):
# this is a single test
parts = t.id().split('.')
# t.id() for common cases looks like this:
# test_classifier.TestClassifier.test_acl_ip
# apply filtering only when the id has that three-part form
if len(parts) == 3:
if not filter_cb(parts[0], parts[1], parts[2]):
continue
result.addTest(t)
else:
# unexpected object, don't touch it
result.addTest(t)
return result
class FilterByTestOption:
def __init__(self, filter_file_name, filter_class_name, filter_func_name):
self.filter_file_name = filter_file_name
self.filter_class_name = filter_class_name
self.filter_func_name = filter_func_name
def __call__(self, file_name, class_name, func_name):
if self.filter_file_name:
fn_match = fnmatch.fnmatch(file_name, self.filter_file_name)
if not fn_match:
return False
if self.filter_class_name and class_name != self.filter_class_name:
return False
if self.filter_func_name and func_name != self.filter_func_name:
return False
return True
class FilterByClassList:
def __init__(self, classes_with_filenames):
self.classes_with_filenames = classes_with_filenames
def __call__(self, file_name, class_name, func_name):
return '.'.join([file_name, class_name]) in self.classes_with_filenames
def suite_from_failed(suite, failed):
failed = {x.rsplit('.', 1)[0] for x in failed}
filter_cb = FilterByClassList(failed)
suite = filter_tests(suite, filter_cb)
return suite
class AllResults(dict):
def __init__(self):
super(AllResults, self).__init__()
self.all_testcases = 0
self.results_per_suite = []
self[PASS] = 0
self[FAIL] = 0
self[ERROR] = 0
self[SKIP] = 0
self[TEST_RUN] = 0
self.rerun = []
self.testsuites_no_tests_run = []
def add_results(self, result):
self.results_per_suite.append(result)
result_types = [PASS, FAIL, ERROR, SKIP, TEST_RUN]
for result_type in result_types:
self[result_type] += len(result[result_type])
def add_result(self, result):
retval = 0
self.all_testcases += result.testcase_suite.countTestCases()
self.add_results(result)
if result.no_tests_run():
self.testsuites_no_tests_run.append(result.testcase_suite)
if result.crashed:
retval = -1
else:
retval = 1
elif not result.was_successful():
retval = 1
if retval != 0:
self.rerun.append(result.testcase_suite)
return retval
def print_results(self):
print('')
print(double_line_delim)
print('TEST RESULTS:')
print(' Scheduled tests: {}'.format(self.all_testcases))
print(' Executed tests: {}'.format(self[TEST_RUN]))
print(' Passed tests: {}'.format(
colorize(str(self[PASS]), GREEN)))
if self[SKIP] > 0:
print(' Skipped tests: {}'.format(
colorize(str(self[SKIP]), YELLOW)))
if self.not_executed > 0:
print(' Not Executed tests: {}'.format(
colorize(str(self.not_executed), RED)))
if self[FAIL] > 0:
print(' Failures: {}'.format(
colorize(str(self[FAIL]), RED)))
if self[ERROR] > 0:
print(' Errors: {}'.format(
colorize(str(self[ERROR]), RED)))
if self.all_failed > 0:
print('FAILURES AND ERRORS IN TESTS:')
for result in self.results_per_suite:
failed_testcase_ids = result[FAIL]
errored_testcase_ids = result[ERROR]
old_testcase_name = None
if failed_testcase_ids:
for failed_test_id in failed_testcase_ids:
new_testcase_name, test_name = \
result.get_testcase_names(failed_test_id)
if new_testcase_name != old_testcase_name:
print(' Testcase name: {}'.format(
colorize(new_testcase_name, RED)))
old_testcase_name = new_testcase_name
print(' FAILURE: {} [{}]'.format(
colorize(test_name, RED), failed_test_id))
if errored_testcase_ids:
for errored_test_id in errored_testcase_ids:
new_testcase_name, test_name = \
result.get_testcase_names(errored_test_id)
if new_testcase_name != old_testcase_name:
print(' Testcase name: {}'.format(
colorize(new_testcase_name, RED)))
old_testcase_name = new_testcase_name
print(' ERROR: {} [{}]'.format(
colorize(test_name, RED), errored_test_id))
if self.testsuites_no_tests_run:
print('TESTCASES WHERE NO TESTS WERE SUCCESSFULLY EXECUTED:')
tc_classes = set()
for testsuite in self.testsuites_no_tests_run:
for testcase in testsuite:
tc_classes.add(get_testcase_doc_name(testcase))
for tc_class in tc_classes:
print(' {}'.format(colorize(tc_class, RED)))
print(double_line_delim)
print('')
@property
def not_executed(self):
return self.all_testcases - self[TEST_RUN]
@property
def all_failed(self):
return self[FAIL] + self[ERROR]
def parse_results(results):
"""
Prints the number of scheduled, executed, not executed, passed, failed,
errored and skipped tests and details about failed and errored tests.
Also returns all suites where any test failed.
:param results:
:return:
"""
results_per_suite = AllResults()
crashed = False
failed = False
for result in results:
result_code = results_per_suite.add_result(result)
if result_code == 1:
failed = True
elif result_code == -1:
crashed = True
results_per_suite.print_results()
if crashed:
return_code = -1
elif failed:
return_code = 1
else:
return_code = 0
return return_code, results_per_suite.rerun
def parse_digit_env(env_var, default):
value = os.getenv(env_var, default)
if value != default:
if value.isdigit():
value = int(value)
else:
print('WARNING: unsupported value "%s" for env var "%s", '
'defaulting to %s' % (value, env_var, default))
value = default
return value
if __name__ == '__main__':
verbose = parse_digit_env("V", 0)
test_timeout = parse_digit_env("TIMEOUT", 600) # default = 10 minutes
test_finished_join_timeout = 15
retries = parse_digit_env("RETRIES", 0)
debug = os.getenv("DEBUG", "n").lower() in ["gdb", "gdbserver"]
debug_core = os.getenv("DEBUG", "").lower() == "core"
compress_core = framework.BoolEnvironmentVariable("CORE_COMPRESS")
step = framework.BoolEnvironmentVariable("STEP")
force_foreground = framework.BoolEnvironmentVariable("FORCE_FOREGROUND")
run_interactive = debug or step or force_foreground
try:
num_cpus = len(os.sched_getaffinity(0))
except AttributeError:
num_cpus = multiprocessing.cpu_count()
shm_free = psutil.disk_usage('/dev/shm').free
print('OS reports %s available cpu(s). Free shm: %s' % (
num_cpus, "{:,}MB".format(shm_free / (1024 * 1024))))
test_jobs = os.getenv("TEST_JOBS", "1").lower() # default = 1 process
if test_jobs == 'auto':
if run_interactive:
concurrent_tests = 1
print('Interactive mode required, running on one core')
else:
shm_max_processes = 1
if shm_free < min_req_shm:
raise Exception('Not enough free space in /dev/shm. Required '
'free space is at least %sM.'
% (min_req_shm >> 20))
else:
extra_shm = shm_free - min_req_shm
shm_max_processes += extra_shm // shm_per_process
concurrent_tests = min(cpu_count(), shm_max_processes)
print('Found enough resources to run tests with %s cores'
% concurrent_tests)
elif test_jobs.isdigit():
concurrent_tests = int(test_jobs)
print("Running on %s core(s) as set by 'TEST_JOBS'." %
concurrent_tests)
else:
concurrent_tests = 1
print('Running on one core.')
if run_interactive and concurrent_tests > 1:
raise NotImplementedError(
'Running tests interactively (DEBUG is gdb or gdbserver or STEP '
'is set) in parallel (TEST_JOBS is more than 1) is not supported')
parser = argparse.ArgumentParser(description="VPP unit tests")
parser.add_argument("-f", "--failfast", action='store_true',
help="fast failure flag")
parser.add_argument("-d", "--dir", action='append', type=str,
help="directory containing test files "
"(may be specified multiple times)")
args = parser.parse_args()
failfast = args.failfast
descriptions = True
print("Running tests using custom test runner") # debug message
filter_file, filter_class, filter_func = parse_test_option()
print("Active filters: file=%s, class=%s, function=%s" % (
filter_file, filter_class, filter_func))
filter_cb = FilterByTestOption(filter_file, filter_class, filter_func)
ignore_path = os.getenv("VENV_PATH", None)
cb = SplitToSuitesCallback(filter_cb)
for d in args.dir:
print("Adding tests from directory tree %s" % d)
discover_tests(d, cb, ignore_path)
# suites are not hashable, need to use list
suites = []
tests_amount = 0
for testcase_suite in cb.suites.values():
tests_amount += testcase_suite.countTestCases()
suites.append(testcase_suite)
print("%s out of %s tests match specified filters" % (
tests_amount, tests_amount + cb.filtered.countTestCases()))
if not running_extended_tests:
print("Not running extended tests (some tests will be skipped)")
attempts = retries + 1
if attempts > 1:
print("Perform %s attempts to pass the suite..." % attempts)
if run_interactive and suites:
# don't fork if requiring interactive terminal
print('Running tests in foreground in the current process')
full_suite = unittest.TestSuite()
full_suite.addTests(suites)
result = VppTestRunner(verbosity=verbose,
failfast=failfast,
print_summary=True).run(full_suite)
was_successful = result.wasSuccessful()
if not was_successful:
for test_case_info in result.failed_test_cases_info:
handle_failed_suite(test_case_info.logger,
test_case_info.tempdir,
test_case_info.vpp_pid)
if test_case_info in result.core_crash_test_cases_info:
check_and_handle_core(test_case_info.vpp_bin_path,
test_case_info.tempdir,
test_case_info.core_crash_test)
sys.exit(not was_successful)
else:
print('Running each VPPTestCase in a separate background process'
' with {} parallel process(es)'.format(concurrent_tests))
exit_code = 0
while suites and attempts > 0:
results = run_forked(suites)
exit_code, suites = parse_results(results)
attempts -= 1
if exit_code == 0:
print('Test run was successful')
else:
print('%s attempt(s) left.' % attempts)
sys.exit(exit_code)
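# Hedged usage sketch (not part of the original script): typical invocations
# given the environment variables and arguments parsed above; the script name
# and test directory are placeholders.
#
#   TEST=ip4 TEST_JOBS=auto RETRIES=1 python <this_script> -d /path/to/tests
#   DEBUG=gdb TEST=ip4.TestIPv4.test_icmp python <this_script> -d /path/to/tests
#
# The first form runs the matching suites in forked child processes with one
# retry of failed suites; the second forces interactive foreground mode, which
# the checks above restrict to a single process.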
|
|
import hashlib
import os
import posixpath
import re
from urllib import unquote
from urlparse import urlsplit, urlunsplit, urldefrag
from django.conf import settings
from django.core.cache import (get_cache, InvalidCacheBackendError,
cache as default_cache)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode, smart_str
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.contrib.staticfiles.utils import check_settings, matches_patterns
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super(StaticFilesStorage, self).__init__(location, base_url,
*args, **kwargs)
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super(StaticFilesStorage, self).path(name)
class CachedFilesMixin(object):
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
r"""(@import\s*["']\s*(.*?)["'])""",
)),
)
def __init__(self, *args, **kwargs):
super(CachedFilesMixin, self).__init__(*args, **kwargs)
try:
self.cache = get_cache('staticfiles')
except InvalidCacheBackendError:
# Use the default backend
self.cache = default_cache
self._patterns = SortedDict()
for extension, patterns in self.patterns:
for pattern in patterns:
compiled = re.compile(pattern)
self._patterns.setdefault(extension, []).append(compiled)
def file_hash(self, name, content=None):
"""
Returns a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
if content is None:
if not self.exists(clean_name):
raise ValueError("The file '%s' could not be found with %r." %
(clean_name, self))
try:
content = self.open(clean_name)
except IOError:
# Handle directory paths and fragments
return name
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
file_hash = self.file_hash(clean_name, content)
if file_hash is not None:
file_hash = u".%s" % file_hash
hashed_name = os.path.join(path, u"%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url("myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
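# Illustrative sketch (not in the original source): for a stored file
# "css/styles.css" whose content hashes to the hypothetical value
# "55e7cbb9ba48", hashed_name() returns "css/styles.55e7cbb9ba48.css";
# a name such as "fonts/myfont.eot?#iefix" additionally keeps the trailing
# "?" so the @font-face hack survives the rewrite.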
def cache_key(self, name):
return u'staticfiles:%s' % hashlib.md5(smart_str(name)).hexdigest()
def url(self, name, force=False):
"""
Returns the real URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
cache_key = self.cache_key(name)
hashed_name = self.cache.get(cache_key)
if hashed_name is None:
hashed_name = self.hashed_name(clean_name).replace('\\', '/')
# set the cache if there was a miss
# (e.g. if cache server goes down)
self.cache.set(cache_key, hashed_name)
final_url = super(CachedFilesMixin, self).url(hashed_name)
# Special casing for a @font-face hack, like url("myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url_converter(self, name):
"""
Returns the custom URL converter for the given file name.
"""
def converter(matchobj):
"""
Converts the matched URL depending on the parent level (`..`)
and returns the normalized and hashed URL using the url method
of the storage.
"""
matched, url = matchobj.groups()
# Completely ignore http(s) prefixed URLs,
# fragments and data-uri URLs
if url.startswith(('#', 'http:', 'https:', 'data:')):
return matched
name_parts = name.split(os.sep)
# Using posix normpath here to remove duplicates
url = posixpath.normpath(url)
url_parts = url.split('/')
parent_level, sub_level = url.count('..'), url.count('/')
if url.startswith('/'):
sub_level -= 1
url_parts = url_parts[1:]
if parent_level or not url.startswith('/'):
start, end = parent_level + 1, parent_level
else:
if sub_level:
if sub_level == 1:
parent_level -= 1
start, end = parent_level, 1
else:
start, end = 1, sub_level - 1
joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
hashed_url = self.url(unquote(joined_result), force=True)
file_name = hashed_url.split('/')[-1:]
relative_url = '/'.join(url.split('/')[:-1] + file_name)
# Return the hashed version to the file
return 'url("%s")' % unquote(relative_url)
return converter
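# Illustrative walk-through (not in the original source), assuming a CSS file
# "css/styles.css" that references "../img/logo.png" and a hypothetical
# content hash "abcdef123456":
#   the converter is invoked with url == "../img/logo.png"
#   joined_result == "img/logo.png"
#   hashed_url    == "<STATIC_URL>img/logo.abcdef123456.png" (via self.url(..., force=True))
#   return value  == 'url("../img/logo.abcdef123456.png")'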
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given list of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_paths = {}
# build a list of adjustable files
matches = lambda path: matches_patterns(path, self._patterns.keys())
adjustable_paths = [path for path in paths if matches(path)]
# then sort the files by the directory level
path_level = lambda name: len(name.split(os.sep))
for name in sorted(paths.keys(), key=path_level, reverse=True):
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
# generate the hash with the original content, even for
# adjustable files.
hashed_name = self.hashed_name(name, original_file)
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
content = original_file.read()
converter = self.url_converter(name)
for patterns in self._patterns.values():
for pattern in patterns:
content = pattern.sub(converter, content)
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(smart_str(content))
saved_name = self._save(hashed_name, content_file)
hashed_name = force_unicode(saved_name.replace('\\', '/'))
processed = True
else:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = force_unicode(saved_name.replace('\\', '/'))
# and then set the cache accordingly
hashed_paths[self.cache_key(name)] = hashed_name
yield name, hashed_name, processed
# Finally set the cache
self.cache.set_many(hashed_paths)
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
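# Hedged usage note (not part of the original module): this backend is
# normally enabled through the STATICFILES_STORAGE setting, e.g.
#   STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'
# after which collectstatic drives post_process() above to write the hashed
# copies and populate the 'staticfiles' cache consulted by url().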
class AppStaticStorage(FileSystemStorage):
"""
A file system storage backend that takes an app module and works
for the ``static`` directory of it.
"""
prefix = None
source_dir = 'static'
def __init__(self, app, *args, **kwargs):
"""
Returns a static file storage if available in the given app.
"""
# app is the actual app module
mod = import_module(app)
mod_path = os.path.dirname(mod.__file__)
location = os.path.join(mod_path, self.source_dir)
super(AppStaticStorage, self).__init__(location, *args, **kwargs)
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
|
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from taggit.managers import TaggableManager
from opps.core.models import Publishable, BaseBox, BaseConfig
class Infographic(Publishable):
TYPES = (
("gallery", _(u"Photo Gallery")),
("css", _(u"Custom CSS")),
("timeline", _(u"Timeline")),
)
title = models.CharField(_(u"Title"), max_length=255)
slug = models.SlugField(
_(u"URL"),
max_length=150,
unique=True,
db_index=True
)
headline = models.TextField(_(u"Headline"), blank=True, null=True)
description = models.TextField(
_(u"Description"),
blank=True,
null=True,
help_text=_(u'Main description, also used by timeline type')
)
channel = models.ForeignKey(
'channels.Channel',
null=True,
blank=True,
on_delete=models.SET_NULL
)
posts = models.ManyToManyField(
'articles.Post',
null=True,
blank=True,
related_name='infographic_post',
through='InfographicPost'
)
top_image = models.ForeignKey(
'images.Image',
verbose_name=_(u'Infographic Top Image'), blank=True,
null=True,
on_delete=models.SET_NULL,
related_name='infographic_topimage'
)
main_image = models.ForeignKey(
'images.Image',
verbose_name=_(u'Infographic Image'),
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name='infographic_image'
)
order = models.IntegerField(_(u"Order"), default=0)
tags = TaggableManager(blank=True)
type = models.CharField(
_(u"Infographic type"),
max_length=20,
choices=TYPES,
default="gallery"
)
items = models.ManyToManyField(
'infographics.InfographicItem',
null=True, blank=True,
related_name='infographic_item',
through='InfographicInfographicItem'
)
# css
css_path = models.CharField(
_(u"Custom css path"),
max_length=255,
null=True,
blank=True,
help_text=_(u'/static/css/file.css or http://domain.com/file.css')
)
# js_filepath
js_path = models.CharField(
_(u"Custom Java Script path"),
max_length=255,
null=True,
blank=True,
help_text=_(u'allowed only in the same domain')
)
# Timeline
timeline = models.ForeignKey(
'timelinejs.Timeline',
verbose_name=_(u'Timeline'),
null=True,
blank=True,
related_name='infographic_timeline',
on_delete=models.SET_NULL,
help_text=_(u'Set this and provide JSON, DOC or Events')
)
def __unicode__(self):
return self.title
class Meta:
ordering = ['order']
class InfographicInfographicItem(models.Model):
item = models.ForeignKey(
'infographics.InfographicItem',
verbose_name=_(u'Infographic Item'),
null=True,
blank=True,
related_name='infographicitem_item',
on_delete=models.SET_NULL
)
infographic = models.ForeignKey(
'infographics.Infographic',
verbose_name=_(u'Infographic'),
null=True,
blank=True,
related_name='infographicitem_infographic',
on_delete=models.SET_NULL
)
def __unicode__(self):
return u"{0}-{1}".format(self.infographic.slug, self.item.title)
class InfographicPost(models.Model):
post = models.ForeignKey(
'articles.Post',
verbose_name=_(u'Infographic Post'),
null=True,
blank=True,
related_name='infographicpost_post',
on_delete=models.SET_NULL
)
infographic = models.ForeignKey(
'infographics.Infographic',
verbose_name=_(u'Infographic'),
null=True,
blank=True,
related_name='infographicpost_infographic',
on_delete=models.SET_NULL
)
def __unicode__(self):
return u"{0}-{1}".format(self.infographic.slug, self.post.slug)
class InfographicItem(models.Model):
title = models.CharField(_(u"Title"), max_length=255)
slug = models.SlugField(
_(u"URL"),
max_length=150,
db_index=True
)
description = models.TextField(_(u"Description"), null=True, blank=True)
# optional for gallery and css
group = models.CharField(
_(u"Group"),
max_length=255,
blank=True, null=True,
help_text=_(u'To group menu items or to store custom attributes')
)
image = models.ForeignKey(
'images.Image',
verbose_name=_(u'Infographic Item Image'),
blank=True,
null=True,
help_text=_(u'Image'),
on_delete=models.SET_NULL,
)
# gallery
album = models.ForeignKey(
'articles.Album',
null=True, blank=True,
on_delete=models.SET_NULL,
related_name='infographicitem_album',
verbose_name=_(u'Album'),
)
timeline = models.ForeignKey(
'timelinejs.Timeline',
verbose_name=_(u'Timeline'),
null=True,
blank=True,
related_name='infographicitem_timeline',
on_delete=models.SET_NULL,
help_text=_(u'Set this and provide JSON, DOC or Items')
)
order = models.IntegerField(_(u"Order"), default=0)
def belongs(self):
if not self.infographicitem_item.exists():
return _(u"No infographic")
return ", ".join(item.infographic.title for item in self.infographicitem_item.all())
__unicode__ = lambda self: self.title
class InfographicBox(BaseBox):
infographics = models.ManyToManyField(
'infographics.Infographic',
null=True, blank=True,
related_name='infographicbox_infographics',
through='infographics.InfographicBoxInfographics'
)
class InfographicBoxInfographics(models.Model):
infographicbox = models.ForeignKey(
'infographics.InfographicBox',
null=True, blank=True,
on_delete=models.SET_NULL,
related_name='infographicboxinfographics_infographicboxes',
verbose_name=_(u'Infographic Box'),
)
infographic = models.ForeignKey(
'infographics.Infographic',
null=True, blank=True,
on_delete=models.SET_NULL,
related_name='infographicboxinfographics_infographics',
verbose_name=_(u'Infographic'),
)
order = models.PositiveIntegerField(_(u'Order'), default=0)
def __unicode__(self):
return u"{0}-{1}".format(self.infographicbox.slug, self.infographic.slug)
def clean(self):
if not self.infographic.published:
raise ValidationError(_(u'Infographic not published!'))
if not self.infographic.date_available <= timezone.now():
raise ValidationError(_(u'Infographic date_available '
u'is greater than today!'))
class InfographicConfig(BaseConfig):
infographic = models.ForeignKey(
'infographics.Infographic',
null=True, blank=True,
on_delete=models.SET_NULL,
related_name='infographicconfig_infographics',
verbose_name=_(u'Infographic'),
)
class Meta:
permissions = (("developer", "Developer"),)
unique_together = (
"key_group", "key", "site",
"channel", "article", "infographic"
)
|
|
# Copyright 2014 Antoine "hashar" Musso
# Copyright 2014 Wikimedia Foundation Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import git
import logging
import os
import re
import yaml
import six
from git import GitCommandError
from zuul import exceptions
from zuul.lib.clonemapper import CloneMapper
from zuul.merger.merger import Repo
class Cloner(object):
log = logging.getLogger("zuul.Cloner")
def __init__(self, git_base_url, projects, workspace, zuul_branch,
zuul_ref, zuul_url, branch=None, clone_map_file=None,
project_branches=None, cache_dir=None, zuul_newrev=None,
zuul_project=None):
self.clone_map = []
self.dests = None
self.branch = branch
self.git_url = git_base_url
self.cache_dir = cache_dir
self.projects = projects
self.workspace = workspace
self.zuul_branch = zuul_branch or ''
self.zuul_ref = zuul_ref or ''
self.zuul_url = zuul_url
self.project_branches = project_branches or {}
self.project_revisions = {}
if zuul_newrev and zuul_project:
self.project_revisions[zuul_project] = zuul_newrev
if clone_map_file:
self.readCloneMap(clone_map_file)
def readCloneMap(self, clone_map_file):
clone_map_file = os.path.expanduser(clone_map_file)
if not os.path.exists(clone_map_file):
raise Exception("Unable to read clone map file at %s." %
clone_map_file)
clone_map_file = open(clone_map_file)
self.clone_map = yaml.load(clone_map_file).get('clonemap')
self.log.info("Loaded map containing %s rules", len(self.clone_map))
return self.clone_map
def execute(self):
mapper = CloneMapper(self.clone_map, self.projects)
dests = mapper.expand(workspace=self.workspace)
self.log.info("Preparing %s repositories", len(dests))
for project, dest in six.iteritems(dests):
self.prepareRepo(project, dest)
self.log.info("Prepared all repositories")
def cloneUpstream(self, project, dest):
# Check for a cached git repo first
git_cache = '%s/%s' % (self.cache_dir, project)
git_upstream = '%s/%s' % (self.git_url, project)
repo_is_cloned = os.path.exists(os.path.join(dest, '.git'))
if (self.cache_dir and
os.path.exists(git_cache) and
not repo_is_cloned):
# file:// tells git not to hard-link across repos
git_cache = 'file://%s' % git_cache
self.log.info("Creating repo %s from cache %s",
project, git_cache)
new_repo = git.Repo.clone_from(git_cache, dest)
self.log.info("Updating origin remote in repo %s to %s",
project, git_upstream)
new_repo.remotes.origin.config_writer.set('url', git_upstream)
else:
self.log.info("Creating repo %s from upstream %s",
project, git_upstream)
repo = Repo(
remote=git_upstream,
local=dest,
email=None,
username=None)
if not repo.isInitialized():
raise Exception("Error cloning %s to %s" % (git_upstream, dest))
return repo
def fetchFromZuul(self, repo, project, ref):
zuul_remote = '%s/%s' % (self.zuul_url, project)
try:
repo.fetchFrom(zuul_remote, ref)
self.log.debug("Fetched ref %s from %s", ref, project)
return True
except ValueError:
self.log.debug("Project %s in Zuul does not have ref %s",
project, ref)
return False
except GitCommandError as error:
# Bail out if fetch fails due to infrastructure reasons
if error.stderr.startswith('fatal: unable to access'):
raise
self.log.debug("Project %s in Zuul does not have ref %s",
project, ref)
return False
def prepareRepo(self, project, dest):
"""Clone a repository for project at dest and apply a reference
suitable for testing. The reference lookup is attempted in this order:
1) The indicated revision for specific project
2) Zuul reference for the indicated branch
3) Zuul reference for the master branch
4) The tip of the indicated branch
5) The tip of the master branch
If an "indicated revision" is specified for this project, and we are
unable to meet this requirement, we stop attempting to check this
repo out and raise a zuul.exceptions.RevNotFound exception.
The "indicated branch" is one of the following:
A) The project-specific override branch (from project_branches arg)
B) The user specified branch (from the branch arg)
C) ZUUL_BRANCH (from the zuul_branch arg)
"""
repo = self.cloneUpstream(project, dest)
# Ensure that we don't have stale remotes around
repo.prune()
# We must reset after pruning because resetting sets HEAD to point
# at refs/remotes/origin/master, but `git branch`, which prune runs,
# explodes if HEAD does not point at something in refs/heads.
# Later, with repo.checkout(), we set HEAD to something that
# `git branch` is happy with.
repo.reset()
indicated_revision = None
if project in self.project_revisions:
indicated_revision = self.project_revisions[project]
indicated_branch = self.branch or self.zuul_branch
if project in self.project_branches:
indicated_branch = self.project_branches[project]
if indicated_branch:
override_zuul_ref = re.sub(self.zuul_branch, indicated_branch,
self.zuul_ref)
else:
override_zuul_ref = None
if indicated_branch and repo.hasBranch(indicated_branch):
self.log.info("upstream repo has branch %s", indicated_branch)
fallback_branch = indicated_branch
else:
if indicated_branch:
self.log.info("upstream repo is missing branch %s",
indicated_branch)
# FIXME should be origin HEAD branch which might not be 'master'
fallback_branch = 'master'
if self.zuul_branch:
fallback_zuul_ref = re.sub(self.zuul_branch, fallback_branch,
self.zuul_ref)
else:
fallback_zuul_ref = None
# If the user has requested an explicit revision to be checked out,
# we use it above all else, and if we cannot satisfy this requirement
# we raise an error and do not attempt to continue.
if indicated_revision:
self.log.info("Attempting to check out revision %s for "
"project %s", indicated_revision, project)
try:
self.fetchFromZuul(repo, project, self.zuul_ref)
commit = repo.checkout(indicated_revision)
except (ValueError, GitCommandError):
raise exceptions.RevNotFound(project, indicated_revision)
self.log.info("Prepared '%s' repo at revision '%s'", project,
indicated_revision)
# If we have a non empty zuul_ref to use, use it. Otherwise we fall
# back to checking out the branch.
elif ((override_zuul_ref and
self.fetchFromZuul(repo, project, override_zuul_ref)) or
(fallback_zuul_ref and
fallback_zuul_ref != override_zuul_ref and
self.fetchFromZuul(repo, project, fallback_zuul_ref))):
# Work around a bug in GitPython which cannot parse FETCH_HEAD
gitcmd = git.Git(dest)
fetch_head = gitcmd.rev_parse('FETCH_HEAD')
repo.checkout(fetch_head)
self.log.info("Prepared %s repo with commit %s",
project, fetch_head)
else:
# Checkout branch
self.log.info("Falling back to branch %s", fallback_branch)
try:
commit = repo.checkout('remotes/origin/%s' % fallback_branch)
except (ValueError, GitCommandError):
self.log.exception("Fallback branch not found: %s",
fallback_branch)
self.log.info("Prepared %s repo with branch %s at commit %s",
project, fallback_branch, commit)
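# Minimal usage sketch (not part of the original module); all argument values
# are hypothetical and simply mirror the constructor parameters defined above:
#
#   cloner = Cloner(
#       git_base_url='https://git.example.org',
#       projects=['example/project'],
#       workspace='/tmp/workspace',
#       zuul_branch='master',
#       zuul_ref='refs/zuul/master/Zxyz',
#       zuul_url='http://zuul.example.org/p',
#       cache_dir='/var/cache/git')
#   cloner.execute()  # prepares each project per the lookup order in prepareRepo()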
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import sys
from pyspark.sql.functions import array, explode, col, lit, udf, sum, pandas_udf, PandasUDFType
from pyspark.sql.types import DoubleType, StructType, StructField, Row
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
if have_pyarrow:
import pyarrow as pa
# Tests below use pd.DataFrame.assign, which will infer mixed types (unicode/str) for column names
# from kwargs with Python 2, so we need to set check_column_type=False to avoid this check
_check_column_type = sys.version >= '3'
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class CogroupedMapInPandasTests(ReusedSQLTestCase):
@property
def data1(self):
return self.spark.range(10).toDF('id') \
.withColumn("ks", array([lit(i) for i in range(20, 30)])) \
.withColumn("k", explode(col('ks')))\
.withColumn("v", col('k') * 10)\
.drop('ks')
@property
def data2(self):
return self.spark.range(10).toDF('id') \
.withColumn("ks", array([lit(i) for i in range(20, 30)])) \
.withColumn("k", explode(col('ks'))) \
.withColumn("v2", col('k') * 100) \
.drop('ks')
def test_simple(self):
self._test_merge(self.data1, self.data2)
def test_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_merge(left, self.data2)
def test_right_group_empty(self):
right = self.data2.where(col("id") % 2 == 0)
self._test_merge(self.data1, right)
def test_different_schemas(self):
right = self.data2.withColumn('v3', lit('a'))
self._test_merge(self.data1, right, 'id long, k int, v int, v2 int, v3 string')
def test_complex_group_by(self):
left = pd.DataFrame.from_dict({
'id': [1, 2, 3],
'k': [5, 6, 7],
'v': [9, 10, 11]
})
right = pd.DataFrame.from_dict({
'id': [11, 12, 13],
'k': [5, 6, 7],
'v2': [90, 100, 110]
})
left_gdf = self.spark\
.createDataFrame(left)\
.groupby(col('id') % 2 == 0)
right_gdf = self.spark \
.createDataFrame(right) \
.groupby(col('id') % 2 == 0)
def merge_pandas(l, r):
return pd.merge(l[['k', 'v']], r[['k', 'v2']], on=['k'])
result = left_gdf \
.cogroup(right_gdf) \
.applyInPandas(merge_pandas, 'k long, v long, v2 long') \
.sort(['k']) \
.toPandas()
expected = pd.DataFrame.from_dict({
'k': [5, 6, 7],
'v': [9, 10, 11],
'v2': [90, 100, 110]
})
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_empty_group_by(self):
left = self.data1
right = self.data2
def merge_pandas(l, r):
return pd.merge(l, r, on=['id', 'k'])
result = left.groupby().cogroup(right.groupby())\
.applyInPandas(merge_pandas, 'id long, k int, v int, v2 int') \
.sort(['id', 'k']) \
.toPandas()
left = left.toPandas()
right = right.toPandas()
expected = pd \
.merge(left, right, on=['id', 'k']) \
.sort_values(by=['id', 'k'])
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_mixed_scalar_udfs_followed_by_cogrouby_apply(self):
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
result = df.groupby().cogroup(df.groupby()) \
.applyInPandas(lambda x, y: pd.DataFrame([(x.sum().sum(), y.sum().sum())]),
'sum1 int, sum2 int').collect()
self.assertEquals(result[0]['sum1'], 165)
self.assertEquals(result[0]['sum2'], 165)
def test_with_key_left(self):
self._test_with_key(self.data1, self.data1, isLeft=True)
def test_with_key_right(self):
self._test_with_key(self.data1, self.data1, isLeft=False)
def test_with_key_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_with_key(left, self.data1, isLeft=True)
def test_with_key_right_group_empty(self):
right = self.data1.where(col("id") % 2 == 0)
self._test_with_key(self.data1, right, isLeft=False)
def test_with_key_complex(self):
def left_assign_key(key, l, _):
return l.assign(key=key[0])
result = self.data1 \
.groupby(col('id') % 2 == 0)\
.cogroup(self.data2.groupby(col('id') % 2 == 0)) \
.applyInPandas(left_assign_key, 'id long, k int, v int, key boolean') \
.sort(['id', 'k']) \
.toPandas()
expected = self.data1.toPandas()
expected = expected.assign(key=expected.id % 2 == 0)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_wrong_return_type(self):
# Test that we get a sensible exception when invalid values are passed to apply
left = self.data1
right = self.data2
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid return type.*MapType'):
left.groupby('id').cogroup(right.groupby('id')).applyInPandas(
lambda l, r: l, 'id long, v map<int, int>')
def test_wrong_args(self):
left = self.data1
right = self.data2
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
left.groupby('id').cogroup(right.groupby('id')) \
.applyInPandas(lambda: 1, StructType([StructField("d", DoubleType())]))
def test_case_insensitive_grouping_column(self):
# SPARK-31915: case-insensitive grouping column should work.
df1 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df1.groupby("ColUmn").cogroup(
df1.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long").first()
self.assertEquals(row.asDict(), Row(column=2, value=2).asDict())
df2 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df1.groupby("ColUmn").cogroup(
df2.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long").first()
self.assertEquals(row.asDict(), Row(column=2, value=2).asDict())
@staticmethod
def _test_with_key(left, right, isLeft):
def right_assign_key(key, l, r):
return l.assign(key=key[0]) if isLeft else r.assign(key=key[0])
result = left \
.groupby('id') \
.cogroup(right.groupby('id')) \
.applyInPandas(right_assign_key, 'id long, k int, v int, key long') \
.toPandas()
expected = left.toPandas() if isLeft else right.toPandas()
expected = expected.assign(key=expected.id)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
@staticmethod
def _test_merge(left, right, output_schema='id long, k int, v int, v2 int'):
def merge_pandas(l, r):
return pd.merge(l, r, on=['id', 'k'])
result = left \
.groupby('id') \
.cogroup(right.groupby('id')) \
.applyInPandas(merge_pandas, output_schema)\
.sort(['id', 'k']) \
.toPandas()
left = left.toPandas()
right = right.toPandas()
expected = pd \
.merge(left, right, on=['id', 'k']) \
.sort_values(by=['id', 'k'])
assert_frame_equal(expected, result, check_column_type=_check_column_type)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_cogrouped_map import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
_C.MODEL = CN()
_C.MODEL.RPN_ONLY = False
_C.MODEL.MASK_ON = False
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
# If the WEIGHT starts with a catalog://, like :R-50, the code will look for
# the path in paths_catalog. Else, it will use it as the specified absolute
# path
_C.MODEL.WEIGHT = ""
# -----------------------------------------------------------------------------
# Load pre-trained models from C2 Detectron
# -----------------------------------------------------------------------------
_C.MODEL.C2_COMPAT = CN()
# Weight file from C2 Detectron. Should be in .pkl format
_C.MODEL.C2_COMPAT.WEIGHTS = ""
# Name of the function that loads the C2 weights into our PyTorch model
_C.MODEL.C2_COMPAT.WEIGHT_LOADER = ""
# Load from C2 Detectron or not
_C.MODEL.C2_COMPAT.ENABLED = False
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the smallest side of the image during training
_C.INPUT.MIN_SIZE_TRAIN = 800 # (800,)
# Maximum size of the side of the image during training
_C.INPUT.MAX_SIZE_TRAIN = 1333
# Size of the smallest side of the image during testing
_C.INPUT.MIN_SIZE_TEST = 800
# Maximum size of the side of the image during testing
_C.INPUT.MAX_SIZE_TEST = 1333
# Values to be used for image normalization
_C.INPUT.PIXEL_MEAN = [102.9801, 115.9465, 122.7717]
# Values to be used for image normalization
_C.INPUT.PIXEL_STD = [1., 1., 1.]
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# List of the dataset names for training, as present in paths_catalog.py
_C.DATASETS.TRAIN = ()
# List of the dataset names for testing, as present in paths_catalog.py
_C.DATASETS.TEST = ()
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 4
# If > 0, this enforces that each collated batch should have a size divisible
# by SIZE_DIVISIBILITY
_C.DATALOADER.SIZE_DIVISIBILITY = 0
# Number of images per batch
_C.DATALOADER.IMAGES_PER_BATCH_TRAIN = 2
_C.DATALOADER.IMAGES_PER_BATCH_TEST = 1
# If True, each batch should contain only images for which the aspect ratio
# is compatible. This groups portrait images together, and landscape images
# are not batched with portrait images.
_C.DATALOADER.ASPECT_RATIO_GROUPING = True
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
# The backbone conv body to use
# The string must match a function that is imported in modeling.model_builder
# (e.g., 'FPN.add_fpn_ResNet101_conv5_body' to specify a ResNet-101-FPN
# backbone)
_C.MODEL.BACKBONE.CONV_BODY = "R-50-C4"
# Add StopGrad at a specified stage so the bottom layers are frozen
_C.MODEL.BACKBONE.FREEZE_CONV_BODY_AT = 2
_C.MODEL.BACKBONE.OUT_CHANNELS = 256 * 4
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.RPN = CN()
_C.MODEL.RPN.USE_FPN = False
# RPN anchor sizes given in relative size w.r.t. BASE_ANCHOR_SIZE
_C.MODEL.RPN.SCALES = (0.125, 0.25, 0.5, 1., 2.)
# Base RPN anchor size given in absolute pixels w.r.t. the scaled network input
_C.MODEL.RPN.BASE_ANCHOR_SIZE = 256
# Stride of the feature map that RPN is attached to.
# For FPN, the number of strides should match the number of scales
_C.MODEL.RPN.ANCHOR_STRIDE = (16,)
# RPN anchor aspect ratios
_C.MODEL.RPN.ASPECT_RATIOS = (0.5, 1.0, 2.0)
# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
_C.MODEL.RPN.STRADDLE_THRESH = 0
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example)
_C.MODEL.RPN.FG_IOU_THRESHOLD = 0.7
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative example (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example)
_C.MODEL.RPN.BG_IOU_THRESHOLD = 0.3
# Total number of RPN examples per image
_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOP_N_TRAIN = 12000
_C.MODEL.RPN.PRE_NMS_TOP_N_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
_C.MODEL.RPN.POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.POST_NMS_TOP_N_TEST = 1000
# NMS threshold used on RPN proposals
_C.MODEL.RPN.NMS_THRESH = 0.7
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (at the scale used during training or inference)
_C.MODEL.RPN.MIN_SIZE = 0
# Number of top scoring RPN proposals to keep after combining proposals from
# all FPN levels
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST = 2000
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS = CN()
_C.MODEL.ROI_HEADS.USE_FPN = False
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
_C.MODEL.ROI_HEADS.FG_IOU_THRESHOLD = 0.5
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
_C.MODEL.ROI_HEADS.BG_IOU_THRESHOLD = 0.5
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
_C.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS = (10., 10., 5., 5.)
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
# TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH * NUM_GPUS
# E.g., a common configuration is: 512 * 2 * 8 = 8192
_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
# Only used on test mode
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
_C.MODEL.ROI_HEADS.SCORE_THRESH = 0.05
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
_C.MODEL.ROI_HEADS.NMS = 0.5
# Maximum number of detections to return per image (100 is based on the limit
# established for the COCO dataset)
_C.MODEL.ROI_HEADS.DETECTIONS_PER_IMG = 100
_C.MODEL.ROI_BOX_HEAD = CN()
_C.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_BOX_HEAD.PREDICTOR = "FastRCNNPredictor"
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_BOX_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_BOX_HEAD.NUM_CLASSES = 81
# Hidden layer dimension when using an MLP for the RoI box head
_C.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_MASK_HEAD = CN()
_C.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_MASK_HEAD.PREDICTOR = "MaskRCNNC4Predictor"
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_MASK_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_MASK_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_MASK_HEAD.CONV_LAYERS = (256, 256, 256, 256)
_C.MODEL.ROI_MASK_HEAD.RESOLUTION = 14
_C.MODEL.SHARE_FEATURES_DURING_TRAINING = True
# ---------------------------------------------------------------------------- #
# ResNe[X]t options (ResNets = {ResNet, ResNeXt})
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS = CN()
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Baseline width of each group
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
# Residual transformation function
_C.MODEL.RESNETS.TRANS_FUNC = "BottleneckWithFixedBatchNorm"
# ResNet's stem function (conv1 and pool1)
_C.MODEL.RESNETS.STEM_FUNC = "StemWithFixedBatchNorm"
# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
_C.SOLVER.MAX_ITER = 40000
_C.SOLVER.BASE_LR = 0.001
_C.SOLVER.BIAS_LR_FACTOR = 2
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30000,)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
_C.SOLVER.WARMUP_ITERS = 500
_C.SOLVER.WARMUP_METHOD = "linear"
# Parameters to accumulate across steps
_C.SOLVER.ACCUMULATE_STEPS = 1
_C.SOLVER.ACCUMULATE_GRAD = False
# ---------------------------------------------------------------------------- #
# Specific test options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()
_C.TEST.EXPECTED_RESULTS = []
_C.TEST.EXPECTED_RESULTS_SIGMA_TOL = 4
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
_C.OUTPUT_DIR = "/results"
_C.CHECKPOINT = ""
_C.SAVE_CHECKPOINTS = False
_C.DO_ONLINE_MAP_EVAL = True
_C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog.py")
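# Hedged usage sketch (not part of the original file): the usual yacs pattern
# for consuming this node; "my_experiment.yaml" is a hypothetical override file.
#
#   cfg = _C.clone()
#   cfg.merge_from_file("my_experiment.yaml")
#   cfg.merge_from_list(["SOLVER.BASE_LR", 0.0025, "MODEL.MASK_ON", True])
#   cfg.freeze()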
|
|
from __future__ import unicode_literals
import getpass
import os
import shlex
import subprocess
try:
import unittest2 as unittest
except ImportError:
import unittest
import six
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm import __version__
# derived from http://www.cyberciti.biz/faq/create-ssh-config-file-on-linux-unix/
FAKE_SSH_CONFIG_FOR_CLI_TESTS = """
### default for all ##
Host *
ForwardAgent no
ForwardX11 no
ForwardX11Trusted yes
User nixcraft
Port 22
Protocol 2
ServerAliveInterval 60
ServerAliveCountMax 30
LocalForward 3128 127.0.0.1:3128
LocalForward 3129 127.0.0.1:3128
## override as per host ##
Host server1
HostName server1.cyberciti.biz
User nixcraft
Port 4242
IdentityFile /nfs/shared/users/nixcraft/keys/server1/id_rsa
IdentityFile /tmp/x.rsa
## Home nas server ##
Host nas01
HostName 192.168.1.100
User root
IdentityFile ~/.ssh/nas01.key
## Login AWS Cloud ##
Host aws.apache
HostName 1.2.3.4
User wwwdata
IdentityFile ~/.ssh/aws.apache.key
## Login to internal lan server at 192.168.0.251 via our public uk office ssh based gateway using ##
## $ ssh uk.gw.lan ##
Host uk.gw.lan uk.lan
HostName 192.168.0.251
User nixcraft
ProxyCommand ssh [email protected] nc %h %p 2> /dev/null
## Our Us Proxy Server ##
## Forward all local port 3128 traffic to port 3128 on the remote vps1.cyberciti.biz server ##
## $ ssh -f -N proxyus ##
Host proxyus
HostName vps1.cyberciti.biz
User breakfree
IdentityFile ~/.ssh/vps1.cyberciti.biz.key
LocalForward 3128 127.0.0.1:3128
"""
class StormCliTestCase(unittest.TestCase):
def setUp(self):
self.config_file = '/tmp/ssh_config_cli_tests'
with open(self.config_file, 'w+') as f:
f.write(FAKE_SSH_CONFIG_FOR_CLI_TESTS)
self.config_arg = '--config={0}'.format(self.config_file)
def run_cmd(self, cmd):
cmd = 'storm %s' % cmd
cmd = shlex.split(cmd.encode('utf-8') if six.PY2 else cmd)
_env = os.environ
_env["TESTMODE"] = "1"
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=_env)
out, err = process.communicate()
rc = process.returncode
return out, err, rc
def test_list_command(self):
out, err, rc = self.run_cmd('list {0}'.format(self.config_arg))
self.assertTrue(out.startswith(b" Listing entries:\n\n"))
hosts, custom_options = [
"aws.apache -> [email protected]:22",
"nas01 -> [email protected]:22",
"proxyus -> [email protected]:22",
"server1 -> [email protected]:4242",
"uk.gw.lan uk.lan -> [email protected]:22",
], [
"[custom options] identityfile=~/.ssh/aws.apache.key",
"[custom options] identityfile=~/.ssh/nas01.key",
"identityfile=~/.ssh/vps1.cyberciti.biz.key",
"localforward=3128 127.0.0.1:3128",
"[custom options] identityfile=/nfs/shared/users/nixcraft/keys/server1/id_rsa,/tmp/x.rsa",
"[custom options] proxycommand=ssh [email protected] nc %h %p 2> /dev/null",
]
general_options = {
"forwardx11": "no",
"protocol": "2",
"user": "nixcraft",
"forwardagent": "no",
"forwardx11trusted": "yes",
"serveralivecountmax": "30",
"serveraliveinterval": "60",
"port": "22",
"localforward": "3128 127.0.0.1:3128, 3129 127.0.0.1:3128",
}
for host in hosts:
self.assertIn(host.encode('ascii'), out)
for custom_option in custom_options:
self.assertIn(custom_option.encode('ascii'), out)
for general_option, value in six.iteritems(general_options):
self.assertIn("{0}: {1}".format(general_option, value).encode('ascii'), out)
self.assertEqual(err, b'')
self.assertEqual(rc, 0)
def test_version_command(self):
out, err, rc = self.run_cmd('version')
self.assertIn(__version__.encode('ascii'), out)
def test_basic_add(self):
out, err, rc = self.run_cmd('add netscaler [email protected] {0}'.format(self.config_arg))
self.assertIn(b"success", out)
def test_add_duplicate(self):
out, err, rc = self.run_cmd('add aws.apache [email protected] {0}'.format(self.config_arg))
self.assertEqual(b'', out)
self.assertIn(b'error', err)
def test_add_invalid_host(self):
out, err, rc = self.run_cmd('add @_@ test.com {0}'.format(self.config_arg))
self.assertEqual(b'', out)
self.assertIn(b'error', err)
def test_advanced_add(self):
out, err, rc = self.run_cmd('add postgresql-server [email protected] {0} {1}{2}'.format(
"--id_file=/tmp/idfilecheck.rsa ",
'--o "StrictHostKeyChecking=yes" --o "UserKnownHostsFile=/dev/advanced_test" ',
self.config_arg)
)
self.assertIn(b"success", out)
with open(self.config_file) as f:
# check that the property is really flushed out to the config file
content = f.read().encode('ascii')
self.assertIn(b'identityfile "/tmp/idfilecheck.rsa"', content)
self.assertIn(b"stricthostkeychecking yes", content)
self.assertIn(b"userknownhostsfile /dev/advanced_test", content)
def test_add_with_idfile(self):
out, err, rc = self.run_cmd('add postgresql-server [email protected] {0} {1}'.format(
"--id_file=/tmp/idfileonlycheck.rsa",
self.config_arg)
)
self.assertIn(b"success", out)
with open(self.config_file) as f:
content = f.read().encode('ascii')
self.assertIn(b'identityfile "/tmp/idfileonlycheck.rsa"', content)
def test_basic_edit(self):
out, err, rc = self.run_cmd('edit aws.apache [email protected] {0}'.format(self.config_arg))
self.assertIn(b"success", out)
with open(self.config_file) as f:
content = f.read().encode('ascii')
self.assertIn(b"basic_edit_check", content)
self.assertIn(b"10.20.30.40", content)
def test_edit_invalid_host(self):
out, err, rc = self.run_cmd('edit @missing_host test.com {0}'.format(self.config_arg))
self.assertEqual(b'', out)
self.assertIn(b'error', err)
def test_edit_missing_host(self):
out, err, rc = self.run_cmd('edit missing_host test.com {0}'.format(self.config_arg))
self.assertEqual(b'', out)
self.assertIn(b'error', err)
def test_update(self):
out, err, rc = self.run_cmd('update aws.apache --o "user=daghan" --o port=42000 {0}'.format(self.config_arg))
self.assertIn(b"success", out)
with open(self.config_file) as f:
content = f.read().encode('ascii')
self.assertIn(b"user daghan", content) # see daghan: http://instagram.com/p/lfPMW_qVja
self.assertIn(b"port 42000", content)
def test_update_regex(self):
self.run_cmd('add worker alphaworker.com {0}'.format(self.config_arg))
        # add three machines -- hostnames start with worker-N
self.run_cmd('add worker-1 worker1.com {0}'.format(self.config_arg))
self.run_cmd('add worker-2 worker2.com {0}'.format(self.config_arg))
self.run_cmd('add worker-4 worker4.com {0}'.format(self.config_arg))
# another one -- regex shouldn't capture that one though.
self.run_cmd('add worker3 worker3.com {0}'.format(self.config_arg))
out, err, rc = self.run_cmd("update 'worker-[1-5]' --o hostname=boss.com {0}".format(self.config_arg))
self.assertIn(b"success", out)
# edit the alphaworker
out, err, rc = self.run_cmd('edit worker [email protected] {0}'.format(self.config_arg))
with open(self.config_file) as f:
content = f.read().encode('ascii')
self.assertNotIn(b"worker1.com", content)
self.assertNotIn(b"worker2.com", content)
self.assertNotIn(b"worker4.com", content)
self.assertIn(b"worker3.com", content)
self.assertIn(b"alphauser", content)
out, err, rc = self.run_cmd("edit worker {0}".format(self.config_arg))
def test_update_invalid_regex(self):
out, err, rc = self.run_cmd("update 'drogba-[0-5]' --o hostname=boss.com {0}".format(self.config_arg))
self.assertEqual(b'', out)
self.assertIn(b'error', err)
def test_delete(self):
out, err, rc = self.run_cmd("delete server1 {0}".format(self.config_arg))
self.assertIn(b"success", out)
def test_delete_invalid_hostname(self):
out, err, rc = self.run_cmd("delete unknown_server".format(self.config_arg))
self.assertEqual(b'', out)
self.assertIn(b'error', err)
def test_search(self):
out, err, rc = self.run_cmd("search aws {0}".format(self.config_arg))
self.assertTrue(out.startswith(b'Listing results for aws:'))
self.assertIn(b'aws.apache', out)
def test_backup(self):
out, err, rc = self.run_cmd("backup /tmp/ssh_backup {0}".format(
self.config_arg))
self.assertEqual(True, os.path.exists("/tmp/ssh_backup"))
def test_invalid_search(self):
out, err, rc = self.run_cmd("search THEREISNOTHINGRELATEDWITHME {0}".format(self.config_arg))
self.assertIn(b'no results found.', out)
def test_delete_all(self):
out, err, rc = self.run_cmd('delete_all {0}'.format(self.config_arg))
self.assertIn(b'all entries deleted', out)
def tearDown(self):
os.unlink('/tmp/ssh_config_cli_tests')
class StormTests(unittest.TestCase):
def setUp(self):
fake_ssh_config = """Host *
IdentitiesOnly yes
Host netscaler
hostname 1.1.1.1
port 3367
"""
with open('/tmp/ssh_config', 'w+') as f:
f.write(fake_ssh_config)
self.storm = Storm('/tmp/ssh_config')
def test_config_load(self):
self.assertEqual(self.storm.ssh_config.config_data[1]["options"]["identitiesonly"], 'yes')
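    # Note on the structure assumed above (inferred from these tests rather
    # than from storm's documentation): ssh_config.config_data appears to be a
    # list of dicts of the form {"host": ..., "options": {...}}, interleaved
    # with other bookkeeping entries, which is why the "Host *" block sits at
    # index 1 and the netscaler options at index 4 in test_update_host below.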
def test_config_dump(self):
self.storm.ssh_config.write_to_ssh_config()
for search_str in ("hostname 1.1.1.1", "Host netscaler", "Host *"):
with open('/tmp/ssh_config') as fd:
self.assertIn(search_str, fd.read())
def test_update_host(self):
self.storm.ssh_config.update_host("netscaler", {"hostname": "2.2.2.2"})
self.assertEqual(self.storm.ssh_config.config_data[4]["options"]["hostname"], '2.2.2.2')
def test_add_host(self):
self.storm.add_entry('google', 'google.com', 'root', '22', '/tmp/tmp.pub')
self.storm.add_entry('goog', 'google.com', 'root', '22', '/tmp/tmp.pub')
self.storm.ssh_config.write_to_ssh_config()
for item in self.storm.ssh_config.config_data:
if item.get("host") == 'google' or item.get("host") == 'goog':
self.assertEqual(item.get("options").get("port"), '22')
self.assertEqual(item.get("options").get("identityfile"), '"/tmp/tmp.pub"')
def test_clone_host(self):
self.storm.add_entry('google', 'google.com', 'ops', '24', '/tmp/tmp.pub')
self.storm.clone_entry('google', 'yahoo')
has_yahoo = False
for item in self.storm.ssh_config.config_data:
if item.get("host") == 'yahoo':
has_yahoo = True
break
self.assertEqual(True, has_yahoo)
self.assertEqual(item.get("options").get("port"), '24')
self.assertEqual(item.get("options").get("identityfile"), '"/tmp/tmp.pub"')
self.assertEqual(item.get("options").get("user"), 'ops')
    def test_move_host(self):
        self.storm.add_entry('google', 'google.com', 'ops', '24', '/tmp/tmp.pub')
        self.storm.clone_entry('google', 'yahoo', keep_original=False)
        # capture the moved entry so the option checks below run against it,
        # not against whatever item the second loop happens to end on.
        yahoo_item = None
        for item in self.storm.ssh_config.config_data:
            if item.get("host") == 'yahoo':
                yahoo_item = item
                break
        has_google = False
        for item in self.storm.ssh_config.config_data:
            if item.get("host") == 'google':
                has_google = True
                break
        self.assertIsNotNone(yahoo_item)
        self.assertEqual(False, has_google)
        self.assertEqual(yahoo_item.get("options").get("port"), '24')
        self.assertEqual(yahoo_item.get("options").get("identityfile"), '"/tmp/tmp.pub"')
        self.assertEqual(yahoo_item.get("options").get("user"), 'ops')
def test_backup(self):
self.storm.backup("/tmp/storm_ssh_config_backup_file")
self.assertEqual(
True,
os.path.exists("/tmp/storm_ssh_config_backup_file")
)
def test_double_clone_exception(self):
self.storm.add_entry('google', 'google.com', 'ops', '24', '/tmp/tmp.pub')
self.storm.clone_entry('google', 'yahoo')
with self.assertRaises(ValueError):
self.storm.clone_entry('google', 'yahoo')
def test_edit_host(self):
self.storm.add_entry('google', 'google.com', 'root', '22', '/tmp/tmp.pub')
self.storm.ssh_config.write_to_ssh_config()
self.storm.edit_entry('google', 'google.com', 'root', '23', '/tmp/tmp.pub')
self.storm.ssh_config.write_to_ssh_config()
for item in self.storm.ssh_config.config_data:
if item.get("host") == 'google':
self.assertEqual(item.get("options").get("port"), '23')
def test_edit_by_hostname_regexp(self):
import re
self.storm.add_entry('google-01', 'google.com', 'root', '22', '/tmp/tmp.pub')
self.storm.add_entry('google-02', 'google.com', 'root', '23', '/tmp/tmp.pub')
self.storm.ssh_config.write_to_ssh_config()
self.storm.update_entry('google-[0-2]', port='24', identityfile='/tmp/tmp.pub1')
for item in self.storm.ssh_config.config_data:
if re.match(r"google*", item.get("host")):
self.assertEqual(item.get("options").get("identityfile"), '/tmp/tmp.pub1')
self.assertEqual(item.get("options").get("port"), '24')
def test_delete_host(self):
self.storm.delete_entry('netscaler')
for host in self.storm.ssh_config.config_data:
self.assertEqual(False, host.get("host") == 'netscaler')
def test99_delete_all(self):
self.storm.delete_all_entries()
self.assertEqual(len(self.storm.ssh_config.config_data), 0)
def test_uri_parser(self):
user = getpass.getuser()
TEST_STRINGS = [
('[email protected]:22', ('root', 'emreyilmaz.me', 22)),
('emreyilmaz.me', (user, 'emreyilmaz.me', 22)),
('emreyilmaz.me:22', (user, 'emreyilmaz.me', 22)),
('[email protected]', ('root', 'emreyilmaz.me', 22))
]
for uri in TEST_STRINGS:
self.assertEqual(parse(uri[0]), uri[1])
# false strings
self.assertRaises(ValueError, parse, '[email protected]:string-port')
def test_search_host(self):
results = self.storm.ssh_config.search_host("netsca")
self.assertEqual(len(results), 1)
def test_custom_options(self):
custom_options = (
"StrictHostKeyChecking=no",
"UserKnownHostsFile=/dev/null",
)
self.storm.add_entry('host_with_custom_option',
'emre.io', 'emre', 22,
None, custom_options=custom_options)
self.storm.ssh_config.write_to_ssh_config()
for item in self.storm.ssh_config.config_data:
if item.get("host") == 'host_with_custom_option':
self.assertEqual(item.get("options").get("stricthostkeychecking"), 'no')
self.assertEqual(item.get("options").get("userknownhostsfile"), '/dev/null')
custom_options = (
"StrictHostKeyChecking=yes",
"UserKnownHostsFile=/home/emre/foo",
)
self.storm.edit_entry('host_with_custom_option',
'emre.io', 'emre', 22,
None, custom_options=custom_options)
self.storm.ssh_config.write_to_ssh_config()
for item in self.storm.ssh_config.config_data:
if item.get("host") == 'host_with_custom_option':
self.assertEqual(item.get("options").get("stricthostkeychecking"), 'yes')
self.assertEqual(item.get("options").get("userknownhostsfile"), '/home/emre/foo')
def tearDown(self):
os.unlink('/tmp/ssh_config')
if __name__ == '__main__':
unittest.main()
|
|
"""Support for SmartThings Cloud."""
from __future__ import annotations
import asyncio
from collections.abc import Iterable
import importlib
import logging
from aiohttp.client_exceptions import ClientConnectionError, ClientResponseError
from pysmartapp.event import EVENT_TYPE_DEVICE
from pysmartthings import Attribute, Capability, SmartThings
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
HTTP_FORBIDDEN,
HTTP_UNAUTHORIZED,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType
from .config_flow import SmartThingsFlowHandler # noqa: F401
from .const import (
CONF_APP_ID,
CONF_INSTALLED_APP_ID,
CONF_LOCATION_ID,
CONF_REFRESH_TOKEN,
DATA_BROKERS,
DATA_MANAGER,
DOMAIN,
EVENT_BUTTON,
PLATFORMS,
SIGNAL_SMARTTHINGS_UPDATE,
TOKEN_REFRESH_INTERVAL,
)
from .smartapp import (
format_unique_id,
setup_smartapp,
setup_smartapp_endpoint,
smartapp_sync_subscriptions,
unload_smartapp_endpoint,
validate_installed_app,
validate_webhook_requirements,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType):
"""Initialize the SmartThings platform."""
await setup_smartapp_endpoint(hass)
return True
async def async_migrate_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Handle migration of a previous version config entry.
A config entry created under a previous version must go through the
integration setup again so we can properly retrieve the needed data
elements. Force this by removing the entry and triggering a new flow.
"""
# Remove the entry which will invoke the callback to delete the app.
hass.async_create_task(hass.config_entries.async_remove(entry.entry_id))
# only create new flow if there isn't a pending one for SmartThings.
flows = hass.config_entries.flow.async_progress()
if not [flow for flow in flows if flow["handler"] == DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}
)
)
# Return False because it could not be migrated.
return False
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Initialize config entry which represents an installed SmartApp."""
# For backwards compat
if entry.unique_id is None:
hass.config_entries.async_update_entry(
entry,
unique_id=format_unique_id(
entry.data[CONF_APP_ID], entry.data[CONF_LOCATION_ID]
),
)
if not validate_webhook_requirements(hass):
_LOGGER.warning(
"The 'base_url' of the 'http' integration must be configured and start with 'https://'"
)
return False
api = SmartThings(async_get_clientsession(hass), entry.data[CONF_ACCESS_TOKEN])
remove_entry = False
try:
        # See if the app is already set up. This occurs when there are
        # installs in multiple SmartThings locations (valid use-case).
manager = hass.data[DOMAIN][DATA_MANAGER]
smart_app = manager.smartapps.get(entry.data[CONF_APP_ID])
if not smart_app:
# Validate and setup the app.
app = await api.app(entry.data[CONF_APP_ID])
smart_app = setup_smartapp(hass, app)
# Validate and retrieve the installed app.
installed_app = await validate_installed_app(
api, entry.data[CONF_INSTALLED_APP_ID]
)
# Get scenes
scenes = await async_get_entry_scenes(entry, api)
# Get SmartApp token to sync subscriptions
token = await api.generate_tokens(
entry.data[CONF_CLIENT_ID],
entry.data[CONF_CLIENT_SECRET],
entry.data[CONF_REFRESH_TOKEN],
)
hass.config_entries.async_update_entry(
entry, data={**entry.data, CONF_REFRESH_TOKEN: token.refresh_token}
)
# Get devices and their current status
devices = await api.devices(location_ids=[installed_app.location_id])
async def retrieve_device_status(device):
try:
await device.status.refresh()
except ClientResponseError:
_LOGGER.debug(
"Unable to update status for device: %s (%s), the device will be excluded",
device.label,
device.device_id,
exc_info=True,
)
devices.remove(device)
await asyncio.gather(*(retrieve_device_status(d) for d in devices.copy()))
# Sync device subscriptions
await smartapp_sync_subscriptions(
hass,
token.access_token,
installed_app.location_id,
installed_app.installed_app_id,
devices,
)
# Setup device broker
broker = DeviceBroker(hass, entry, token, smart_app, devices, scenes)
broker.connect()
hass.data[DOMAIN][DATA_BROKERS][entry.entry_id] = broker
except ClientResponseError as ex:
if ex.status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):
_LOGGER.exception(
"Unable to setup configuration entry '%s' - please reconfigure the integration",
entry.title,
)
remove_entry = True
else:
_LOGGER.debug(ex, exc_info=True)
raise ConfigEntryNotReady from ex
except (ClientConnectionError, RuntimeWarning) as ex:
_LOGGER.debug(ex, exc_info=True)
raise ConfigEntryNotReady from ex
if remove_entry:
hass.async_create_task(hass.config_entries.async_remove(entry.entry_id))
# only create new flow if there isn't a pending one for SmartThings.
flows = hass.config_entries.flow.async_progress()
if not [flow for flow in flows if flow["handler"] == DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}
)
)
return False
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_get_entry_scenes(entry: ConfigEntry, api):
"""Get the scenes within an integration."""
try:
return await api.scenes(location_id=entry.data[CONF_LOCATION_ID])
except ClientResponseError as ex:
if ex.status == HTTP_FORBIDDEN:
_LOGGER.exception(
"Unable to load scenes for configuration entry '%s' because the access token does not have the required access",
entry.title,
)
else:
raise
return []
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS].pop(entry.entry_id, None)
if broker:
broker.disconnect()
return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Perform clean-up when entry is being removed."""
api = SmartThings(async_get_clientsession(hass), entry.data[CONF_ACCESS_TOKEN])
# Remove the installed_app, which if already removed raises a HTTP_FORBIDDEN error.
installed_app_id = entry.data[CONF_INSTALLED_APP_ID]
try:
await api.delete_installed_app(installed_app_id)
except ClientResponseError as ex:
if ex.status == HTTP_FORBIDDEN:
_LOGGER.debug(
"Installed app %s has already been removed",
installed_app_id,
exc_info=True,
)
else:
raise
_LOGGER.debug("Removed installed app %s", installed_app_id)
# Remove the app if not referenced by other entries, which if already
# removed raises a HTTP_FORBIDDEN error.
all_entries = hass.config_entries.async_entries(DOMAIN)
app_id = entry.data[CONF_APP_ID]
app_count = sum(1 for entry in all_entries if entry.data[CONF_APP_ID] == app_id)
if app_count > 1:
_LOGGER.debug(
"App %s was not removed because it is in use by other configuration entries",
app_id,
)
return
# Remove the app
try:
await api.delete_app(app_id)
except ClientResponseError as ex:
if ex.status == HTTP_FORBIDDEN:
_LOGGER.debug("App %s has already been removed", app_id, exc_info=True)
else:
raise
_LOGGER.debug("Removed app %s", app_id)
if len(all_entries) == 1:
await unload_smartapp_endpoint(hass)
class DeviceBroker:
"""Manages an individual SmartThings config entry."""
def __init__(
self,
hass: HomeAssistant,
entry: ConfigEntry,
token,
smart_app,
devices: Iterable,
scenes: Iterable,
):
"""Create a new instance of the DeviceBroker."""
self._hass = hass
self._entry = entry
self._installed_app_id = entry.data[CONF_INSTALLED_APP_ID]
self._smart_app = smart_app
self._token = token
self._event_disconnect = None
self._regenerate_token_remove = None
self._assignments = self._assign_capabilities(devices)
self.devices = {device.device_id: device for device in devices}
self.scenes = {scene.scene_id: scene for scene in scenes}
def _assign_capabilities(self, devices: Iterable):
"""Assign platforms to capabilities."""
assignments = {}
for device in devices:
capabilities = device.capabilities.copy()
slots = {}
for platform in PLATFORMS:
platform_module = importlib.import_module(
f".{platform}", self.__module__
)
if not hasattr(platform_module, "get_capabilities"):
continue
assigned = platform_module.get_capabilities(capabilities)
if not assigned:
continue
# Draw-down capabilities and set slot assignment
for capability in assigned:
if capability not in capabilities:
continue
capabilities.remove(capability)
slots[capability] = platform
assignments[device.device_id] = slots
return assignments
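    # Worked example of the draw-down above (hypothetical platform order and
    # capability names, for illustration only): if a device exposes
    # ["switch", "switchLevel"], PLATFORMS is ordered ("light", "switch"),
    # light.get_capabilities() claims both and switch.get_capabilities()
    # claims only "switch", then both capabilities are assigned to "light"
    # and the "switch" platform receives none -- each capability is consumed
    # by the first platform that claims it.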
def connect(self):
"""Connect handlers/listeners for device/lifecycle events."""
# Setup interval to regenerate the refresh token on a periodic basis.
# Tokens expire in 30 days and once expired, cannot be recovered.
async def regenerate_refresh_token(now):
"""Generate a new refresh token and update the config entry."""
await self._token.refresh(
self._entry.data[CONF_CLIENT_ID],
self._entry.data[CONF_CLIENT_SECRET],
)
self._hass.config_entries.async_update_entry(
self._entry,
data={
**self._entry.data,
CONF_REFRESH_TOKEN: self._token.refresh_token,
},
)
_LOGGER.debug(
"Regenerated refresh token for installed app: %s",
self._installed_app_id,
)
self._regenerate_token_remove = async_track_time_interval(
self._hass, regenerate_refresh_token, TOKEN_REFRESH_INTERVAL
)
# Connect handler to incoming device events
self._event_disconnect = self._smart_app.connect_event(self._event_handler)
def disconnect(self):
"""Disconnects handlers/listeners for device/lifecycle events."""
if self._regenerate_token_remove:
self._regenerate_token_remove()
if self._event_disconnect:
self._event_disconnect()
def get_assigned(self, device_id: str, platform: str):
"""Get the capabilities assigned to the platform."""
slots = self._assignments.get(device_id, {})
return [key for key, value in slots.items() if value == platform]
def any_assigned(self, device_id: str, platform: str):
"""Return True if the platform has any assigned capabilities."""
slots = self._assignments.get(device_id, {})
return any(value for value in slots.values() if value == platform)
async def _event_handler(self, req, resp, app):
"""Broker for incoming events."""
# Do not process events received from a different installed app
# under the same parent SmartApp (valid use-scenario)
if req.installed_app_id != self._installed_app_id:
return
updated_devices = set()
for evt in req.events:
if evt.event_type != EVENT_TYPE_DEVICE:
continue
device = self.devices.get(evt.device_id)
if not device:
continue
device.status.apply_attribute_update(
evt.component_id,
evt.capability,
evt.attribute,
evt.value,
data=evt.data,
)
# Fire events for buttons
if (
evt.capability == Capability.button
and evt.attribute == Attribute.button
):
data = {
"component_id": evt.component_id,
"device_id": evt.device_id,
"location_id": evt.location_id,
"value": evt.value,
"name": device.label,
"data": evt.data,
}
self._hass.bus.async_fire(EVENT_BUTTON, data)
_LOGGER.debug("Fired button event: %s", data)
else:
data = {
"location_id": evt.location_id,
"device_id": evt.device_id,
"component_id": evt.component_id,
"capability": evt.capability,
"attribute": evt.attribute,
"value": evt.value,
"data": evt.data,
}
_LOGGER.debug("Push update received: %s", data)
updated_devices.add(device.device_id)
async_dispatcher_send(self._hass, SIGNAL_SMARTTHINGS_UPDATE, updated_devices)
class SmartThingsEntity(Entity):
"""Defines a SmartThings entity."""
def __init__(self, device):
"""Initialize the instance."""
self._device = device
self._dispatcher_remove = None
async def async_added_to_hass(self):
"""Device added to hass."""
async def async_update_state(devices):
"""Update device state."""
if self._device.device_id in devices:
await self.async_update_ha_state(True)
self._dispatcher_remove = async_dispatcher_connect(
self.hass, SIGNAL_SMARTTHINGS_UPDATE, async_update_state
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect the device when removed."""
if self._dispatcher_remove:
self._dispatcher_remove()
@property
def device_info(self):
"""Get attributes about the device."""
return {
"identifiers": {(DOMAIN, self._device.device_id)},
"name": self._device.label,
"model": self._device.device_type_name,
"manufacturer": "Unavailable",
}
@property
def name(self) -> str:
"""Return the name of the device."""
return self._device.label
@property
def should_poll(self) -> bool:
"""No polling needed for this device."""
return False
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._device.device_id
|
|
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for federated_trainer_utils.py."""
import tensorflow as tf
from reconstruction.shared import federated_trainer_utils
class FederatedTrainerUtilsTest(tf.test.TestCase):
def test_build_dataset_split_fn_none(self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=2,
recon_epochs_constant=True,
recon_steps_max=None,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=False,
split_dataset_strategy=None,
split_dataset_proportion=None)
# Round number shouldn't matter.
recon_dataset, post_recon_dataset = split_dataset_fn(client_dataset, 3)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list,
[[0, 1], [2, 3], [4, 5], [0, 1], [2, 3], [4, 5]])
self.assertAllEqual(post_recon_list, [[0, 1], [2, 3], [4, 5]])
def test_build_dataset_split_fn_skip(self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=2,
recon_epochs_constant=True,
recon_steps_max=None,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils.SPLIT_STRATEGY_SKIP,
split_dataset_proportion=2)
# Round number shouldn't matter.
recon_dataset, post_recon_dataset = split_dataset_fn(client_dataset, 3)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [4, 5], [0, 1], [4, 5]])
self.assertAllEqual(post_recon_list, [[2, 3]])
def test_build_dataset_split_fn_aggregated(self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=2,
recon_epochs_constant=True,
recon_steps_max=None,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils
.SPLIT_STRATEGY_AGGREGATED,
split_dataset_proportion=2)
# Round number shouldn't matter.
recon_dataset, post_recon_dataset = split_dataset_fn(client_dataset, 3)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [[4, 5]])
def test_build_dataset_split_fn_none_recon_epochs_variable(self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=None,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=False,
split_dataset_strategy=None,
split_dataset_proportion=None)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [4, 5]])
self.assertAllEqual(post_recon_list, [[0, 1], [2, 3], [4, 5]])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list,
[[0, 1], [2, 3], [4, 5], [0, 1], [2, 3], [4, 5]])
self.assertAllEqual(post_recon_list, [[0, 1], [2, 3], [4, 5]])
def test_build_dataset_split_fn_skip_recon_epochs_variable(self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=None,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils.SPLIT_STRATEGY_SKIP,
split_dataset_proportion=2)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [4, 5]])
self.assertAllEqual(post_recon_list, [[2, 3]])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [4, 5], [0, 1], [4, 5]])
self.assertAllEqual(post_recon_list, [[2, 3]])
def test_build_dataset_split_fn_aggregated_recon_epochs_variable(self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=None,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils
.SPLIT_STRATEGY_AGGREGATED,
split_dataset_proportion=3)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [[4, 5]])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [[4, 5]])
def test_build_dataset_split_fn_none_recon_max_steps(self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=2,
recon_epochs_constant=True,
recon_steps_max=4,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=False,
split_dataset_strategy=None,
split_dataset_proportion=None)
# Round number shouldn't matter.
recon_dataset, post_recon_dataset = split_dataset_fn(client_dataset, 3)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [4, 5], [0, 1]])
self.assertAllEqual(post_recon_list, [[0, 1], [2, 3], [4, 5]])
# Adding more steps than the number of actual steps has no effect.
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=2,
recon_epochs_constant=True,
recon_steps_max=7,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=False,
split_dataset_strategy=None,
split_dataset_proportion=None)
# Round number shouldn't matter.
recon_dataset, post_recon_dataset = split_dataset_fn(client_dataset, 3)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list,
[[0, 1], [2, 3], [4, 5], [0, 1], [2, 3], [4, 5]])
self.assertAllEqual(post_recon_list, [[0, 1], [2, 3], [4, 5]])
def test_build_dataset_split_fn_skip_recon_max_steps(self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=2,
recon_epochs_constant=True,
recon_steps_max=4,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils.SPLIT_STRATEGY_SKIP,
split_dataset_proportion=3)
# Round number shouldn't matter.
recon_dataset, post_recon_dataset = split_dataset_fn(client_dataset, 3)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [0, 1]])
self.assertAllEqual(post_recon_list, [[2, 3], [4, 5]])
# Adding more steps than the number of actual steps has no effect.
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=2,
recon_epochs_constant=True,
recon_steps_max=7,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils.SPLIT_STRATEGY_SKIP,
split_dataset_proportion=3)
# Round number shouldn't matter.
recon_dataset, post_recon_dataset = split_dataset_fn(client_dataset, 3)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [0, 1]])
self.assertAllEqual(post_recon_list, [[2, 3], [4, 5]])
def test_build_dataset_split_fn_aggregated_recon_max_steps(self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=2,
recon_epochs_constant=True,
recon_steps_max=4,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils
.SPLIT_STRATEGY_AGGREGATED,
split_dataset_proportion=2)
# Round number shouldn't matter.
recon_dataset, post_recon_dataset = split_dataset_fn(client_dataset, 3)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [[4, 5]])
# Adding more steps than the number of actual steps has no effect.
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=2,
recon_epochs_constant=True,
recon_steps_max=7,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils
.SPLIT_STRATEGY_AGGREGATED,
split_dataset_proportion=2)
# Round number shouldn't matter.
recon_dataset, post_recon_dataset = split_dataset_fn(client_dataset, 3)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [[4, 5]])
def test_build_dataset_split_fn_none_recon_epochs_variable_max_steps(self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=4,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=False,
split_dataset_strategy=None,
split_dataset_proportion=None)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [4, 5]])
self.assertAllEqual(post_recon_list, [[0, 1], [2, 3], [4, 5]])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [4, 5], [0, 1]])
self.assertAllEqual(post_recon_list, [[0, 1], [2, 3], [4, 5]])
def test_build_dataset_split_fn_skip_recon_epochs_variable_max_steps(self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=4,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils.SPLIT_STRATEGY_SKIP,
split_dataset_proportion=2)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [4, 5]])
self.assertAllEqual(post_recon_list, [[2, 3]])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [4, 5], [0, 1], [4, 5]])
self.assertAllEqual(post_recon_list, [[2, 3]])
def test_build_dataset_split_fn_aggregated_recon_epochs_variable_max_steps(
self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=4,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils
.SPLIT_STRATEGY_AGGREGATED,
split_dataset_proportion=3)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [[4, 5]])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [[4, 5]])
def test_build_dataset_split_fn_none_recon_epochs_variable_max_steps_zero_post_epochs(
self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=4,
post_recon_epochs=0,
post_recon_steps_max=None,
split_dataset=False,
split_dataset_strategy=None,
split_dataset_proportion=None)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [4, 5]])
self.assertAllEqual(post_recon_list, [])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [4, 5], [0, 1]])
self.assertAllEqual(post_recon_list, [])
def test_build_dataset_split_fn_skip_recon_epochs_variable_max_steps_zero_post_epochs(
self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=4,
post_recon_epochs=0,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils.SPLIT_STRATEGY_SKIP,
split_dataset_proportion=2)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [4, 5]])
self.assertAllEqual(post_recon_list, [])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [4, 5], [0, 1], [4, 5]])
self.assertAllEqual(post_recon_list, [])
def test_build_dataset_split_fn_aggregated_recon_epochs_variable_max_steps_zero_post_epochs(
self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=4,
post_recon_epochs=0,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils
.SPLIT_STRATEGY_AGGREGATED,
split_dataset_proportion=3)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [])
def test_build_dataset_split_fn_none_recon_epochs_variable_max_steps_multiple_post_epochs(
self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=4,
post_recon_epochs=2,
post_recon_steps_max=None,
split_dataset=False,
split_dataset_strategy=None,
split_dataset_proportion=None)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [4, 5]])
self.assertAllEqual(post_recon_list,
[[0, 1], [2, 3], [4, 5], [0, 1], [2, 3], [4, 5]])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [4, 5], [0, 1]])
self.assertAllEqual(post_recon_list,
[[0, 1], [2, 3], [4, 5], [0, 1], [2, 3], [4, 5]])
def test_build_dataset_split_fn_skip_recon_epochs_variable_max_steps_multiple_post_epochs(
self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=4,
post_recon_epochs=2,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils.SPLIT_STRATEGY_SKIP,
split_dataset_proportion=2)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [4, 5]])
self.assertAllEqual(post_recon_list, [[2, 3], [2, 3]])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [4, 5], [0, 1], [4, 5]])
self.assertAllEqual(post_recon_list, [[2, 3], [2, 3]])
def test_build_dataset_split_fn_aggregated_recon_epochs_variable_max_steps_multiple_post_epochs(
self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=8,
recon_epochs_constant=False,
recon_steps_max=4,
post_recon_epochs=2,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils
.SPLIT_STRATEGY_AGGREGATED,
split_dataset_proportion=2)
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [[4, 5], [4, 5]])
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [[4, 5], [4, 5]])
def test_build_dataset_split_fn_none_post_recon_multiple_epochs_max_steps(
self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=1,
recon_epochs_constant=True,
recon_steps_max=None,
post_recon_epochs=2,
post_recon_steps_max=4,
split_dataset=False,
split_dataset_strategy=None,
split_dataset_proportion=None)
# Round number doesn't matter.
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [4, 5]])
self.assertAllEqual(post_recon_list, [[0, 1], [2, 3], [4, 5], [0, 1]])
# Round number doesn't matter.
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3], [4, 5]])
self.assertAllEqual(post_recon_list, [[0, 1], [2, 3], [4, 5], [0, 1]])
def test_build_dataset_split_fn_skip_post_recon_multiple_epochs_max_steps(
self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=1,
recon_epochs_constant=True,
recon_steps_max=None,
post_recon_epochs=2,
post_recon_steps_max=4,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils.SPLIT_STRATEGY_SKIP,
split_dataset_proportion=2)
# Round number doesn't matter.
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [4, 5]])
self.assertAllEqual(post_recon_list, [[2, 3], [2, 3]])
# Round number doesn't matter.
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [4, 5]])
self.assertAllEqual(post_recon_list, [[2, 3], [2, 3]])
def test_build_dataset_split_fn_aggregated_post_recon_multiple_epochs_max_steps(
self):
# 3 batches.
client_dataset = tf.data.Dataset.range(6).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=1,
recon_epochs_constant=True,
recon_steps_max=None,
post_recon_epochs=2,
post_recon_steps_max=4,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils
.SPLIT_STRATEGY_AGGREGATED,
split_dataset_proportion=2)
# Round number doesn't matter.
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [[4, 5], [4, 5]])
# Round number doesn't matter.
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [[0, 1], [2, 3]])
self.assertAllEqual(post_recon_list, [[4, 5], [4, 5]])
def test_build_dataset_split_none_fn_split_dataset_zero_batches(self):
"""Ensures clients without any data don't fail."""
# 0 batches.
client_dataset = tf.data.Dataset.range(0).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=1,
recon_epochs_constant=True,
recon_steps_max=None,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=False,
split_dataset_strategy=None,
split_dataset_proportion=None)
# Round number doesn't matter.
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [])
self.assertAllEqual(post_recon_list, [])
# Round number doesn't matter.
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [])
self.assertAllEqual(post_recon_list, [])
def test_build_dataset_split_skip_fn_split_dataset_zero_batches(self):
"""Ensures clients without any data don't fail."""
# 0 batches.
client_dataset = tf.data.Dataset.range(0).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=1,
recon_epochs_constant=True,
recon_steps_max=None,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils.SPLIT_STRATEGY_SKIP,
split_dataset_proportion=10)
# Round number doesn't matter.
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [])
self.assertAllEqual(post_recon_list, [])
# Round number doesn't matter.
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [])
self.assertAllEqual(post_recon_list, [])
def test_build_dataset_split_aggregated_fn_split_dataset_zero_batches(self):
"""Ensures clients without any data don't fail."""
# 0 batches.
client_dataset = tf.data.Dataset.range(0).batch(2)
split_dataset_fn = federated_trainer_utils.build_dataset_split_fn(
recon_epochs_max=1,
recon_epochs_constant=True,
recon_steps_max=None,
post_recon_epochs=1,
post_recon_steps_max=None,
split_dataset=True,
split_dataset_strategy=federated_trainer_utils
.SPLIT_STRATEGY_AGGREGATED,
split_dataset_proportion=10)
# Round number doesn't matter.
round_num = tf.constant(1, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [])
self.assertAllEqual(post_recon_list, [])
# Round number doesn't matter.
round_num = tf.constant(2, dtype=tf.int64)
recon_dataset, post_recon_dataset = split_dataset_fn(
client_dataset, round_num)
recon_list = list(recon_dataset.as_numpy_iterator())
post_recon_list = list(post_recon_dataset.as_numpy_iterator())
self.assertAllEqual(recon_list, [])
self.assertAllEqual(post_recon_list, [])
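# A minimal, self-contained sketch (not the reconstruction library's actual
# implementation) of the SPLIT_STRATEGY_SKIP behaviour exercised above, using
# the `tf` imported at the top of this module: batches whose index is a
# multiple of `proportion` go to the reconstruction dataset, the rest to the
# post-reconstruction dataset. (The aggregated strategy in the tests above
# instead splits off a contiguous tail of batches, and with
# recon_epochs_constant=False the recon dataset is repeated
# min(round_num, recon_epochs_max) times, capped by recon_steps_max.)
def _skip_split_sketch(dataset, proportion):
  indexed = dataset.enumerate()
  recon = (indexed
           .filter(lambda i, batch: i % proportion == 0)
           .map(lambda i, batch: batch))
  post_recon = (indexed
                .filter(lambda i, batch: i % proportion != 0)
                .map(lambda i, batch: batch))
  return recon, post_recon
# For tf.data.Dataset.range(6).batch(2) and proportion=2 this yields recon
# batches [0, 1], [4, 5] and post-recon batch [2, 3], matching
# test_build_dataset_split_fn_skip above.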
if __name__ == '__main__':
tf.test.main()
|
|
# -*- coding: utf-8 -*-
"""The output mediator object."""
import glob
import os
import pytz
from plaso.engine import path_helper
from plaso.formatters import default
from plaso.formatters import manager as formatters_manager
from plaso.formatters import winevt_rc
from plaso.formatters import yaml_formatters_file
from plaso.lib import definitions
from plaso.output import logger
from plaso.winnt import language_ids
class OutputMediator(object):
"""Output mediator.
Attributes:
data_location (Optional[str]): path of the formatter data files.
"""
DEFAULT_LANGUAGE_IDENTIFIER = 'en-US'
# TODO: add smarter language ID to LCID resolving e.g.
# 'en-US' falls back to 'en'.
# LCID 0x0409 is en-US.
DEFAULT_LCID = 0x0409
_DEFAULT_MESSAGE_FORMATTER = default.DefaultEventFormatter()
_WINEVT_RC_DATABASE = 'winevt-rc.db'
def __init__(
self, knowledge_base, data_location=None, dynamic_time=False,
preferred_encoding='utf-8'):
"""Initializes an output mediator.
Args:
knowledge_base (KnowledgeBase): knowledge base.
data_location (Optional[str]): path of the formatter data files.
dynamic_time (Optional[bool]): True if date and time values should be
represented in their granularity or semantically.
preferred_encoding (Optional[str]): preferred encoding to output.
"""
super(OutputMediator, self).__init__()
self._dynamic_time = dynamic_time
self._knowledge_base = knowledge_base
self._language_identifier = self.DEFAULT_LANGUAGE_IDENTIFIER
self._lcid = self.DEFAULT_LCID
self._message_formatters = {}
self._preferred_encoding = preferred_encoding
self._timezone = pytz.UTC
self._winevt_database_reader = None
self.data_location = data_location
@property
def dynamic_time(self):
"""bool: True if date and time values should be represented in their
granularity or semantically.
"""
return self._dynamic_time
@property
def encoding(self):
"""str: preferred encoding."""
return self._preferred_encoding
@property
def timezone(self):
"""The timezone."""
return self._timezone
def _GetWinevtRcDatabaseReader(self):
"""Opens the Windows Event Log resource database reader.
Returns:
WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource
database reader or None.
"""
if not self._winevt_database_reader and self.data_location:
database_path = os.path.join(
self.data_location, self._WINEVT_RC_DATABASE)
if not os.path.isfile(database_path):
return None
self._winevt_database_reader = (
winevt_rc.WinevtResourcesSqlite3DatabaseReader())
if not self._winevt_database_reader.Open(database_path):
self._winevt_database_reader = None
return self._winevt_database_reader
def _ReadMessageFormattersFile(self, path):
"""Reads a message formatters configuration file.
Args:
path (str): path of file that contains the message formatters
configuration.
Raises:
KeyError: if the message formatter is already set for the corresponding
data type.
"""
message_formatters_file = yaml_formatters_file.YAMLFormattersFile()
for message_formatter in message_formatters_file.ReadFromFile(path):
for identifier in message_formatter.custom_helpers:
custom_formatter_helper = (
formatters_manager.FormattersManager.GetEventFormatterHelper(
identifier))
if custom_formatter_helper:
message_formatter.AddHelper(custom_formatter_helper)
self._message_formatters[message_formatter.data_type] = message_formatter
def GetDisplayNameForPathSpec(self, path_spec):
"""Retrieves the display name for a path specification.
Args:
path_spec (dfvfs.PathSpec): path specification.
Returns:
str: human readable version of the path specification.
"""
mount_path = self._knowledge_base.GetMountPath()
text_prepend = self._knowledge_base.GetTextPrepend()
return path_helper.PathHelper.GetDisplayNameForPathSpec(
path_spec, mount_path=mount_path, text_prepend=text_prepend)
def GetHostname(self, event_data, default_hostname='-'):
"""Retrieves the hostname related to the event.
Args:
event_data (EventData): event data.
default_hostname (Optional[str]): default hostname.
Returns:
str: hostname.
"""
hostname = getattr(event_data, 'hostname', None)
if hostname:
return hostname
session_identifier = event_data.GetSessionIdentifier()
if session_identifier is None:
return default_hostname
hostname = self._knowledge_base.GetHostname(
session_identifier=session_identifier)
return hostname or default_hostname
def GetMACBRepresentation(self, event, event_data):
"""Retrieves the MACB representation.
Args:
event (EventObject): event.
event_data (EventData): event data.
Returns:
str: MACB representation.
"""
data_type = getattr(event_data, 'data_type', None)
if not data_type:
return '....'
# The filestat parser is somewhat limited.
if data_type == 'fs:stat':
descriptions = event.timestamp_desc.split(';')
return_characters = ['.', '.', '.', '.']
for description in descriptions:
if description in (
'mtime', definitions.TIME_DESCRIPTION_MODIFICATION):
return_characters[0] = 'M'
elif description in (
'atime', definitions.TIME_DESCRIPTION_LAST_ACCESS):
return_characters[1] = 'A'
elif description in (
'ctime', definitions.TIME_DESCRIPTION_CHANGE):
return_characters[2] = 'C'
elif description in (
'crtime', definitions.TIME_DESCRIPTION_CREATION):
return_characters[3] = 'B'
return ''.join(return_characters)
# Access time.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_LAST_ACCESS,
definitions.TIME_DESCRIPTION_ACCOUNT_CREATED,
definitions.TIME_DESCRIPTION_LAST_VISITED,
definitions.TIME_DESCRIPTION_START,
definitions.TIME_DESCRIPTION_LAST_SHUTDOWN,
definitions.TIME_DESCRIPTION_LAST_LOGIN,
definitions.TIME_DESCRIPTION_LAST_PASSWORD_RESET,
definitions.TIME_DESCRIPTION_LAST_CONNECTED,
definitions.TIME_DESCRIPTION_LAST_RUN,
definitions.TIME_DESCRIPTION_LAST_PRINTED]:
return '.A..'
# Content modification.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_MODIFICATION,
definitions.TIME_DESCRIPTION_WRITTEN,
definitions.TIME_DESCRIPTION_DELETED]:
return 'M...'
# Content creation time.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_CREATION,
definitions.TIME_DESCRIPTION_ADDED,
definitions.TIME_DESCRIPTION_FILE_DOWNLOADED,
definitions.TIME_DESCRIPTION_FIRST_CONNECTED]:
return '...B'
# Metadata modification.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_CHANGE,
definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION]:
return '..C.'
return '....'
def GetMACBRepresentationFromDescriptions(self, timestamp_descriptions):
"""Determines the MACB representation from the timestamp descriptions.
MACB representation is a shorthand for representing one or more of
modification, access, change, birth timestamp descriptions as the letters
"MACB" or a "." if the corresponding timestamp is not set.
Note that this is an output format shorthand and does not guarantee that
the timestamps represent the same occurrence.
Args:
timestamp_descriptions (list[str]): timestamp descriptions, which are
defined in definitions.TIME_DESCRIPTIONS.
Returns:
str: MACB representation.
"""
macb_representation = []
if ('mtime' in timestamp_descriptions or
definitions.TIME_DESCRIPTION_MODIFICATION in timestamp_descriptions):
macb_representation.append('M')
else:
macb_representation.append('.')
if ('atime' in timestamp_descriptions or
definitions.TIME_DESCRIPTION_LAST_ACCESS in timestamp_descriptions):
macb_representation.append('A')
else:
macb_representation.append('.')
if ('ctime' in timestamp_descriptions or
definitions.TIME_DESCRIPTION_CHANGE in timestamp_descriptions):
macb_representation.append('C')
else:
macb_representation.append('.')
if ('crtime' in timestamp_descriptions or
definitions.TIME_DESCRIPTION_CREATION in timestamp_descriptions):
macb_representation.append('B')
else:
macb_representation.append('.')
return ''.join(macb_representation)
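  # Illustrative examples (not part of the original class), where `mediator`
  # is assumed to be an instance of this class and the short filestat-style
  # descriptions are used:
  #
  #   >>> mediator.GetMACBRepresentationFromDescriptions(['atime', 'crtime'])
  #   '.A.B'
  #   >>> mediator.GetMACBRepresentationFromDescriptions(
  #   ...     ['mtime', 'atime', 'ctime', 'crtime'])
  #   'MACB'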
def GetMessageFormatter(self, data_type):
"""Retrieves the message formatter for a specific data type.
Args:
data_type (str): data type.
Returns:
EventFormatter: corresponding message formatter or the default message
formatter if not available.
"""
data_type = data_type.lower()
message_formatter = self._message_formatters.get(data_type, None)
if not message_formatter:
logger.warning(
'Using default message formatter for data type: {0:s}'.format(
data_type))
message_formatter = self._DEFAULT_MESSAGE_FORMATTER
return message_formatter
def GetRelativePathForPathSpec(self, path_spec):
"""Retrieves the relative path for a path specification.
Args:
path_spec (dfvfs.PathSpec): path specification.
Returns:
      str: relative path of the path specification.
"""
mount_path = self._knowledge_base.GetMountPath()
return path_helper.PathHelper.GetRelativePathForPathSpec(
path_spec, mount_path=mount_path)
def GetStoredHostname(self):
"""Retrieves the stored hostname.
Returns:
str: hostname.
"""
return self._knowledge_base.GetHostname()
def GetUsername(self, event_data, default_username='-'):
"""Retrieves the username related to the event.
Args:
event_data (EventData): event data.
default_username (Optional[str]): default username.
Returns:
str: username.
"""
username = getattr(event_data, 'username', None)
if username and username != '-':
return username
session_identifier = event_data.GetSessionIdentifier()
if session_identifier is None:
return default_username
user_sid = getattr(event_data, 'user_sid', None)
username = self._knowledge_base.GetUsernameByIdentifier(
user_sid, session_identifier=session_identifier)
return username or default_username
def GetWindowsEventMessage(self, log_source, message_identifier):
"""Retrieves the message string for a specific Windows Event Log source.
Args:
log_source (str): Event Log source, such as "Application Error".
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
"""
database_reader = self._GetWinevtRcDatabaseReader()
if not database_reader:
return None
if self._lcid != self.DEFAULT_LCID:
message_string = database_reader.GetMessage(
log_source, self._lcid, message_identifier)
if message_string:
return message_string
return database_reader.GetMessage(
log_source, self.DEFAULT_LCID, message_identifier)
def ReadMessageFormattersFromDirectory(self, path):
"""Reads message formatters from a directory.
Args:
path (str): path of directory that contains the message formatters
configuration files.
Raises:
KeyError: if the message formatter is already set for the corresponding
data type.
"""
for formatters_file_path in glob.glob(os.path.join(path, '*.yaml')):
self._ReadMessageFormattersFile(formatters_file_path)
def ReadMessageFormattersFromFile(self, path):
"""Reads message formatters from a file.
Args:
path (str): path of file that contains the message formatters
configuration.
Raises:
KeyError: if the message formatter is already set for the corresponding
data type.
"""
self._ReadMessageFormattersFile(path)
def SetPreferredLanguageIdentifier(self, language_identifier):
"""Sets the preferred language identifier.
Args:
language_identifier (str): language identifier string such as "en-US"
for US English or "is-IS" for Icelandic.
Raises:
KeyError: if the language identifier is not defined.
ValueError: if the language identifier is not a string type.
"""
if not isinstance(language_identifier, str):
raise ValueError('Language identifier is not a string.')
values = language_ids.LANGUAGE_IDENTIFIERS.get(
language_identifier.lower(), None)
if not values:
raise KeyError('Language identifier: {0:s} is not defined.'.format(
language_identifier))
self._language_identifier = language_identifier
self._lcid = values[0]
def SetTimezone(self, timezone):
"""Sets the timezone.
Args:
timezone (str): timezone.
Raises:
ValueError: if the timezone is not supported.
"""
if not timezone:
return
try:
self._timezone = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise ValueError('Unsupported timezone: {0:s}'.format(timezone))
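  # Illustrative sketch (not part of the original class), assuming pytz is
  # installed: an unknown timezone name raises pytz.UnknownTimeZoneError,
  # which SetTimezone converts into a ValueError.
  #
  #   >>> import pytz
  #   >>> pytz.timezone('Europe/Amsterdam').zone
  #   'Europe/Amsterdam'
  #   >>> pytz.timezone('Not/AZone')
  #   Traceback (most recent call last):
  #     ...
  #   pytz.exceptions.UnknownTimeZoneError: 'Not/AZone'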
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_predictor_builder."""
import mock
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import convolutional_box_predictor
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.predictors.heads import mask_head
from object_detection.protos import box_predictor_pb2
from object_detection.protos import hyperparams_pb2
class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_construct_non_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
min_depth: 2
max_depth: 16
num_layers_before_predictor: 2
use_dropout: false
dropout_keep_probability: 0.4
kernel_size: 3
box_code_size: 3
apply_sigmoid_to_scores: true
class_prediction_bias_init: 4.0
use_depthwise: true
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._min_depth, 2)
self.assertEqual(box_predictor._max_depth, 16)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.4)
self.assertTrue(class_head._apply_sigmoid_to_scores)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(class_head._num_class_slots, 10)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
self.assertTrue(class_head._use_depthwise)
def test_construct_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._min_depth, 0)
self.assertEqual(box_predictor._max_depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertTrue(class_head._use_dropout)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8)
self.assertFalse(class_head._apply_sigmoid_to_scores)
self.assertEqual(class_head._num_class_slots, 91)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertFalse(class_head._use_depthwise)
def test_construct_default_conv_box_predictor_with_default_mask_head(self):
box_predictor_text_proto = """
convolutional_box_predictor {
mask_head {
}
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertTrue(convolutional_box_predictor.MASK_PREDICTIONS in
box_predictor._other_heads)
mask_prediction_head = (
box_predictor._other_heads[convolutional_box_predictor.MASK_PREDICTIONS]
)
self.assertEqual(mask_prediction_head._mask_height, 15)
self.assertEqual(mask_prediction_head._mask_width, 15)
self.assertTrue(mask_prediction_head._masks_are_class_agnostic)
def test_construct_default_conv_box_predictor_with_custom_mask_head(self):
box_predictor_text_proto = """
convolutional_box_predictor {
mask_head {
mask_height: 7
mask_width: 7
masks_are_class_agnostic: false
}
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertTrue(convolutional_box_predictor.MASK_PREDICTIONS in
box_predictor._other_heads)
mask_prediction_head = (
box_predictor._other_heads[convolutional_box_predictor.MASK_PREDICTIONS]
)
self.assertEqual(mask_prediction_head._mask_height, 7)
self.assertEqual(mask_prediction_head._mask_width, 7)
self.assertFalse(mask_prediction_head._masks_are_class_agnostic)
class WeightSharedConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
(box_predictor_proto.weight_shared_convolutional_box_predictor
.conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_construct_non_default_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
depth: 2
num_layers_before_predictor: 2
kernel_size: 7
box_code_size: 3
class_prediction_bias_init: 4.0
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
(box_predictor_proto.weight_shared_convolutional_box_predictor.
conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._depth, 2)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, False)
def test_construct_non_default_depthwise_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
depth: 2
num_layers_before_predictor: 2
kernel_size: 7
box_code_size: 3
class_prediction_bias_init: 4.0
use_depthwise: true
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
(box_predictor_proto.weight_shared_convolutional_box_predictor.
conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._depth, 2)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertEqual(box_predictor._apply_batch_norm, False)
self.assertEqual(box_predictor._use_depthwise, True)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
def test_construct_default_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor._depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, False)
def test_construct_default_conv_box_predictor_with_batch_norm(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
batch_norm {
train: true
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor._depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, True)
def test_construct_weight_shared_predictor_with_default_mask_head(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
mask_head {
}
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertTrue(convolutional_box_predictor.MASK_PREDICTIONS in
box_predictor._other_heads)
weight_shared_convolutional_mask_head = (
box_predictor._other_heads[convolutional_box_predictor.MASK_PREDICTIONS]
)
self.assertIsInstance(weight_shared_convolutional_mask_head,
mask_head.WeightSharedConvolutionalMaskHead)
self.assertEqual(weight_shared_convolutional_mask_head._mask_height, 15)
self.assertEqual(weight_shared_convolutional_mask_head._mask_width, 15)
self.assertTrue(
weight_shared_convolutional_mask_head._masks_are_class_agnostic)
def test_construct_weight_shared_predictor_with_custom_mask_head(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
mask_head {
mask_height: 7
mask_width: 7
masks_are_class_agnostic: false
}
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertTrue(convolutional_box_predictor.MASK_PREDICTIONS in
box_predictor._other_heads)
weight_shared_convolutional_mask_head = (
box_predictor._other_heads[convolutional_box_predictor.MASK_PREDICTIONS]
)
self.assertIsInstance(weight_shared_convolutional_mask_head,
mask_head.WeightSharedConvolutionalMaskHead)
self.assertEqual(weight_shared_convolutional_mask_head._mask_height, 7)
self.assertEqual(weight_shared_convolutional_mask_head._mask_width, 7)
self.assertFalse(
weight_shared_convolutional_mask_head._masks_are_class_agnostic)
class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_builder_calls_fc_argscope_fn(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
op: FC
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
mock_argscope_fn.assert_called_with(hyperparams_proto, False)
self.assertEqual(box_predictor._box_prediction_head._fc_hyperparams_fn,
'arg_scope')
self.assertEqual(box_predictor._class_prediction_head._fc_hyperparams_fn,
'arg_scope')
def test_non_default_mask_rcnn_box_predictor(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
op: FC
"""
box_predictor_text_proto = """
mask_rcnn_box_predictor {
use_dropout: true
dropout_keep_probability: 0.8
box_code_size: 3
share_box_across_classes: true
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
def mock_fc_argscope_builder(fc_hyperparams_arg, is_training):
return (fc_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_fc_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
self.assertTrue(box_head._use_dropout)
self.assertTrue(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.8)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 3)
self.assertEqual(box_head._share_box_across_classes, True)
def test_build_default_mask_rcnn_box_predictor(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor = box_predictor_builder.build(
argscope_fn=mock.Mock(return_value='arg_scope'),
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertEqual(len(box_predictor._third_stage_heads.keys()), 0)
def test_build_box_predictor_with_mask_branch(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
hyperparams_pb2.Hyperparams.CONV)
box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
box_predictor_proto.mask_rcnn_box_predictor.mask_height = 16
box_predictor_proto.mask_rcnn_box_predictor.mask_width = 16
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
mock_argscope_fn.assert_has_calls(
[mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
True),
mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
True)], any_order=True)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
third_stage_heads = box_predictor._third_stage_heads
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertTrue(
mask_rcnn_box_predictor.MASK_PREDICTIONS in third_stage_heads)
self.assertEqual(
third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._mask_prediction_conv_depth, 512)
  def test_build_box_predictor_with_convolve_then_upsample_masks(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
hyperparams_pb2.Hyperparams.CONV)
box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
box_predictor_proto.mask_rcnn_box_predictor.mask_height = 24
box_predictor_proto.mask_rcnn_box_predictor.mask_width = 24
box_predictor_proto.mask_rcnn_box_predictor.convolve_then_upsample_masks = (
True)
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
mock_argscope_fn.assert_has_calls(
[mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
True),
mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
True)], any_order=True)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
third_stage_heads = box_predictor._third_stage_heads
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertTrue(
mask_rcnn_box_predictor.MASK_PREDICTIONS in third_stage_heads)
self.assertEqual(
third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._mask_prediction_conv_depth, 512)
self.assertTrue(third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._convolve_then_upsample)
class RfcnBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_fc_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_non_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
box_predictor_text_proto = """
rfcn_box_predictor {
num_spatial_bins_height: 4
num_spatial_bins_width: 4
depth: 4
box_code_size: 3
crop_height: 16
crop_width: 16
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 3)
self.assertEqual(box_predictor._num_spatial_bins, [4, 4])
self.assertEqual(box_predictor._crop_size, [16, 16])
def test_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 4)
self.assertEqual(box_predictor._num_spatial_bins, [3, 3])
self.assertEqual(box_predictor._crop_size, [12, 12])
if __name__ == '__main__':
tf.test.main()
|
|
"""
Module for managing quotas on POSIX-like systems.
"""
import logging
import salt.utils.path
import salt.utils.platform
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
# Define a function alias in order not to shadow built-ins
__func_alias__ = {"set_": "set"}
def __virtual__():
"""
Only work on POSIX-like systems with setquota binary available
"""
if not salt.utils.platform.is_windows() and salt.utils.path.which("setquota"):
return "quota"
return (
False,
"The quota execution module cannot be loaded: the module is only "
"available on POSIX-like systems with the setquota binary available.",
)
def report(mount):
"""
Report on quotas for a specific volume
CLI Example:
.. code-block:: bash
salt '*' quota.report /media/data
"""
ret = {mount: {}}
ret[mount]["User Quotas"] = _parse_quota(mount, "-u")
ret[mount]["Group Quotas"] = _parse_quota(mount, "-g")
return ret
def _parse_quota(mount, opts):
"""
    Parse the output from repquota. Requires that either -u or -g is passed in.
"""
cmd = "repquota -vp {} {}".format(opts, mount)
out = __salt__["cmd.run"](cmd, python_shell=False).splitlines()
mode = "header"
if "-u" in opts:
quotatype = "Users"
elif "-g" in opts:
quotatype = "Groups"
ret = {quotatype: {}}
for line in out:
if not line:
continue
comps = line.split()
if mode == "header":
if "Block grace time" in line:
blockg, inodeg = line.split(";")
blockgc = blockg.split(": ")
inodegc = inodeg.split(": ")
ret["Block Grace Time"] = blockgc[-1:]
ret["Inode Grace Time"] = inodegc[-1:]
elif line.startswith("-"):
mode = "quotas"
elif mode == "quotas":
            # repquota -vp data lines have 10 columns; skip anything shorter
            # to avoid indexing past the end of the line below.
            if len(comps) < 10:
                continue
            if comps[0] not in ret[quotatype]:
                ret[quotatype][comps[0]] = {}
ret[quotatype][comps[0]]["block-used"] = comps[2]
ret[quotatype][comps[0]]["block-soft-limit"] = comps[3]
ret[quotatype][comps[0]]["block-hard-limit"] = comps[4]
ret[quotatype][comps[0]]["block-grace"] = comps[5]
ret[quotatype][comps[0]]["file-used"] = comps[6]
ret[quotatype][comps[0]]["file-soft-limit"] = comps[7]
ret[quotatype][comps[0]]["file-hard-limit"] = comps[8]
ret[quotatype][comps[0]]["file-grace"] = comps[9]
return ret
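# Illustrative sketch (not part of the original module): given a hypothetical
# repquota -vpu data line such as
#
#   larry -- 102400 1048576 2097152 0days 1024 2000 3000 0days
#
# _parse_quota(mount, "-u") would report for that user:
#
#   {"Users": {"larry": {"block-used": "102400",
#                        "block-soft-limit": "1048576",
#                        "block-hard-limit": "2097152",
#                        "block-grace": "0days",
#                        "file-used": "1024",
#                        "file-soft-limit": "2000",
#                        "file-hard-limit": "3000",
#                        "file-grace": "0days"}}}
#
# along with the parsed block and inode grace times from the report header.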
def set_(device, **kwargs):
"""
Calls out to setquota, for a specific user or group
CLI Example:
.. code-block:: bash
salt '*' quota.set /media/data user=larry block-soft-limit=1048576
salt '*' quota.set /media/data group=painters file-hard-limit=1000
"""
empty = {
"block-soft-limit": 0,
"block-hard-limit": 0,
"file-soft-limit": 0,
"file-hard-limit": 0,
}
current = None
cmd = "setquota"
if "user" in kwargs:
cmd += " -u {} ".format(kwargs["user"])
parsed = _parse_quota(device, "-u")
if kwargs["user"] in parsed:
current = parsed["Users"][kwargs["user"]]
else:
current = empty
ret = "User: {}".format(kwargs["user"])
if "group" in kwargs:
if "user" in kwargs:
raise SaltInvocationError("Please specify a user or group, not both.")
cmd += " -g {} ".format(kwargs["group"])
parsed = _parse_quota(device, "-g")
if kwargs["group"] in parsed:
current = parsed["Groups"][kwargs["group"]]
else:
current = empty
ret = "Group: {}".format(kwargs["group"])
if not current:
raise CommandExecutionError("A valid user or group was not found")
for limit in (
"block-soft-limit",
"block-hard-limit",
"file-soft-limit",
"file-hard-limit",
):
if limit in kwargs:
current[limit] = kwargs[limit]
cmd += "{} {} {} {} {}".format(
current["block-soft-limit"],
current["block-hard-limit"],
current["file-soft-limit"],
current["file-hard-limit"],
device,
)
result = __salt__["cmd.run_all"](cmd, python_shell=False)
if result["retcode"] != 0:
raise CommandExecutionError(
"Unable to set desired quota. Error follows: \n{}".format(result["stderr"])
)
return {ret: current}
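# Illustrative sketch (not part of the original module): for the first CLI
# example above, with no existing quota recorded for user "larry", the
# assembled command would be
#
#   setquota -u larry 1048576 0 0 0 /media/data
#
# i.e. block soft/hard limits, then file soft/hard limits, then the device.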
def warn():
"""
Runs the warnquota command, to send warning emails to users who
are over their quota limit.
CLI Example:
.. code-block:: bash
salt '*' quota.warn
"""
__salt__["cmd.run"]("quotawarn")
def stats():
"""
Runs the quotastats command, and returns the parsed output
CLI Example:
.. code-block:: bash
salt '*' quota.stats
"""
ret = {}
out = __salt__["cmd.run"]("quotastats").splitlines()
for line in out:
if not line:
continue
comps = line.split(": ")
ret[comps[0]] = comps[1]
return ret
def on(device):
"""
Turns on the quota system
CLI Example:
.. code-block:: bash
salt '*' quota.on
"""
cmd = "quotaon {}".format(device)
__salt__["cmd.run"](cmd, python_shell=False)
return True
def off(device):
"""
Turns off the quota system
CLI Example:
.. code-block:: bash
salt '*' quota.off
"""
cmd = "quotaoff {}".format(device)
__salt__["cmd.run"](cmd, python_shell=False)
return True
def get_mode(device):
"""
Report whether the quota system for this device is on or off
CLI Example:
.. code-block:: bash
salt '*' quota.get_mode
"""
ret = {}
cmd = "quotaon -p {}".format(device)
out = __salt__["cmd.run"](cmd, python_shell=False)
for line in out.splitlines():
comps = line.strip().split()
if comps[3] not in ret:
if comps[0].startswith("quotaon"):
if comps[1].startswith("Mountpoint"):
ret[comps[4]] = "disabled"
continue
elif comps[1].startswith("Cannot"):
ret[device] = "Not found"
return ret
continue
ret[comps[3]] = {
"device": comps[4].replace("(", "").replace(")", ""),
}
ret[comps[3]][comps[0]] = comps[6]
return ret
|
|
#!/usr/bin/env python3
"""Test runner for typeshed.
Depends on pytype being installed.
If pytype is installed:
1. For every pyi, do nothing if it is in pytype_exclude_list.txt.
2. Otherwise, call 'pytype.io.parse_pyi'.
Option two will load the file and all of its builtin and typeshed dependencies. This
will also discover incorrect usage of imported modules.
"""
import argparse
import os
import re
import sys
import traceback
from typing import List, Match, Optional, Sequence, Tuple
from pytype import config as pytype_config, load_pytd
TYPESHED_SUBDIRS = ["stdlib", "third_party"]
TYPESHED_HOME = "TYPESHED_HOME"
UNSET = object() # marker for tracking the TYPESHED_HOME environment variable
_LOADERS = {}
def main() -> None:
args = create_parser().parse_args()
typeshed_location = args.typeshed_location or os.getcwd()
subdir_paths = [os.path.join(typeshed_location, d) for d in TYPESHED_SUBDIRS]
check_subdirs_discoverable(subdir_paths)
files_to_test = determine_files_to_test(typeshed_location=typeshed_location, paths=args.files or subdir_paths)
run_all_tests(
files_to_test=files_to_test,
typeshed_location=typeshed_location,
print_stderr=args.print_stderr,
dry_run=args.dry_run,
)
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Pytype/typeshed tests.")
parser.add_argument("-n", "--dry-run", action="store_true", default=False, help="Don't actually run tests")
# Default to '' so that symlinking typeshed subdirs in cwd will work.
parser.add_argument("--typeshed-location", type=str, default="", help="Path to typeshed installation.")
# Set to true to print a stack trace every time an exception is thrown.
parser.add_argument(
"--print-stderr", action="store_true", default=False, help="Print stderr every time an error is encountered."
)
parser.add_argument(
"files",
metavar="FILE",
type=str,
nargs="*",
help="Files or directories to check. (Default: Check all files.)",
)
return parser
class PathMatcher:
def __init__(self, patterns: Sequence[str]) -> None:
patterns = [re.escape(os.path.join(*x.split("/"))) for x in patterns]
self.matcher = re.compile(r"({})$".format("|".join(patterns))) if patterns else None
def search(self, path: str) -> Optional[Match[str]]:
if not self.matcher:
return None
return self.matcher.search(path)
def load_exclude_list(typeshed_location: str) -> List[str]:
filename = os.path.join(typeshed_location, "tests", "pytype_exclude_list.txt")
skip_re = re.compile(r"^\s*([^\s#]+)\s*(?:#.*)?$")
skip = []
with open(filename) as f:
for line in f:
skip_match = skip_re.match(line)
if skip_match:
skip.append(skip_match.group(1))
return skip
def run_pytype(*, filename: str, python_version: str, typeshed_location: str) -> Optional[str]:
"""Runs pytype, returning the stderr if any."""
if python_version not in _LOADERS:
options = pytype_config.Options.create(
"", parse_pyi=True, python_version=python_version)
loader = load_pytd.create_loader(options)
_LOADERS[python_version] = (options, loader)
options, loader = _LOADERS[python_version]
old_typeshed_home = os.environ.get(TYPESHED_HOME, UNSET)
os.environ[TYPESHED_HOME] = typeshed_location
try:
with pytype_config.verbosity_from(options):
ast = loader.load_file(_get_module_name(filename), filename)
loader.finish_and_verify_ast(ast)
except Exception:
stderr = traceback.format_exc()
else:
stderr = None
if old_typeshed_home is UNSET:
del os.environ[TYPESHED_HOME]
else:
os.environ[TYPESHED_HOME] = old_typeshed_home
return stderr
def _get_relative(filename: str) -> str:
top = 0
for d in TYPESHED_SUBDIRS:
try:
top = filename.index(d)
except ValueError:
continue
else:
break
return filename[top:]
def _get_module_name(filename: str) -> str:
"""Converts a filename {subdir}/m.n/module/foo to module.foo."""
return ".".join(_get_relative(filename).split(os.path.sep)[2:]).replace(".pyi", "").replace(".__init__", "")
def _is_version(path: str, version: str) -> bool:
return any("{}{}{}".format(d, os.path.sep, version) in path for d in TYPESHED_SUBDIRS)
def check_subdirs_discoverable(subdir_paths: List[str]) -> None:
for p in subdir_paths:
if not os.path.isdir(p):
raise SystemExit("Cannot find typeshed subdir at {} (specify parent dir via --typeshed-location)".format(p))
def determine_files_to_test(*, typeshed_location: str, paths: Sequence[str]) -> List[Tuple[str, int]]:
"""Determine all files to test, checking if it's in the exclude list and which Python versions to use.
Returns a list of pairs of the file path and Python version as an int."""
skipped = PathMatcher(load_exclude_list(typeshed_location))
filenames = find_stubs_in_paths(paths)
files = []
for f in sorted(filenames):
rel = _get_relative(f)
if skipped.search(rel):
continue
if _is_version(f, "2and3"):
files.append((f, 2))
files.append((f, 3))
elif _is_version(f, "2"):
files.append((f, 2))
elif _is_version(f, "3"):
files.append((f, 3))
else:
print("Unrecognized path: {}".format(f))
return files
def find_stubs_in_paths(paths: Sequence[str]) -> List[str]:
filenames = []
for path in paths:
if os.path.isdir(path):
for root, _, fns in os.walk(path):
filenames.extend(os.path.join(root, fn) for fn in fns if fn.endswith(".pyi"))
else:
filenames.append(path)
return filenames
def run_all_tests(*, files_to_test: Sequence[Tuple[str, int]], typeshed_location: str, print_stderr: bool, dry_run: bool) -> None:
bad = []
errors = 0
total_tests = len(files_to_test)
print("Testing files with pytype...")
for i, (f, version) in enumerate(files_to_test):
stderr = (
run_pytype(
filename=f,
python_version="2.7" if version == 2 else "{0.major}.{0.minor}".format(sys.version_info),
typeshed_location=typeshed_location,
)
if not dry_run
else None
)
if stderr:
if print_stderr:
print(stderr)
errors += 1
stacktrace_final_line = stderr.rstrip().rsplit("\n", 1)[-1]
bad.append((_get_relative(f), stacktrace_final_line))
runs = i + 1
if runs % 25 == 0:
print(" {:3d}/{:d} with {:3d} errors".format(runs, total_tests, errors))
print("Ran pytype with {:d} pyis, got {:d} errors.".format(total_tests, errors))
for f, err in bad:
print("{}: {}".format(f, err))
if errors:
raise SystemExit("\nRun again with --print-stderr to get the full stacktrace.")
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
from struct import pack, unpack
from ambari_ws4py.exc import FrameTooLargeException, ProtocolException
from ambari_ws4py.compat import py3k, ord, range
# Frame opcodes defined in the spec.
OPCODE_CONTINUATION = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
__all__ = ['Frame']
class Frame(object):
def __init__(self, opcode=None, body=b'', masking_key=None, fin=0, rsv1=0, rsv2=0, rsv3=0):
"""
Implements the framing protocol as defined by RFC 6455.
.. code-block:: python
:linenos:
>>> test_mask = 'XXXXXX' # perhaps from os.urandom(4)
>>> f = Frame(OPCODE_TEXT, 'hello world', masking_key=test_mask, fin=1)
>>> bytes = f.build()
>>> bytes.encode('hex')
'818bbe04e66ad6618a06d1249105cc6882'
>>> f = Frame()
>>> f.parser.send(bytes[0])
1
>>> f.parser.send(bytes[1])
4
.. seealso:: Data Framing http://tools.ietf.org/html/rfc6455#section-5.2
"""
if not isinstance(body, bytes):
raise TypeError("The body must be properly encoded")
self.opcode = opcode
self.body = body
self.masking_key = masking_key
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.payload_length = len(body)
self._parser = None
@property
def parser(self):
if self._parser is None:
self._parser = self._parsing()
# Python generators must be initialized once.
next(self.parser)
return self._parser
def _cleanup(self):
if self._parser:
self._parser.close()
self._parser = None
def build(self):
"""
Builds a frame from the instance's attributes and returns
its bytes representation.
"""
header = b''
if self.fin > 0x1:
raise ValueError('FIN bit parameter must be 0 or 1')
if 0x3 <= self.opcode <= 0x7 or 0xB <= self.opcode:
raise ValueError('Opcode cannot be a reserved opcode')
## +-+-+-+-+-------+
## |F|R|R|R| opcode|
## |I|S|S|S| (4) |
## |N|V|V|V| |
## | |1|2|3| |
## +-+-+-+-+-------+
header = pack('!B', ((self.fin << 7)
| (self.rsv1 << 6)
| (self.rsv2 << 5)
| (self.rsv3 << 4)
| self.opcode))
## +-+-------------+-------------------------------+
## |M| Payload len | Extended payload length |
## |A| (7) | (16/63) |
## |S| | (if payload len==126/127) |
## |K| | |
## +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
## | Extended payload length continued, if payload len == 127 |
## + - - - - - - - - - - - - - - - +-------------------------------+
if self.masking_key: mask_bit = 1 << 7
else: mask_bit = 0
length = self.payload_length
if length < 126:
header += pack('!B', (mask_bit | length))
elif length < (1 << 16):
header += pack('!B', (mask_bit | 126)) + pack('!H', length)
elif length < (1 << 63):
header += pack('!B', (mask_bit | 127)) + pack('!Q', length)
else:
raise FrameTooLargeException()
## + - - - - - - - - - - - - - - - +-------------------------------+
## | |Masking-key, if MASK set to 1 |
## +-------------------------------+-------------------------------+
## | Masking-key (continued) | Payload Data |
## +-------------------------------- - - - - - - - - - - - - - - - +
## : Payload Data continued ... :
## + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
## | Payload Data continued ... |
## +---------------------------------------------------------------+
body = self.body
if not self.masking_key:
return bytes(header + body)
return bytes(header + self.masking_key + self.mask(body))
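    # Illustrative sketch (not part of the original class): on Python 3, a
    # short unmasked text frame builds to a two-byte header followed by the
    # raw payload. 0x81 encodes FIN=1 with opcode 0x1 (text); 0x02 is the
    # 7-bit payload length with the MASK bit cleared.
    #
    #   >>> Frame(opcode=OPCODE_TEXT, body=b'hi', fin=1).build()
    #   b'\x81\x02hi'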
def _parsing(self):
"""
Generator to parse bytes into a frame. Yields until
enough bytes have been read or an error is met.
"""
buf = b''
some_bytes = b''
# yield until we get the first header's byte
while not some_bytes:
some_bytes = (yield 1)
first_byte = some_bytes[0] if isinstance(some_bytes, bytearray) else ord(some_bytes[0])
# frame-fin = %x0 ; more frames of this message follow
# / %x1 ; final frame of this message
self.fin = (first_byte >> 7) & 1
self.rsv1 = (first_byte >> 6) & 1
self.rsv2 = (first_byte >> 5) & 1
self.rsv3 = (first_byte >> 4) & 1
self.opcode = first_byte & 0xf
# frame-rsv1 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv2 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv3 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
if self.rsv1 or self.rsv2 or self.rsv3:
raise ProtocolException()
        # opcodes 3 through 7 and those above 0xA are currently reserved
if 2 < self.opcode < 8 or self.opcode > 0xA:
raise ProtocolException()
# control frames cannot be fragmented
if self.opcode > 0x7 and self.fin == 0:
raise ProtocolException()
        # do we already have enough bytes to continue?
some_bytes = some_bytes[1:] if some_bytes and len(some_bytes) > 1 else b''
# Yield until we get the second header's byte
while not some_bytes:
some_bytes = (yield 1)
second_byte = some_bytes[0] if isinstance(some_bytes, bytearray) else ord(some_bytes[0])
mask = (second_byte >> 7) & 1
self.payload_length = second_byte & 0x7f
        # All control frames MUST have a payload length of 125 bytes or less
if self.opcode > 0x7 and self.payload_length > 125:
raise FrameTooLargeException()
if some_bytes and len(some_bytes) > 1:
buf = some_bytes[1:]
some_bytes = buf
else:
buf = b''
some_bytes = b''
if self.payload_length == 127:
# This will compute the actual application data size
if len(buf) < 8:
nxt_buf_size = 8 - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while len(some_bytes) < 8:
b = (yield 8 - len(some_bytes))
if b is not None:
some_bytes = some_bytes + b
if len(some_bytes) > 8:
buf = some_bytes[8:]
some_bytes = some_bytes[:8]
else:
some_bytes = buf[:8]
buf = buf[8:]
extended_payload_length = some_bytes
self.payload_length = unpack(
'!Q', extended_payload_length)[0]
if self.payload_length > 0x7FFFFFFFFFFFFFFF:
raise FrameTooLargeException()
elif self.payload_length == 126:
if len(buf) < 2:
nxt_buf_size = 2 - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while len(some_bytes) < 2:
b = (yield 2 - len(some_bytes))
if b is not None:
some_bytes = some_bytes + b
if len(some_bytes) > 2:
buf = some_bytes[2:]
some_bytes = some_bytes[:2]
else:
some_bytes = buf[:2]
buf = buf[2:]
extended_payload_length = some_bytes
self.payload_length = unpack(
'!H', extended_payload_length)[0]
if mask:
if len(buf) < 4:
nxt_buf_size = 4 - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while not some_bytes or len(some_bytes) < 4:
b = (yield 4 - len(some_bytes))
if b is not None:
some_bytes = some_bytes + b
                if len(some_bytes) > 4:
                    buf = some_bytes[4:]
                    some_bytes = some_bytes[:4]
else:
some_bytes = buf[:4]
buf = buf[4:]
self.masking_key = some_bytes
if len(buf) < self.payload_length:
nxt_buf_size = self.payload_length - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while len(some_bytes) < self.payload_length:
l = self.payload_length - len(some_bytes)
b = (yield l)
if b is not None:
some_bytes = some_bytes + b
else:
if self.payload_length == len(buf):
some_bytes = buf
else:
some_bytes = buf[:self.payload_length]
self.body = some_bytes
yield
def mask(self, data):
"""
Performs the masking or unmasking operation on data
using the simple masking algorithm:
..
j = i MOD 4
transformed-octet-i = original-octet-i XOR masking-key-octet-j
"""
masked = bytearray(data)
if py3k: key = self.masking_key
else: key = map(ord, self.masking_key)
for i in range(len(data)):
masked[i] = masked[i] ^ key[i%4]
return masked
unmask = mask
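    # Illustrative sketch (not part of the original class): masking is an
    # involution, so applying it twice with the same 4-byte key restores the
    # original payload, which is why `unmask` can simply alias `mask`.
    #
    #   >>> f = Frame(masking_key=b'\x00\x01\x02\x03')
    #   >>> f.unmask(f.mask(b'hello')) == bytearray(b'hello')
    #   True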
|
|
# coding: utf-8
"""
Standalone file utils.
Nothing in this module should have any knowledge of config or the layout
and structure of the site and pages in the site.
"""
from __future__ import unicode_literals
import logging
import markdown
import os
import pkg_resources
import shutil
import sys
import yaml
import fnmatch
from mkdocs import toc, exceptions
try: # pragma: no cover
from urllib.parse import urlparse, urlunparse, urljoin # noqa
from urllib.request import pathname2url # noqa
from collections import UserDict # noqa
except ImportError: # pragma: no cover
from urlparse import urlparse, urlunparse, urljoin # noqa
from urllib import pathname2url # noqa
from UserDict import UserDict # noqa
PY3 = sys.version_info[0] == 3
if PY3: # pragma: no cover
string_types = str, # noqa
text_type = str # noqa
else: # pragma: no cover
string_types = basestring, # noqa
text_type = unicode # noqa
log = logging.getLogger(__name__)
def yaml_load(source, loader=yaml.Loader):
"""
Wrap PyYaml's loader so we can extend it to suit our needs.
Load all strings as unicode.
http://stackoverflow.com/a/2967461/3609487
"""
def construct_yaml_str(self, node):
"""
Override the default string handling function to always return
unicode objects.
"""
return self.construct_scalar(node)
class Loader(loader):
"""
Define a custom loader derived from the global loader to leave the
global loader unaltered.
"""
# Attach our unicode constructor to our custom loader ensuring all strings
# will be unicode on translation.
Loader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str)
try:
return yaml.load(source, Loader)
finally:
# TODO: Remove this when external calls are properly cleaning up file
# objects. Some mkdocs internal calls, sometimes in test lib, will
# load configs with a file object but never close it. On some
# systems, if a delete action is performed on that file without Python
# closing that object, there will be an access error. This will
# process the file and close it as there should be no more use for the
# file once we process the yaml content.
if hasattr(source, 'close'):
source.close()
def reduce_list(data_set):
""" Reduce duplicate items in a list and preserve order """
seen = set()
return [item for item in data_set if
item not in seen and not seen.add(item)]
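# Illustrative example (not part of the original module): duplicates are
# dropped while the first-seen order is preserved.
#
#   >>> reduce_list([1, 2, 1, 3, 2])
#   [1, 2, 3]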
def copy_file(source_path, output_path):
"""
Copy source_path to output_path, making sure any parent directories exist.
"""
output_dir = os.path.dirname(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
shutil.copy(source_path, output_path)
def write_file(content, output_path):
"""
Write content to output_path, making sure any parent directories exist.
"""
output_dir = os.path.dirname(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
open(output_path, 'wb').write(content)
def clean_directory(directory):
"""
Remove the content of a directory recursively but not the directory itself.
"""
if not os.path.exists(directory):
return
for entry in os.listdir(directory):
# Don't remove hidden files from the directory. We never copy files
# that are hidden, so we shouldn't delete them either.
if entry.startswith('.'):
continue
path = os.path.join(directory, entry)
if os.path.isdir(path):
shutil.rmtree(path, True)
else:
os.unlink(path)
def copy_media_files(from_dir, to_dir, exclude=None):
"""
Recursively copy all files except markdown and exclude[ed] files into another directory.
`exclude` accepts a list of Unix shell-style wildcards (`['*.py', '*.pyc']`).
Note that `exclude` only operates on file names, not directories.
"""
for (source_dir, dirnames, filenames) in os.walk(from_dir, followlinks=True):
relative_path = os.path.relpath(source_dir, from_dir)
output_dir = os.path.normpath(os.path.join(to_dir, relative_path))
# Filter file names using Unix pattern matching
# Always filter file names starting with a '.'
exclude_patterns = ['.*']
exclude_patterns.extend(exclude or [])
for pattern in exclude_patterns:
filenames = [f for f in filenames if not fnmatch.fnmatch(f, pattern)]
# Filter the dirnames that start with a '.' and update the list in
# place to prevent us walking these.
dirnames[:] = [d for d in dirnames if not d.startswith('.')]
for filename in filenames:
if not is_markdown_file(filename):
source_path = os.path.join(source_dir, filename)
output_path = os.path.join(output_dir, filename)
copy_file(source_path, output_path)
def get_html_path(path):
"""
Map a source file path to an output html path.
Paths like 'index.md' will be converted to 'index.html'
Paths like 'about.md' will be converted to 'about/index.html'
Paths like 'api-guide/core.md' will be converted to 'api-guide/core/index.html'
"""
path = os.path.splitext(path)[0]
if os.path.basename(path) == 'index':
return path + '.html'
return "/".join((path, 'index.html'))
def get_url_path(path, use_directory_urls=True):
"""
Map a source file path to an output html path.
Paths like 'index.md' will be converted to '/'
Paths like 'about.md' will be converted to '/about/'
Paths like 'api-guide/core.md' will be converted to '/api-guide/core/'
    If `use_directory_urls` is `False`, returned URLs will include a trailing
`index.html` rather than just returning the directory path.
"""
path = get_html_path(path)
url = '/' + path.replace(os.path.sep, '/')
if use_directory_urls:
return url[:-len('index.html')]
return url
def is_homepage(path):
return os.path.splitext(path)[0] == 'index'
def is_markdown_file(path):
"""
Return True if the given file path is a Markdown file.
http://superuser.com/questions/249436/file-extension-for-markdown-files
"""
ext = os.path.splitext(path)[1].lower()
return ext in [
'.markdown',
'.mdown',
'.mkdn',
'.mkd',
'.md',
]
def is_css_file(path):
"""
Return True if the given file path is a CSS file.
"""
ext = os.path.splitext(path)[1].lower()
return ext in [
'.css',
]
def is_javascript_file(path):
"""
    Return True if the given file path is a JavaScript file.
"""
ext = os.path.splitext(path)[1].lower()
return ext in [
'.js',
'.javascript'
]
def is_html_file(path):
"""
Return True if the given file path is an HTML file.
"""
ext = os.path.splitext(path)[1].lower()
return ext in [
'.html',
'.htm',
]
def is_template_file(path):
"""
    Return True if the given file path is a template file.
"""
ext = os.path.splitext(path)[1].lower()
return ext in [
'.html',
'.htm',
'.xml',
]
def create_media_urls(nav, path_list):
"""
Return a list of URLs that have been processed correctly for inclusion in
a page.
"""
final_urls = []
for path in path_list:
# Allow links to fully qualified URL's
parsed = urlparse(path)
if parsed.netloc:
final_urls.append(path)
continue
# We must be looking at a local path.
url = path_to_url(path)
relative_url = '%s/%s' % (nav.url_context.make_relative('/'), url)
final_urls.append(relative_url)
return final_urls
def create_relative_media_url(nav, url):
"""
For a current page, create a relative url based on the given URL.
On index.md (which becomes /index.html):
image.png -> ./image.png
/image.png -> ./image.png
On sub/page.md (which becomes /sub/page/index.html):
image.png -> ../image.png
/image.png -> ../../image.png
On sub/index.md (which becomes /sub/index.html):
image.png -> ./image.png
/image.png -> ./image.png
"""
# Allow links to fully qualified URL's
parsed = urlparse(url)
if parsed.netloc:
return url
# If the URL we are looking at starts with a /, then it should be
# considered as absolute and will be 'relative' to the root.
if url.startswith('/'):
base = '/'
url = url[1:]
else:
base = nav.url_context.base_path
relative_base = nav.url_context.make_relative(base)
if relative_base == "." and url.startswith("./"):
relative_url = url
else:
relative_url = '%s/%s' % (relative_base, url)
# TODO: Fix this, this is a hack. Relative urls are not being calculated
# correctly for images in the same directory as the markdown. I think this
# is due to us moving it into a directory with index.html, but I'm not sure
if (nav.file_context.current_file.endswith("/index.md") is False and
nav.url_context.base_path != '/' and
relative_url.startswith("./")):
relative_url = ".%s" % relative_url
return relative_url
def path_to_url(path):
"""Convert a system path to a URL."""
if os.path.sep == '/':
return path
if sys.version_info < (3, 0):
path = path.encode('utf8')
return pathname2url(path)
def convert_markdown(markdown_source, extensions=None, extension_configs=None):
"""
Convert the Markdown source file to HTML content, and additionally
return the parsed table of contents, and a dictionary of any metadata
that was specified in the Markdown file.
`extensions` is an optional sequence of Python Markdown extensions to add
to the default set.
"""
md = markdown.Markdown(
extensions=extensions or [],
extension_configs=extension_configs or {}
)
html_content = md.convert(markdown_source)
    # On completely blank markdown files, no Meta or toc properties are added
    # to the generated document.
meta = getattr(md, 'Meta', {})
toc_html = getattr(md, 'toc', '')
# Post process the generated table of contents into a data structure
table_of_contents = toc.TableOfContents(toc_html)
return (html_content, table_of_contents, meta)
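# Hedged sketch of calling convert_markdown() directly; the 'toc' and 'meta'
# extension names are assumed to resolve to the Python-Markdown built-ins that
# populate md.toc and md.Meta, which the function reads above. Illustrative
# only; not called anywhere.
def _example_convert_markdown():
    html, table_of_contents, meta = convert_markdown(
        "# Welcome\n\nSome *Markdown* text.\n",
        extensions=['toc', 'meta'],
    )
    return html, table_of_contents, meta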
def get_themes():
"""Return a dict of theme names and their locations"""
themes = {}
builtins = pkg_resources.get_entry_map(dist='mkdocs', group='mkdocs.themes')
for theme in pkg_resources.iter_entry_points(group='mkdocs.themes'):
if theme.name in builtins and theme.dist.key != 'mkdocs':
raise exceptions.ConfigurationError(
"The theme {0} is a builtin theme but {1} provides a theme "
"with the same name".format(theme.name, theme.dist.key))
elif theme.name in themes:
multiple_packages = [themes[theme.name].dist.key, theme.dist.key]
log.warning("The theme %s is provided by the Python packages "
"'%s'. The one in %s will be used.",
theme.name, ','.join(multiple_packages), theme.dist.key)
themes[theme.name] = theme
themes = dict((name, os.path.dirname(os.path.abspath(theme.load().__file__)))
for name, theme in themes.items())
return themes
def get_theme_names():
"""Return a list containing all the names of all the builtin themes."""
return get_themes().keys()
def filename_to_title(filename):
title = os.path.splitext(filename)[0]
title = title.replace('-', ' ').replace('_', ' ')
# Capitalize if the filename was all lowercase, otherwise leave it as-is.
if title.lower() == title:
title = title.capitalize()
return title
def dirname_to_title(dirname):
title = dirname
title = title.replace('-', ' ').replace('_', ' ')
# Capitalize if the dirname was all lowercase, otherwise leave it as-is.
if title.lower() == title:
title = title.capitalize()
return title
def find_or_create_node(branch, key):
"""
    Given a list, look for a dictionary with a key matching `key` and return
    its value. If no such dictionary exists, create one with an empty list as
    the value, append it to the branch, and return that list.
"""
for node in branch:
if not isinstance(node, dict):
continue
if key in node:
return node[key]
new_branch = []
node = {key: new_branch}
branch.append(node)
return new_branch
def nest_paths(paths):
"""
Given a list of paths, convert them into a nested structure that will match
the pages config.
"""
nested = []
for path in paths:
if os.path.sep not in path:
nested.append(path)
continue
directory, _ = os.path.split(path)
parts = directory.split(os.path.sep)
branch = nested
for part in parts:
part = dirname_to_title(part)
branch = find_or_create_node(branch, part)
branch.append(path)
return nested
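# Hedged sketch of the nesting behaviour, assuming a POSIX os.path.sep so the
# '/' in the paths is treated as a directory separator; directory names are
# turned into titles via dirname_to_title(). Illustrative only.
def _example_nest_paths():
    pages = nest_paths(['index.md',
                        'user-guide/install.md',
                        'user-guide/config.md'])
    # pages == ['index.md',
    #           {'User guide': ['user-guide/install.md',
    #                           'user-guide/config.md']}]
    return pages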
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.
WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
from test_framework.test_framework import OakcoinTestFramework
from test_framework.util import *
import time
import os
MIN_BLOCKS_TO_KEEP = 288
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
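# calc_usage() sums the sizes of the files directly inside `blockdir` and
# reports the total in MiB; the assertions below compare it against the
# 550 MiB prune target, e.g. calc_usage(self.prunedir) < 550 once pruning runs.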
class PruneTest(OakcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 6
# Create nodes 0 and 1 to mine.
# Create node 2 to test pruning.
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
        # Create node 5 to test wallet in prune mode, but do not connect it
self.extra_args = [["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5"],
["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5"],
["-maxreceivebuffer=20000", "-prune=550"],
["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
["-prune=550"]]
def setup_network(self):
self.setup_nodes()
self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/"
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[0], 4)
sync_blocks(self.nodes[0:5])
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
mine_large_block(self.nodes[0], self.utxo_cache_0)
sync_blocks(self.nodes[0:5])
def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early")
self.log.info("Success")
self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 30:
raise AssertionError("blk00000.dat not pruned when it should be")
self.log.info("Success")
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorgs when node0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
self.stop_node(0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
# Mine 24 blocks in node 1
for i in range(24):
if j == 0:
mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
            # Create connections in this order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
height = self.nodes[1].getblockcount()
self.log.info("Current block height: %d" % height)
invalidheight = height-287
badhash = self.nodes[1].getblockhash(invalidheight)
self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight))
self.nodes[1].invalidateblock(badhash)
        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
assert(self.nodes[1].getblockcount() == invalidheight - 1)
self.log.info("New best height: %d" % self.nodes[1].getblockcount())
# Reboot node1 to clear those giant tx's from mempool
self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
self.log.info("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
self.log.info("Reconnect nodes")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3], timeout=120)
self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
for i in range(22):
# This can be slow, so do this in multiple RPC calls to avoid
# RPC timeouts.
self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
sync_blocks(self.nodes[0:3], timeout=300)
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
return invalidheight,badhash
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
assert_raises_jsonrpc(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
self.log.info("Will need to redownload block %d" % self.forkheight)
# Verify that we have enough history to reorg back to the fork point
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it's expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
self.nodes[0].invalidateblock(curchainhash)
assert(self.nodes[0].getblockcount() == self.mainchainheight)
assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
waitstart = time.time()
while self.nodes[2].getblockcount() < goalbestheight:
time.sleep(0.1)
if time.time() - waitstart > 900:
raise AssertionError("Node 2 didn't reorg to proper height")
assert(self.nodes[2].getbestblockhash() == goalbesthash)
# Verify we can now have the data for a block previously pruned
assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def manual_test(self, node_number, use_timestamp):
# at this point, node has 995 blocks and has not yet run in prune mode
node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, timewait=900)
assert_equal(node.getblockcount(), 995)
assert_raises_jsonrpc(-1, "not in prune mode", node.pruneblockchain, 500)
self.stop_node(node_number)
# now re-start in manual pruning mode
node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-prune=1"], timewait=900)
assert_equal(node.getblockcount(), 995)
def height(index):
if use_timestamp:
return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
else:
return index
def prune(index, expected_ret=None):
ret = node.pruneblockchain(height(index))
# Check the return value. When use_timestamp is True, just check
# that the return value is less than or equal to the expected
# value, because when more than one block is generated per second,
# a timestamp will not be granular enough to uniquely identify an
# individual block.
if expected_ret is None:
expected_ret = index
if use_timestamp:
assert_greater_than(ret, 0)
assert_greater_than(expected_ret + 1, ret)
else:
assert_equal(ret, expected_ret)
def has_block(index):
return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_jsonrpc(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
node.generate(6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
# negative heights should raise an exception
assert_raises_jsonrpc(-8, "Negative", node.pruneblockchain, -10)
# height=100 too low to prune first block file so this is a no-op
prune(100)
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# Does nothing
node.pruneblockchain(height(0))
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# height=500 should prune first file
prune(500)
if has_block(0):
raise AssertionError("blk00000.dat is still there, should be pruned by now")
if not has_block(1):
raise AssertionError("blk00001.dat is missing when should still be there")
# height=650 should prune second file
prune(650)
if has_block(1):
raise AssertionError("blk00001.dat is still there, should be pruned by now")
# height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
if not has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
node.generate(288)
prune(1000)
if has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
if has_block(3):
raise AssertionError("blk00003.dat is still there, should be pruned by now")
# stop node, start back up with auto-prune at 550MB, make sure still runs
self.stop_node(node_number)
self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-prune=550"], timewait=900)
self.log.info("Success")
def wallet_test(self):
# check that the pruning node's wallet is still in good shape
self.log.info("Stop and start pruning node to trigger wallet rescan")
self.stop_node(2)
start_node(2, self.options.tmpdir, ["-prune=550"])
self.log.info("Success")
        # check that the wallet loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494.
self.log.info("Syncing node 5 to test wallet")
connect_nodes(self.nodes[0], 5)
nds = [self.nodes[0], self.nodes[5]]
sync_blocks(nds, wait=5, timeout=300)
self.stop_node(5) #stop and start to trigger rescan
start_node(5, self.options.tmpdir, ["-prune=550"])
self.log.info("Success")
def run_test(self):
self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
self.log.info("Mining a big blockchain of 995 blocks")
# Determine default relay fee
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache_0 = []
self.utxo_cache_1 = []
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
# stop manual-pruning node with 995 blocks
self.stop_node(3)
self.stop_node(4)
self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() #1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
self.log.info("Check that we can survive a 288 block reorg still")
(self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, must also perform this invalidation
# on N0 and mine a new longest chain to trigger.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
self.log.info("Test manual pruning with block indices")
self.manual_test(3, use_timestamp=False)
self.log.info("Test manual pruning with timestamps")
self.manual_test(4, use_timestamp=True)
self.log.info("Test wallet re-scan")
self.wallet_test()
self.log.info("Done")
if __name__ == '__main__':
PruneTest().main()
|
|
'''
Created on Feb 15, 2013
@author: eric
'''
from collections import namedtuple
import random
from osgeo import gdal, ogr
BoundingBox = namedtuple('BoundingBox', ['min_x', 'min_y','max_x', 'max_y'])
def extents(database, table_name, where=None, lat_col='_db_lat', lon_col='_db_lon'):
'''Return the bounding box for a table in the database. The partition must specify
a table
'''
# Find the extents of the data and figure out the offsets for the array.
e= database.connection.execute
if where:
where = "WHERE "+where
else:
where = ''
r = e("""SELECT min({lon}) as min_x, min({lat}) as min_y,
max({lon}) as max_x, max({lat}) as max_y from {table} {where}"""
.format(lat=lat_col, lon=lon_col, table=table_name, where=where)
).first()
# Convert to a regular tuple
o = BoundingBox(r[0], r[1],r[2],r[3])
return o
#From http://danieljlewis.org/files/2010/06/Jenks.pdf
#
# !!!! Use pysal instead!
# !!!! http://pysal.geodacenter.org/1.2/library/esda/mapclassify.html#pysal.esda.mapclassify.Natural_Breaks
#
# Or, a cleaner Python implementation: https://gist.github.com/drewda/1299198
def jenks_breaks(dataList, numClass):
print "A"
mat1 = []
for i in range(0, len(dataList) + 1):
temp = []
for j in range(0, numClass + 1):
temp.append(0)
mat1.append(temp)
print "B"
mat2 = []
for i in range(0, len(dataList) + 1):
temp = []
for j in range(0, numClass + 1):
temp.append(0)
mat2.append(temp)
print "C"
for i in range(1, numClass + 1):
mat1[1][i] = 1
mat2[1][i] = 0
for j in range(2, len(dataList) + 1):
mat2[j][i] = float('inf')
print "D"
v = 0.0
# # iterations = datalist * .5*datalist * Numclass
for l in range(2, len(dataList) + 1):
s1 = 0.0
s2 = 0.0
w = 0.0
for m in range(1, l + 1):
i3 = l - m + 1
val = float(dataList[i3 - 1])
s2 += val * val
s1 += val
w += 1
v = s2 - (s1 * s1) / w
i4 = i3 - 1
if i4 != 0:
for j in range(2, numClass + 1):
if mat2[l][j] >= (v + mat2[i4][j - 1]):
mat1[l][j] = i3
mat2[l][j] = v + mat2[i4][j - 1]
mat1[l][1] = 1
mat2[l][1] = v
k = len(dataList)
kclass = []
print "E"
for i in range(0, numClass + 1):
kclass.append(0)
kclass[numClass] = float(dataList[len(dataList) - 1])
countNum = numClass
print 'F'
while countNum >= 2:
#print "rank = " + str(mat1[k][countNum])
id_ = int((mat1[k][countNum]) - 2)
#print "val = " + str(dataList[id])
kclass[countNum - 1] = dataList[id_]
k = int((mat1[k][countNum] - 1))
countNum -= 1
return kclass
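# Hedged usage sketch for jenks_breaks(): the data is assumed to be sorted, the
# returned list has numClass + 1 entries, kclass[0] is left at 0 by the
# implementation above, and the last entry is the data maximum. Not called.
def _example_jenks_breaks():
    data = sorted([4, 5, 9, 10, 11, 12, 14, 26, 28, 30])
    return jenks_breaks(data, 3)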
def getGVF( dataList, numClass ):
""" The Goodness of Variance Fit (GVF) is found by taking the
difference between the squared deviations from the array mean (SDAM)
and the squared deviations from the class means (SDCM), and dividing by the SDAM
"""
breaks = jenks_breaks(dataList, numClass)
dataList.sort()
listMean = sum(dataList)/len(dataList)
print listMean
SDAM = 0.0
for i in range(0,len(dataList)):
sqDev = (dataList[i] - listMean)**2
SDAM += sqDev
SDCM = 0.0
for i in range(0,numClass):
if breaks[i] == 0:
classStart = 0
else:
classStart = dataList.index(breaks[i])
classStart += 1
classEnd = dataList.index(breaks[i+1])
classList = dataList[classStart:classEnd+1]
classMean = sum(classList)/len(classList)
print classMean
preSDCM = 0.0
for j in range(0,len(classList)):
sqDev2 = (classList[j] - classMean)**2
preSDCM += sqDev2
SDCM += preSDCM
return (SDAM - SDCM)/SDAM
def rasterize(pixel_size=25):
# Open the data source
RASTERIZE_COLOR_FIELD = "__color__"
orig_data_source = ogr.Open("test.shp")
# Make a copy of the layer's data source because we'll need to
# modify its attributes table
source_ds = ogr.GetDriverByName("Memory").CopyDataSource(orig_data_source, "")
source_layer = source_ds.GetLayer(0)
source_srs = source_layer.GetSpatialRef()
x_min, x_max, y_min, y_max = source_layer.GetExtent()
# Create a field in the source layer to hold the features colors
field_def = ogr.FieldDefn(RASTERIZE_COLOR_FIELD, ogr.OFTReal)
source_layer.CreateField(field_def)
source_layer_def = source_layer.GetLayerDefn()
field_index = source_layer_def.GetFieldIndex(RASTERIZE_COLOR_FIELD)
# Generate random values for the color field (it's here that the value
# of the attribute should be used, but you get the idea)
for feature in source_layer:
feature.SetField(field_index, random.randint(0, 255))
source_layer.SetFeature(feature)
# Create the destination data source
x_res = int((x_max - x_min) / pixel_size)
y_res = int((y_max - y_min) / pixel_size)
target_ds = gdal.GetDriverByName('GTiff').Create('test.tif', x_res,
y_res, 3, gdal.GDT_Byte)
target_ds.SetGeoTransform(( x_min, pixel_size, 0, y_max, 0, -pixel_size,))
if source_srs:
# Make the target raster have the same projection as the source
target_ds.SetProjection(source_srs.ExportToWkt())
else:
# Source has no projection (needs GDAL >= 1.7.0 to work)
target_ds.SetProjection('LOCAL_CS["arbitrary"]')
# Rasterize
err = gdal.RasterizeLayer(target_ds, (3, 2, 1), source_layer,
burn_values=(0, 0, 0),
options=["ATTRIBUTE=%s" % RASTERIZE_COLOR_FIELD])
if err != 0:
raise Exception("error rasterizing layer: %s" % err)
def create_poly( points, srs):
"""Create a polygon from a list of points"""
#create polygon object:
ring = ogr.Geometry(type=ogr.wkbLinearRing)
for x,y in points:
ring.AddPoint(x, y)#LowerLeft
# Close
ring.AddPoint(points[0][0], points[0][1])
poly = ogr.Geometry(type=ogr.wkbPolygon)
poly.AssignSpatialReference(srs)
poly.AddGeometry(ring)
return poly
def create_bb( corners, srs):
"""Create a boundingbox from a list or tuple of the four corners
Corners has four values: x_min, x_max, y_min, y_max
The input can be taken directory from Feature.GetEnvelope()
"""
c = corners
return create_poly(((c[0], c[2]),
(c[0], c[3]),
(c[1], c[3]),
(c[1], c[2]),
), srs)
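# Hedged sketch: build a WGS84 bounding-box polygon from an envelope tuple in
# GetEnvelope() order (x_min, x_max, y_min, y_max); the coordinates are
# illustrative only and the function is not called anywhere.
def _example_create_bb():
    from osgeo import osr
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    return create_bb((-117.3, -116.1, 32.5, 33.5), srs)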
def combine_envelopes( geos, use_bb=True, use_distance=False):
"""Find geometries that intersect"""
loops = 0
while True:
i, new_geos = _combine_envelopes(geos, use_bb, use_distance)
old = len(geos)
geos = None
geos = [g.Clone() for g in new_geos]
loops += 1
print "{}) {} reductions. {} old, {} new".format(loops, i, old, len(geos))
if old == len(geos):
break
return geos
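# Note: combine_envelopes() keeps re-running _combine_envelopes() until a pass
# produces no further reductions, i.e. the number of geometries stops shrinking.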
def _combine_envelopes(geometries, use_bb = True, use_distance=False):
"""Inner support function for combine_envelopes"""
import ambry.geo as dg
reductions = 0
new_geometries = []
accum = None
reduced = set()
for i1 in range(len(geometries)):
if i1 in reduced:
continue
g1 = geometries[i1]
for i2 in range(i1+1, len(geometries)):
if i2 in reduced:
continue
g2 = geometries[i2]
intersects = False
if (g1.Intersects(g2) or g1.Contains(g2) or g2.Contains(g1) or g1.Touches(g2)):
intersects = True
            # If the final output is to convert the reduced geometries to bounding boxes, it
            # can have BBs that intersect that were not reduced, because the underlying geometries
            # didn't intersect
if use_bb and not intersects:
bb1 = dg.create_bb(g1.GetEnvelope(), g1.GetSpatialReference())
bb2 = dg.create_bb(g2.GetEnvelope(), g2.GetSpatialReference())
if bb1.Intersects(bb2):
intersects = True
if use_distance and not intersects:
if use_bb:
if bb1.Distance(bb2) < use_distance:
intersects = True
else:
if g1.Distance(g2) < use_distance:
intersects = True
if intersects:
reductions += 1
reduced.add(i2)
if not accum:
accum = g1.Union(g2)
else:
accum = accum.Union(g2)
if accum is not None:
new_geometries.append(accum.Clone())
accum = None
else:
new_geometries.append(g1.Clone())
return reductions, new_geometries
def bound_clusters_in_raster( a, aa, shape_file_dir,
contour_interval,contour_value, use_bb=True, use_distance=False):
"""Create a shapefile that contains contours and bounding boxes for clusters
of contours.
    :param a: A numpy array that contains the data in which to find clusters
:type a: Numpy array
:param aa: The analysis object that sets the coordinate system for the area that contains the array
:type aa: ambry.geo.AnalysisArea
:param shape_file_dir: The path to a directory where generated files will be stored.
:type shape_file_dir: string
:param contour_interval: The difference between successive contour intervals.
:type contour_interval: float
    :param contour_value: The contour value for which bounding boxes are generated.
    :type contour_value: float
    :param use_bb: If True, compute nearness and intersection using the contours' bounding boxes, not the geometry
    :type use_bb: bool
    :param use_distance: If not False, consider contours that are closer than this value to be overlapping.
    :type use_distance: number
:rtype: Returns a list of dictionaries, one for each of the combined bounding boxes
This method will store, in the `shape_file_dir` directory:
* a GeoTIFF representation of the array `a`
    * An ESRI shapefile layer named `contours`, holding all of the contours.
    * A layer named `contour_bounds` with the bounding boxes for all of the contours with value `contour_value`
    * A layer named `combined_bounds` with bounding boxes of intersecting and nearby boxes from `contour_bounds`
    The routine will iteratively combine contours that overlap.
    If `use_distance` is set to a number, contours that are closer than this value will be joined.
If `use_bb` is set, the intersection and distance computations use the bounding boxes of the contours,
not the contours themselves.
"""
import ambry.geo as dg
from osgeo.gdalconst import GDT_Float32
import ambry.util as util
from osgeo import gdal
import ogr, os
import numpy as np
if os.path.exists(shape_file_dir):
util.rm_rf(shape_file_dir)
os.makedirs(shape_file_dir)
rasterf = os.path.join(shape_file_dir,'contour.tiff')
ogr_ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource(shape_file_dir)
# Setup the countour layer.
ogr_lyr = ogr_ds.CreateLayer('contours', aa.srs)
ogr_lyr.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))
ogr_lyr.CreateField(ogr.FieldDefn('value', ogr.OFTReal))
# Create the contours from the GeoTIFF file.
ds = aa.get_geotiff(rasterf, data_type=GDT_Float32)
ds.GetRasterBand(1).SetNoDataValue(0)
ds.GetRasterBand(1).WriteArray(np.flipud(a))
gdal.ContourGenerate(ds.GetRasterBand(1),
contour_interval, # contourInterval
0, # contourBase
[], # fixedLevelCount
0, # useNoData
0, # noDataValue
ogr_lyr, #destination layer
0, #idField
1 # elevation field
)
# Get buffered bounding boxes around each of the hotspots,
# and put them into a new layer.
bound_lyr = ogr_ds.CreateLayer('contour_bounds', aa.srs)
for i in range(ogr_lyr.GetFeatureCount()):
f1 = ogr_lyr.GetFeature(i)
if f1.GetFieldAsDouble('value') != contour_value:
continue
g1 = f1.GetGeometryRef()
bb = dg.create_bb(g1.GetEnvelope(), g1.GetSpatialReference())
f = ogr.Feature(bound_lyr.GetLayerDefn())
f.SetGeometry(bb)
bound_lyr.CreateFeature(f)
# Doing a full loop instead of a list comprehension b/c the way that comprehensions
# compose arrays results in segfaults, probably because a copied geometry
# object is being released before being used.
geos = []
for i in range(bound_lyr.GetFeatureCount()):
f = bound_lyr.GetFeature(i)
g = f.geometry()
geos.append(g.Clone())
# Combine hot spots that have intersecting bounding boxes, to get larger
# areas that cover all of the adjacent intersecting smaller areas.
geos = dg.combine_envelopes(geos, use_bb=use_bb, use_distance = use_distance)
# Write out the combined bounds areas.
lyr = ogr_ds.CreateLayer('combined_bounds', aa.srs)
lyr.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('area', ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn('name', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('code', ogr.OFTString))
envelopes = []
id = 1
for env in geos:
f = ogr.Feature(lyr.GetLayerDefn())
bb = dg.create_bb(env.GetEnvelope(), env.GetSpatialReference())
f.SetGeometry(bb)
f.SetField(0, id)
f.SetField(1, bb.Area())
f.SetField(2, None)
f.SetField(3, None)
        lyr.CreateFeature(f)
        # Record the same id that was written to the feature's 'id' field, then
        # increment for the next envelope.
        envelopes.append({'id': id, 'env': bb.GetEnvelope(), 'area': bb.Area()})
        id += 1
return envelopes
def get_shapefile_geometry_types(shape_file):
type_map = {'1':'GEOMETRY',
'2':'GEOMETRYCOLLECTION',
'3':'POINT',
'Point':'POINT',
'4':'MULTIPOINT',
'5':'POLYGON',
'6':'MULTIPOLYGON',
'7':'LINESTRING',
'Line String':'LINESTRING',
'3D Line String':'LINESTRING',
'8':'MULTILINESTRING',
'3D Multi Line String':'MULTILINESTRING',
'Multi Line String':'MULTILINESTRING',
'3D Point':'POINT',
'3D Multi Point':'MULTIPOINT',
'Polygon':'POLYGON',
'3D Polygon':'POLYGON',
'Multi Polygon':'MULTIPOLYGON',
'3D Multi Polygon':'MULTIPOLYGON',
}
shapes = ogr.Open(shape_file)
layer = shapes.GetLayer(0)
types = set()
type_ = None
limit = 20000
count = layer.GetFeatureCount()
if count > limit:
skip = layer.GetFeatureCount() / limit
else:
skip = 1
checked = 0
for i in range(0,layer.GetFeatureCount(),skip):
feature = layer.GetFeature(i)
types.add(type_map[ogr.GeometryTypeToName(feature.geometry().GetGeometryType())])
checked += 1
if len(types) == 1:
type_ = list(types).pop()
elif len(types) == 2:
if set(('POLYGON','MULTIPOLYGON')) & types == set(('POLYGON','MULTIPOLYGON')):
type_ = 'MULTIPOLYGON'
elif set(('POINT', 'MULTIPOINT')) & types == set(('POINT', 'MULTIPOINT')):
type_ = 'MULTIPOINT'
elif set(('LINESTRING', 'MULTILINESTRING')) & types == set(('LINESTRING', 'MULTILINESTRING')):
type_ = 'MULTILINESTRING'
else:
raise Exception("Didn't get valid combination of types: "+str(types))
else:
raise Exception("Can't deal with files that have three more type_ different geometry types, or less than one: "+str(types))
return types, type_
def segment_points(areas,table_name=None, query_template=None, places_query=None, bb_clause=None, bb_type='ll'):
"""A generator that yields information that can be used to classify
points into areas
    :param areas: A `Bundle` or `partition` object with access to the places database
    :param places_query: A query to return places. Must return, for each row, the field names 'id', 'name'
    and 'wkt'
    :param bb_type: Either 'll' to use lon/lat for the bounding box query, or 'xy' to use x/y for the query
    :rtype: A generator that yields (area, query, is_in) tuples
The 'wkt' field returned by the query is the Well Know Text representation of the area
geometry
"""
import osr
dest_srs = ogr.osr.SpatialReference()
dest_srs.ImportFromEPSG(4326)
source_srs = areas.get_srs()
transform = osr.CoordinateTransformation(source_srs, dest_srs)
if query_template is None:
# Took the 'empty_clause' out because it is really slow if there is no index.
empty_clause = "AND ({target_col} IS NULL OR {target_col} = 'NONE' OR {target_col} = '-')"
query_template = "SELECT * FROM {table_name} WHERE {bb_clause} "
if places_query is None:
places_query = "SELECT *, AsText(geometry) AS wkt FROM {} ORDER BY area ASC".format(areas.identity.table)
if bb_clause is None:
if bb_type == 'll':
bb_clause = "lon BETWEEN {x1} AND {x2} AND lat BETWEEN {y1} and {y2}"
elif bb_type == 'xy':
bb_clause = "x BETWEEN {x1} AND {x2} AND y BETWEEN {y1} and {y2}"
else:
raise ValueError("Must use 'll' or 'xy' for bb_type. got: {}".format(bb_type))
for area in areas.query(places_query):
g = ogr.CreateGeometryFromWkt(area['wkt'])
g.Transform(transform)
e = g.GetEnvelope()
bb = bb_clause.format(x1=e[0], x2=e[1], y1=e[2], y2=e[3])
query = query_template.format(bb_clause=bb, table_name = table_name, target_col=area['type'])
def is_in(x, y):
"""Clients call this closure to make the determination if the
point is in the area"""
p = ogr.Geometry(ogr.wkbPoint)
p.SetPoint_2D(0, x, y)
if g.Contains(p):
return True
else:
return False
area = dict(area)
yield area, query, is_in
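# Hedged usage sketch for segment_points(); `areas_partition`, `db` and the
# 'incidents' table are hypothetical names, not part of this module:
#
#     for area, query, is_in in segment_points(areas_partition,
#                                               table_name='incidents'):
#         for row in db.connection.execute(query):
#             if is_in(row['lon'], row['lat']):
#                 pass  # tag the row with area['id'], etc.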
|
|
""" terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
from __future__ import absolute_import, division, print_function
import itertools
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
import pytest
import py
import sys
import time
import platform
import _pytest._pluggy as pluggy
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption('-v', '--verbose', action="count",
dest="verbose", default=0, help="increase verbosity."),
group._addoption('-q', '--quiet', action="count",
dest="quiet", default=0, help="decrease verbosity."),
group._addoption('-r',
action="store", dest="reportchars", default='', metavar="chars",
help="show extra test summary info as specified by chars (f)ailed, "
"(E)error, (s)skipped, (x)failed, (X)passed, "
"(p)passed, (P)passed with output, (a)all except pP. "
"Warnings are displayed at all times except when "
"--disable-warnings is set")
group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False,
dest='disable_warnings', action='store_true',
help='disable warnings summary')
group._addoption('-l', '--showlocals',
action="store_true", dest="showlocals", default=False,
help="show locals in tracebacks (disabled by default).")
group._addoption('--tb', metavar="style",
action="store", dest="tbstyle", default='auto',
choices=['auto', 'long', 'short', 'no', 'line', 'native'],
help="traceback print mode (auto/long/short/line/native/no).")
group._addoption('--fulltrace', '--full-trace',
action="store_true", default=False,
help="don't cut any tracebacks (default is to cut).")
group._addoption('--color', metavar="color",
action="store", dest="color", default='auto',
choices=['yes', 'no', 'auto'],
help="color terminal output (yes/no/auto).")
def pytest_configure(config):
config.option.verbose -= config.option.quiet
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, 'terminalreporter')
if config.option.debug or config.option.traceconfig:
def mywriter(tags, args):
msg = " ".join(map(str, args))
reporter.write_line("[traceconfig] " + msg)
config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config):
reportopts = ""
reportchars = config.option.reportchars
if not config.option.disable_warnings and 'w' not in reportchars:
reportchars += 'w'
elif config.option.disable_warnings and 'w' in reportchars:
reportchars = reportchars.replace('w', '')
if reportchars:
for char in reportchars:
if char not in reportopts and char != 'a':
reportopts += char
elif char == 'a':
reportopts = 'fEsxXw'
return reportopts
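# Hedged examples of the mapping implemented above (derived from the code, not
# from pytest's documentation):
#   -r fE  (warnings enabled)       -> 'fEw'
#   -ra                             -> 'fEsxXw'
#   -rw with --disable-warnings     -> ''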
def pytest_report_teststatus(report):
if report.passed:
letter = "."
elif report.skipped:
letter = "s"
elif report.failed:
letter = "F"
if report.when != "call":
letter = "f"
return report.outcome, letter, report.outcome.upper()
class WarningReport:
"""
Simple structure to hold warnings information captured by ``pytest_logwarning``.
"""
def __init__(self, code, message, nodeid=None, fslocation=None):
"""
:param code: unused
:param str message: user friendly message about the warning
:param str|None nodeid: node id that generated the warning (see ``get_location``).
:param tuple|py.path.local fslocation:
file system location of the source of the warning (see ``get_location``).
"""
self.code = code
self.message = message
self.nodeid = nodeid
self.fslocation = fslocation
def get_location(self, config):
"""
Returns the more user-friendly information about the location
of a warning, or None.
"""
if self.nodeid:
return self.nodeid
if self.fslocation:
if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
filename, linenum = self.fslocation[:2]
relpath = py.path.local(filename).relto(config.invocation_dir)
return '%s:%s' % (relpath, linenum)
else:
return str(self.fslocation)
return None
class TerminalReporter:
def __init__(self, config, file=None):
import _pytest.config
self.config = config
self.verbosity = self.config.option.verbose
self.showheader = self.verbosity >= 0
self.showfspath = self.verbosity >= 0
self.showlongtestinfo = self.verbosity > 0
self._numcollected = 0
self.stats = {}
self.startdir = py.path.local()
if file is None:
file = sys.stdout
self._tw = self.writer = _pytest.config.create_terminal_writer(config,
file)
self.currentfspath = None
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
self.isatty = file.isatty()
def hasopt(self, char):
char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, nodeid, res):
fspath = self.config.rootdir.join(nodeid.split("::")[0])
if fspath != self.currentfspath:
self.currentfspath = fspath
fspath = self.startdir.bestrelpath(fspath)
self._tw.line()
self._tw.write(fspath + " ")
self._tw.write(res)
def write_ensure_prefix(self, prefix, extra="", **kwargs):
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self):
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write(self, content, **markup):
self._tw.write(content, **markup)
def write_line(self, line, **markup):
if not py.builtin._istext(line):
line = py.builtin.text(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line, **markup):
line = str(line)
self._tw.write("\r" + line, **markup)
def write_sep(self, sep, title=None, **markup):
self.ensure_newline()
self._tw.sep(sep, title, **markup)
def section(self, title, sep="=", **kw):
self._tw.sep(sep, title, **kw)
def line(self, msg, **kw):
self._tw.line(msg, **kw)
def pytest_internalerror(self, excrepr):
for line in py.builtin.text(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return 1
def pytest_logwarning(self, code, fslocation, message, nodeid):
warnings = self.stats.setdefault("warnings", [])
warning = WarningReport(code=code, fslocation=fslocation,
message=message, nodeid=nodeid)
warnings.append(warning)
def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
msg = "PLUGIN registered: %s" % (plugin,)
# XXX this event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line
self.write_line(msg)
def pytest_deselected(self, items):
self.stats.setdefault('deselected', []).extend(items)
def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
# 1st test of a module starts running
if self.showlongtestinfo:
line = self._locationline(nodeid, *location)
self.write_ensure_prefix(line, "")
elif self.showfspath:
fsid = nodeid.split("::")[0]
self.write_fspath_result(fsid, "")
def pytest_runtest_logreport(self, report):
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep)
cat, letter, word = res
self.stats.setdefault(cat, []).append(rep)
self._tests_ran = True
if not letter and not word:
# probably passed setup/teardown
return
if self.verbosity <= 0:
if not hasattr(rep, 'node') and self.showfspath:
self.write_fspath_result(rep.nodeid, letter)
else:
self._tw.write(letter)
else:
if isinstance(word, tuple):
word, markup = word
else:
if rep.passed:
markup = {'green':True}
elif rep.failed:
markup = {'red':True}
elif rep.skipped:
markup = {'yellow':True}
line = self._locationline(rep.nodeid, *rep.location)
if not hasattr(rep, 'node'):
self.write_ensure_prefix(line, word, **markup)
#self._tw.write(word, **markup)
else:
self.ensure_newline()
if hasattr(rep, 'node'):
self._tw.write("[%s] " % rep.node.gateway.id)
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
def pytest_collection(self):
if not self.isatty and self.config.option.verbose >= 1:
self.write("collecting ... ", bold=True)
def pytest_collectreport(self, report):
if report.failed:
self.stats.setdefault("error", []).append(report)
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.isatty:
#self.write_fspath_result(report.nodeid, 'E')
self.report_collect()
def report_collect(self, final=False):
if self.config.option.verbose < 0:
return
errors = len(self.stats.get('error', []))
skipped = len(self.stats.get('skipped', []))
if final:
line = "collected "
else:
line = "collecting "
line += str(self._numcollected) + " items"
if errors:
line += " / %d errors" % errors
if skipped:
line += " / %d skipped" % skipped
if self.isatty:
if final:
line += " \n"
self.rewrite(line, bold=True)
else:
self.write_line(line)
def pytest_collection_modifyitems(self):
self.report_collect(True)
@pytest.hookimpl(trylast=True)
def pytest_sessionstart(self, session):
self._sessionstarttime = time.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
msg = "platform %s -- Python %s" % (sys.platform, verinfo)
if hasattr(sys, 'pypy_version_info'):
verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
msg += ", pytest-%s, py-%s, pluggy-%s" % (
pytest.__version__, py.__version__, pluggy.__version__)
if self.verbosity > 0 or self.config.option.debug or \
getattr(self.config.option, 'pastebin', None):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, startdir=self.startdir)
lines.reverse()
for line in flatten(lines):
self.write_line(line)
def pytest_report_header(self, config):
inifile = ""
if config.inifile:
inifile = " " + config.rootdir.bestrelpath(config.inifile)
lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)]
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
lines.append(
"plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
return lines
def pytest_collection_finish(self, session):
if self.config.option.collectonly:
self._printcollecteditems(session.items)
if self.stats.get('failed'):
self._tw.sep("!", "collection failures")
for rep in self.stats.get('failed'):
rep.toterminal(self._tw)
return 1
return 0
if not self.showheader:
return
#for i, testarg in enumerate(self.config.args):
# self.write_line("test path %d: %s" %(i+1, testarg))
def _printcollecteditems(self, items):
# to print out items and their parent collectors
# we take care to leave out Instances aka ()
# because later versions are going to get rid of them anyway
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = {}
for item in items:
name = item.nodeid.split('::', 1)[0]
counts[name] = counts.get(name, 0) + 1
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
nodeid = item.nodeid
nodeid = nodeid.replace("::()::", "::")
self._tw.line(nodeid)
return
stack = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[:len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack):]:
stack.append(col)
#if col.name == "()":
# continue
indent = (len(stack) - 1) * " "
self._tw.line("%s%s" % (indent, col))
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(self, exitstatus):
outcome = yield
outcome.get_result()
self._tw.line("")
summary_exit_codes = (
EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
EXIT_NOTESTSCOLLECTED)
if exitstatus in summary_exit_codes:
self.config.hook.pytest_terminal_summary(terminalreporter=self,
exitstatus=exitstatus)
self.summary_errors()
self.summary_failures()
self.summary_warnings()
self.summary_passes()
if exitstatus == EXIT_INTERRUPTED:
self._report_keyboardinterrupt()
del self._keyboardinterrupt_memo
self.summary_deselected()
self.summary_stats()
def pytest_keyboard_interrupt(self, excinfo):
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self):
if hasattr(self, '_keyboardinterrupt_memo'):
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self):
excrepr = self._keyboardinterrupt_memo
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True)
excrepr.reprcrash.toterminal(self._tw)
def _locationline(self, nodeid, fspath, lineno, domain):
def mkrel(nodeid):
line = self.config.cwd_relative_nodeid(nodeid)
if domain and line.endswith(domain):
line = line[:-len(domain)]
l = domain.split("[")
l[0] = l[0].replace('.', '::') # don't replace '.' in params
line += "[".join(l)
return line
# collect_fspath comes from testid which has a "/"-normalized path
if fspath:
res = mkrel(nodeid).replace("::()", "") # parens-normalization
if nodeid.split("::")[0] != fspath.replace("\\", "/"):
res += " <- " + self.startdir.bestrelpath(fspath)
else:
res = "[location]"
return res + " "
def _getfailureheadline(self, rep):
if hasattr(rep, 'location'):
fspath, lineno, domain = rep.location
return domain
else:
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# summaries for sessionfinish
#
def getreports(self, name):
l = []
for x in self.stats.get(name, []):
if not hasattr(x, '_pdbshown'):
l.append(x)
return l
def summary_warnings(self):
if self.hasopt("w"):
all_warnings = self.stats.get("warnings")
if not all_warnings:
return
grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config))
self.write_sep("=", "warnings summary", yellow=True, bold=False)
for location, warnings in grouped:
self._tw.line(str(location) or '<undetermined location>')
for w in warnings:
lines = w.message.splitlines()
indented = '\n'.join(' ' + x for x in lines)
self._tw.line(indented)
self._tw.line()
self._tw.line('-- Docs: http://doc.pytest.org/en/latest/warnings.html')
def summary_passes(self):
if self.config.option.tbstyle != "no":
if self.hasopt("P"):
reports = self.getreports('passed')
if not reports:
return
self.write_sep("=", "PASSES")
for rep in reports:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg)
self._outrep_summary(rep)
def print_teardown_sections(self, rep):
for secname, content in rep.sections:
if 'teardown' in secname:
self._tw.sep('-', secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_failures(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('failed')
if not reports:
return
self.write_sep("=", "FAILURES")
for rep in reports:
if self.config.option.tbstyle == "line":
line = self._getcrashline(rep)
self.write_line(line)
else:
msg = self._getfailureheadline(rep)
markup = {'red': True, 'bold': True}
self.write_sep("_", msg, **markup)
self._outrep_summary(rep)
for report in self.getreports(''):
if report.nodeid == rep.nodeid and report.when == 'teardown':
self.print_teardown_sections(report)
def summary_errors(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('error')
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats['error']:
msg = self._getfailureheadline(rep)
if not hasattr(rep, 'when'):
# collect
msg = "ERROR collecting " + msg
elif rep.when == "setup":
msg = "ERROR at setup of " + msg
elif rep.when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg)
self._outrep_summary(rep)
def _outrep_summary(self, rep):
rep.toterminal(self._tw)
for secname, content in rep.sections:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self):
session_duration = time.time() - self._sessionstarttime
(line, color) = build_summary_stats_line(self.stats)
msg = "%s in %.2f seconds" % (line, session_duration)
markup = {color: True, 'bold': True}
if self.verbosity >= 0:
self.write_sep("=", msg, **markup)
if self.verbosity == -1:
self.write_line(msg, **markup)
def summary_deselected(self):
if 'deselected' in self.stats:
self.write_sep("=", "%d tests deselected" % (
len(self.stats['deselected'])), bold=True)
def repr_pythonversion(v=None):
if v is None:
v = sys.version_info
try:
return "%s.%s.%s-%s-%s" % v
except (TypeError, ValueError):
return str(v)
def flatten(l):
for x in l:
if isinstance(x, (list, tuple)):
for y in flatten(x):
yield y
else:
yield x
def build_summary_stats_line(stats):
keys = ("failed passed skipped deselected "
"xfailed xpassed warnings error").split()
unknown_key_seen = False
for key in stats.keys():
if key not in keys:
if key: # setup/teardown reports have an empty key, ignore them
keys.append(key)
unknown_key_seen = True
parts = []
for key in keys:
val = stats.get(key, None)
if val:
parts.append("%d %s" % (len(val), key))
if parts:
line = ", ".join(parts)
else:
line = "no tests ran"
if 'failed' in stats or 'error' in stats:
color = 'red'
elif 'warnings' in stats or unknown_key_seen:
color = 'yellow'
elif 'passed' in stats:
color = 'green'
else:
color = 'yellow'
return (line, color)
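# Hedged sketch of how the stats dict maps onto the summary line; the reporter
# stores report objects in these lists, but only the lengths matter here, so
# plain placeholders are used. Illustrative only; not called anywhere.
def _example_build_summary_stats_line():
    stats = {'passed': [1, 2, 3], 'failed': [1], 'warnings': [1]}
    line, color = build_summary_stats_line(stats)
    assert line == "1 failed, 3 passed, 1 warnings"
    assert color == 'red'
    return line, color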
def _plugin_nameversions(plugininfo):
l = []
for plugin, dist in plugininfo:
# gets us name and version!
name = '{dist.project_name}-{dist.version}'.format(dist=dist)
# questionable convenience, but it keeps things short
if name.startswith("pytest-"):
name = name[7:]
# we decided to print python package names
# they can have more than one plugin
if name not in l:
l.append(name)
return l
|
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import *
from models import *
from forms import SpojUserForm, CodeGroupForm
from django.core.mail import send_mail
from django.utils import simplejson as json
from django.views.decorators.csrf import csrf_exempt
class Story(object):
def __init__(self, user, submission=None):
self.user = user
self.submissions = []
if submission:
self.submissions.append(submission)
self.count = len(self.submissions)
def duration(self):
if len(self.submissions) == 1:
return self.submissions[0].timestamp.strftime('%d %b %Y')
else:
start = self.submissions[-1].timestamp.strftime('%d %b %Y')
end = self.submissions[0].timestamp.strftime('%d %b %Y')
return start + ' - ' + end
def format_feed(feed):
story = []
for item in feed:
if story:
if item.user == story[-1].user:
story[-1].submissions.append(item)
story[-1].count = len(story[-1].submissions)
else:
story.append(Story(user=item.user, submission=item))
else:
story.append(Story(user=item.user, submission=item))
return story
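# Hedged note: format_feed() collapses consecutive submissions by the same user
# into a single Story, so a feed of submissions by [u1, u1, u2, u1] (newest
# first, as the callers order by '-timestamp') yields three Story objects with
# counts 2, 1 and 1.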
def index(request, template_name='index.html'):
if request.user.is_authenticated():
return HttpResponseRedirect('/spoj')
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
def update_jobs(request):
belongs_to = GroupMember.objects.filter(user_email=request.user.email,
user=None)
for group in belongs_to:
group.user = request.user
group.receive_emails = True
group.save()
belongs_to = GroupMember.objects.filter(user=request.user)
if not belongs_to:
group = CodeGroup.objects.create(name='My group', notifications=1)
GroupMember.objects.create(user_email=request.user.email,
user=request.user, group=group, is_owner=True, receive_emails=True)
@login_required
def spoj(request, template_name='spoj.html'):
update_jobs(request)
groups = [x.group for x in GroupMember.objects.filter(user=request.user)]
users = [request.user]
users += [x.user for x in GroupMember.objects.filter(group__in=groups)]
feed = Submission.objects.filter(user__in=users).order_by('-timestamp')[:300]
feed = format_feed(feed)
suggested_problems = ProblemSuggestion.objects.filter(user=request.user)
solved_by_me = SpojProblem.objects.filter(submission__user=request.user)
friend_suggestions = UserSuggestion.objects.filter(group__in=groups)
friend_suggestions = friend_suggestions.exclude(problem__in=solved_by_me)
todo = suggested_problems.exclude(problem__in=solved_by_me)
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
@login_required
def config(request, template_name='settings.html'):
user, created = SpojUser.objects.get_or_create(user=request.user)
form = SpojUserForm(request.POST or None, instance=user)
if form.is_valid():
form.save()
user.fetch_spoj_data()
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
@login_required
def create_group(request):
group = CodeGroup.objects.create(name=request.POST['group'])
GroupMember.objects.create(user_email=request.user.email,
user=request.user, group=group, is_owner=True, receive_emails=True)
return HttpResponseRedirect("/group/%d/" % (group.id))
def user_belongs_to_group(user, group_members):
for member in group_members:
if member.user == user:
return True
return False
@login_required
def view_group(request, id, template_name="group.html"):
try:
group = CodeGroup.objects.get(id=id)
group_members = GroupMember.objects.filter(group=group)
group_members = group_members.order_by('user__spojuser__rank')
if request.user.is_superuser:
pass
elif not user_belongs_to_group(request.user, group_members):
return HttpResponseRedirect("/")
except Exception:
return HttpResponseRedirect("/")
groups = [x.group for x in GroupMember.objects.filter(user=request.user)]
group_users = []
for member in group_members:
if member.is_owner and member.user == request.user:
is_owner = True
group_users.append(member.user)
feed = Submission.objects.filter(user__in=group_users).order_by(
'-timestamp')[:300]
feed = format_feed(feed)
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
def get_or_none(model, **kwargs):
try:
return model.objects.get(**kwargs)
except model.DoesNotExist:
return None
def validateEmail(email):
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
try:
validate_email(email)
return True
except ValidationError:
return False
@login_required
def view_group_members(request, id, template_name="group_members.html"):
try:
group = CodeGroup.objects.get(id=id)
group_members = GroupMember.objects.filter(group=group)
if not user_belongs_to_group(request.user, group_members):
return HttpResponseRedirect("/")
current_user = GroupMember.objects.get(user=request.user, group=group)
if not current_user.is_owner:
return HttpResponseRedirect("/")
if request.POST:
email = request.POST['email']
if validateEmail(email):
user = get_or_none(User, email=email)
g = GroupMember.objects.create(user_email=email,
user=user, group=group)
group_members = GroupMember.objects.filter(group=group)
if not user:
try:
subject = 'I just added you to my SpojBot Group!'
content = 'Check this out... This site emails one problem every day to all members of the group. http://www.spojbot.com '
send_mail(subject, content, '%s <[email protected]>' % (request.user.get_full_name()), [email], fail_silently=False)
except Exception:
pass
else:
g.receive_emails = True
g.save()
form = CodeGroupForm(request.POST or None, instance=group)
if form.is_valid():
form.save()
except Exception:
return HttpResponseRedirect("/")
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
@login_required
def delete_member(request, id, template_name="delete_member.html"):
member = GroupMember.objects.get(id=id)
group = member.group
current_user = GroupMember.objects.get(user=request.user,
group=group)
if not current_user.is_owner:
return HttpResponseRedirect("/")
if request.POST:
member.delete()
return HttpResponseRedirect("/group/%d/" % group.id)
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
@login_required
@csrf_exempt
def delete_group(request):
response = {'status': 'Error'}
group = CodeGroup.objects.get(id=request.POST['id'])
current_user = GroupMember.objects.get(user=request.user, group=group)
if current_user.is_owner:
group.delete()
response['status'] = 'OK'
return HttpResponse(json.dumps(response))
return HttpResponse(json.dumps(response))
@login_required
@csrf_exempt
def leave_group(request):
response = {'status': 'Error'}
try:
group = CodeGroup.objects.get(id=request.POST['id'])
current_user = GroupMember.objects.get(user=request.user, group=group)
current_user.delete()
response['status'] = 'OK'
except Exception:
pass
return HttpResponse(json.dumps(response))
@login_required
def suggest_problem(request):
response = {'status': 'Error'}
try:
group = CodeGroup.objects.get(id=request.GET.get('id'))
current_user = GroupMember.objects.get(user=request.user, group=group)
if current_user:
# belongs to this group
problem = request.GET.get('problem')
if '/' in problem:
return HttpResponse(json.dumps(response))
problem, created = SpojProblem.objects.get_or_create(
problem=problem)
if not created:
problem.source = 'user_suggestion'
problem.save()
try:
UserSuggestion.objects.get(group=group, problem=problem)
except Exception:
UserSuggestion.objects.get_or_create(group=group,
problem=problem, user=request.user)
response['status'] = 'OK'
return HttpResponse(json.dumps(response))
except Exception:
pass
return HttpResponse(json.dumps(response))
|
|
import collections
import datetime
import enum
import json
import math
import time
import warnings
import zlib
from email.utils import parsedate
from http.cookies import SimpleCookie
from multidict import CIMultiDict, CIMultiDictProxy
from . import hdrs, payload
from .helpers import HeadersMixin, rfc822_formatted_time, sentinel
from .http import RESPONSES, SERVER_SOFTWARE, HttpVersion10, HttpVersion11
__all__ = ('ContentCoding', 'StreamResponse', 'Response', 'json_response')
class ContentCoding(enum.Enum):
# The content codings that we have support for.
#
# Additional registered codings are listed at:
# https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
deflate = 'deflate'
gzip = 'gzip'
identity = 'identity'
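# Minimal sketch (helper name is hypothetical, not part of aiohttp): shows how
# the codings above are matched against an Accept-Encoding header value, which
# mirrors the loop in StreamResponse._start_compression further below.
def _pick_coding(accept_encoding_header):
    accept_encoding = accept_encoding_header.lower()
    for coding in ContentCoding:
        if coding.value in accept_encoding:
            return coding
    return ContentCoding.identity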
############################################################
# HTTP Response classes
############################################################
class StreamResponse(collections.MutableMapping, HeadersMixin):
_length_check = True
def __init__(self, *, status=200, reason=None, headers=None):
self._body = None
self._keep_alive = None
self._chunked = False
self._compression = False
self._compression_force = False
self._cookies = SimpleCookie()
self._req = None
self._payload_writer = None
self._eof_sent = False
self._body_length = 0
self._state = {}
if headers is not None:
self._headers = CIMultiDict(headers)
else:
self._headers = CIMultiDict()
self.set_status(status, reason)
@property
def prepared(self):
return self._payload_writer is not None
@property
def task(self):
return getattr(self._req, 'task', None)
@property
def status(self):
return self._status
@property
def chunked(self):
return self._chunked
@property
def compression(self):
return self._compression
@property
def reason(self):
return self._reason
def set_status(self, status, reason=None, _RESPONSES=RESPONSES):
assert not self.prepared, \
'Cannot change the response status code after ' \
'the headers have been sent'
self._status = int(status)
if reason is None:
try:
reason = _RESPONSES[self._status][0]
except Exception:
reason = ''
self._reason = reason
@property
def keep_alive(self):
return self._keep_alive
def force_close(self):
self._keep_alive = False
@property
def body_length(self):
return self._body_length
@property
def output_length(self):
warnings.warn('output_length is deprecated', DeprecationWarning)
return self._payload_writer.buffer_size
def enable_chunked_encoding(self, chunk_size=None):
"""Enables automatic chunked transfer encoding."""
self._chunked = True
if hdrs.CONTENT_LENGTH in self._headers:
raise RuntimeError("You can't enable chunked encoding when "
"a content length is set")
if chunk_size is not None:
warnings.warn('Chunk size is deprecated #1615', DeprecationWarning)
def enable_compression(self, force=None):
"""Enables response compression encoding."""
# Backwards compatibility for when force was a bool <0.17.
if type(force) == bool:
force = ContentCoding.deflate if force else ContentCoding.identity
elif force is not None:
assert isinstance(force, ContentCoding), ("force should be one of "
"None, bool or "
"ContentCoding")
self._compression = True
self._compression_force = force
@property
def headers(self):
return self._headers
@property
def cookies(self):
return self._cookies
def set_cookie(self, name, value, *, expires=None,
domain=None, max_age=None, path='/',
secure=None, httponly=None, version=None):
"""Set or update response cookie.
Sets a new cookie or updates an existing one with a new value.
Also updates only those params which are not None.
"""
old = self._cookies.get(name)
if old is not None and old.coded_value == '':
# deleted cookie
self._cookies.pop(name, None)
self._cookies[name] = value
c = self._cookies[name]
if expires is not None:
c['expires'] = expires
elif c.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT':
del c['expires']
if domain is not None:
c['domain'] = domain
if max_age is not None:
c['max-age'] = max_age
elif 'max-age' in c:
del c['max-age']
c['path'] = path
if secure is not None:
c['secure'] = secure
if httponly is not None:
c['httponly'] = httponly
if version is not None:
c['version'] = version
def del_cookie(self, name, *, domain=None, path='/'):
"""Delete cookie.
Creates new empty expired cookie.
"""
# TODO: do we need domain/path here?
self._cookies.pop(name, None)
self.set_cookie(name, '', max_age=0,
expires="Thu, 01 Jan 1970 00:00:00 GMT",
domain=domain, path=path)
@property
def content_length(self):
# Just a placeholder for adding setter
return super().content_length
@content_length.setter
def content_length(self, value):
if value is not None:
value = int(value)
if self._chunked:
raise RuntimeError("You can't set content length when "
"chunked encoding is enable")
self._headers[hdrs.CONTENT_LENGTH] = str(value)
else:
self._headers.pop(hdrs.CONTENT_LENGTH, None)
@property
def content_type(self):
# Just a placeholder for adding setter
return super().content_type
@content_type.setter
def content_type(self, value):
self.content_type # read header values if needed
self._content_type = str(value)
self._generate_content_type_header()
@property
def charset(self):
# Just a placeholder for adding setter
return super().charset
@charset.setter
def charset(self, value):
ctype = self.content_type # read header values if needed
if ctype == 'application/octet-stream':
raise RuntimeError("Setting charset for application/octet-stream "
"doesn't make sense, setup content_type first")
if value is None:
self._content_dict.pop('charset', None)
else:
self._content_dict['charset'] = str(value).lower()
self._generate_content_type_header()
@property
def last_modified(self, _LAST_MODIFIED=hdrs.LAST_MODIFIED):
"""The value of Last-Modified HTTP header, or None.
This header is represented as a `datetime` object.
"""
httpdate = self.headers.get(_LAST_MODIFIED)
if httpdate is not None:
timetuple = parsedate(httpdate)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None
@last_modified.setter
def last_modified(self, value):
if value is None:
self.headers.pop(hdrs.LAST_MODIFIED, None)
elif isinstance(value, (int, float)):
self.headers[hdrs.LAST_MODIFIED] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value)))
elif isinstance(value, datetime.datetime):
self.headers[hdrs.LAST_MODIFIED] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple())
elif isinstance(value, str):
self.headers[hdrs.LAST_MODIFIED] = value
def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):
params = '; '.join("%s=%s" % i for i in self._content_dict.items())
if params:
ctype = self._content_type + '; ' + params
else:
ctype = self._content_type
self.headers[CONTENT_TYPE] = ctype
def _do_start_compression(self, coding):
if coding != ContentCoding.identity:
self.headers[hdrs.CONTENT_ENCODING] = coding.value
self._payload_writer.enable_compression(coding.value)
# Compressed payload may have different content length,
# remove the header
self._headers.popall(hdrs.CONTENT_LENGTH, None)
def _start_compression(self, request):
if self._compression_force:
self._do_start_compression(self._compression_force)
else:
accept_encoding = request.headers.get(
hdrs.ACCEPT_ENCODING, '').lower()
for coding in ContentCoding:
if coding.value in accept_encoding:
self._do_start_compression(coding)
return
async def prepare(self, request):
if self._eof_sent:
return
if self._payload_writer is not None:
return self._payload_writer
await request._prepare_hook(self)
return self._start(request)
def _start(self, request,
HttpVersion10=HttpVersion10,
HttpVersion11=HttpVersion11,
CONNECTION=hdrs.CONNECTION,
DATE=hdrs.DATE,
SERVER=hdrs.SERVER,
CONTENT_TYPE=hdrs.CONTENT_TYPE,
CONTENT_LENGTH=hdrs.CONTENT_LENGTH,
SET_COOKIE=hdrs.SET_COOKIE,
SERVER_SOFTWARE=SERVER_SOFTWARE,
TRANSFER_ENCODING=hdrs.TRANSFER_ENCODING):
self._req = request
keep_alive = self._keep_alive
if keep_alive is None:
keep_alive = request.keep_alive
self._keep_alive = keep_alive
version = request.version
writer = self._payload_writer = request._payload_writer
headers = self._headers
for cookie in self._cookies.values():
value = cookie.output(header='')[1:]
headers.add(SET_COOKIE, value)
if self._compression:
self._start_compression(request)
if self._chunked:
if version != HttpVersion11:
raise RuntimeError(
"Using chunked encoding is forbidden "
"for HTTP/{0.major}.{0.minor}".format(request.version))
writer.enable_chunking()
headers[TRANSFER_ENCODING] = 'chunked'
if CONTENT_LENGTH in headers:
del headers[CONTENT_LENGTH]
elif self._length_check:
writer.length = self.content_length
if writer.length is None:
if version >= HttpVersion11:
writer.enable_chunking()
headers[TRANSFER_ENCODING] = 'chunked'
if CONTENT_LENGTH in headers:
del headers[CONTENT_LENGTH]
else:
keep_alive = False
headers.setdefault(CONTENT_TYPE, 'application/octet-stream')
headers.setdefault(DATE, rfc822_formatted_time())
headers.setdefault(SERVER, SERVER_SOFTWARE)
# connection header
if CONNECTION not in headers:
if keep_alive:
if version == HttpVersion10:
headers[CONNECTION] = 'keep-alive'
else:
if version == HttpVersion11:
headers[CONNECTION] = 'close'
# status line
status_line = 'HTTP/{}.{} {} {}\r\n'.format(
version[0], version[1], self._status, self._reason)
writer.write_headers(status_line, headers)
return writer
async def write(self, data):
assert isinstance(data, (bytes, bytearray, memoryview)), \
"data argument must be byte-ish (%r)" % type(data)
if self._eof_sent:
raise RuntimeError("Cannot call write() after write_eof()")
if self._payload_writer is None:
raise RuntimeError("Cannot call write() before prepare()")
await self._payload_writer.write(data)
async def drain(self):
assert not self._eof_sent, "EOF has already been sent"
assert self._payload_writer is not None, \
"Response has not been started"
warnings.warn("drain method is deprecated, use await resp.write()",
DeprecationWarning,
stacklevel=2)
await self._payload_writer.drain()
async def write_eof(self, data=b''):
assert isinstance(data, (bytes, bytearray, memoryview)), \
"data argument must be byte-ish (%r)" % type(data)
if self._eof_sent:
return
assert self._payload_writer is not None, \
"Response has not been started"
await self._payload_writer.write_eof(data)
self._eof_sent = True
self._req = None
self._body_length = self._payload_writer.output_size
self._payload_writer = None
def __repr__(self):
if self._eof_sent:
info = "eof"
elif self.prepared:
info = "{} {} ".format(self._req.method, self._req.path)
else:
info = "not prepared"
return "<{} {} {}>".format(self.__class__.__name__,
self.reason, info)
def __getitem__(self, key):
return self._state[key]
def __setitem__(self, key, value):
self._state[key] = value
def __delitem__(self, key):
del self._state[key]
def __len__(self):
return len(self._state)
def __iter__(self):
return iter(self._state)
def __hash__(self):
return hash(id(self))
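# Hedged usage sketch (handler name is hypothetical; assumes it is registered
# on an aiohttp application): the typical StreamResponse lifecycle implemented
# by the class above -- construct, prepare(), write() chunks, then write_eof().
async def _example_stream_handler(request):
    resp = StreamResponse(status=200)
    resp.content_type = 'text/plain'
    await resp.prepare(request)
    await resp.write(b'hello, ')
    await resp.write(b'world\n')
    await resp.write_eof()
    return resp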
class Response(StreamResponse):
def __init__(self, *, body=None, status=200,
reason=None, text=None, headers=None, content_type=None,
charset=None):
if body is not None and text is not None:
raise ValueError("body and text are not allowed together")
if headers is None:
headers = CIMultiDict()
elif not isinstance(headers, (CIMultiDict, CIMultiDictProxy)):
headers = CIMultiDict(headers)
if content_type is not None and "charset" in content_type:
raise ValueError("charset must not be in content_type "
"argument")
if text is not None:
if hdrs.CONTENT_TYPE in headers:
if content_type or charset:
raise ValueError("passing both Content-Type header and "
"content_type or charset params "
"is forbidden")
else:
# fast path for filling headers
if not isinstance(text, str):
raise TypeError("text argument must be str (%r)" %
type(text))
if content_type is None:
content_type = 'text/plain'
if charset is None:
charset = 'utf-8'
headers[hdrs.CONTENT_TYPE] = (
content_type + '; charset=' + charset)
body = text.encode(charset)
text = None
else:
if hdrs.CONTENT_TYPE in headers:
if content_type is not None or charset is not None:
raise ValueError("passing both Content-Type header and "
"content_type or charset params "
"is forbidden")
else:
if content_type is not None:
if charset is not None:
content_type += '; charset=' + charset
headers[hdrs.CONTENT_TYPE] = content_type
super().__init__(status=status, reason=reason, headers=headers)
if text is not None:
self.text = text
else:
self.body = body
self._compressed_body = None
@property
def body(self):
return self._body
@body.setter
def body(self, body,
CONTENT_TYPE=hdrs.CONTENT_TYPE,
CONTENT_LENGTH=hdrs.CONTENT_LENGTH):
if body is None:
self._body = None
self._body_payload = False
elif isinstance(body, (bytes, bytearray)):
self._body = body
self._body_payload = False
else:
try:
self._body = body = payload.PAYLOAD_REGISTRY.get(body)
except payload.LookupError:
raise ValueError('Unsupported body type %r' % type(body))
self._body_payload = True
headers = self._headers
# set content-length header if needed
if not self._chunked and CONTENT_LENGTH not in headers:
size = body.size
if size is not None:
headers[CONTENT_LENGTH] = str(size)
# set content-type
if CONTENT_TYPE not in headers:
headers[CONTENT_TYPE] = body.content_type
# copy payload headers
if body.headers:
for (key, value) in body.headers.items():
if key not in headers:
headers[key] = value
self._compressed_body = None
@property
def text(self):
if self._body is None:
return None
return self._body.decode(self.charset or 'utf-8')
@text.setter
def text(self, text):
assert text is None or isinstance(text, str), \
"text argument must be str (%r)" % type(text)
if self.content_type == 'application/octet-stream':
self.content_type = 'text/plain'
if self.charset is None:
self.charset = 'utf-8'
self._body = text.encode(self.charset)
self._body_payload = False
self._compressed_body = None
@property
def content_length(self):
if self._chunked:
return None
if hdrs.CONTENT_LENGTH in self.headers:
return super().content_length
if self._compressed_body is not None:
# Return length of the compressed body
return len(self._compressed_body)
elif self._body_payload:
# A payload without content length, or a compressed payload
return None
elif self._body is not None:
return len(self._body)
else:
return 0
@content_length.setter
def content_length(self, value):
raise RuntimeError("Content length is set automatically")
async def write_eof(self):
if self._eof_sent:
return
if self._compressed_body is not None:
body = self._compressed_body
else:
body = self._body
if body is not None:
if (self._req._method == hdrs.METH_HEAD or
self._status in [204, 304]):
await super().write_eof()
elif self._body_payload:
await body.write(self._payload_writer)
await super().write_eof()
else:
await super().write_eof(body)
else:
await super().write_eof()
def _start(self, request):
if not self._chunked and hdrs.CONTENT_LENGTH not in self._headers:
if not self._body_payload:
if self._body is not None:
self._headers[hdrs.CONTENT_LENGTH] = str(len(self._body))
else:
self._headers[hdrs.CONTENT_LENGTH] = '0'
return super()._start(request)
def _do_start_compression(self, coding):
if self._body_payload or self._chunked:
return super()._do_start_compression(coding)
if coding != ContentCoding.identity:
# Instead of using _payload_writer.enable_compression,
# compress the whole body
zlib_mode = (16 + zlib.MAX_WBITS
if coding.value == 'gzip' else -zlib.MAX_WBITS)
compressobj = zlib.compressobj(wbits=zlib_mode)
self._compressed_body = compressobj.compress(self._body) +\
compressobj.flush()
self._headers[hdrs.CONTENT_ENCODING] = coding.value
self._headers[hdrs.CONTENT_LENGTH] = \
str(len(self._compressed_body))
def json_response(data=sentinel, *, text=None, body=None, status=200,
reason=None, headers=None, content_type='application/json',
dumps=json.dumps):
if data is not sentinel:
if text or body:
raise ValueError(
"only one of data, text, or body should be specified"
)
else:
text = dumps(data)
return Response(text=text, body=body, status=status, reason=reason,
headers=headers, content_type=content_type)
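# Hedged usage sketch (handler name is hypothetical): returning JSON through
# the json_response helper defined above; the data is serialized with
# json.dumps and the Content-Type defaults to application/json.
async def _example_json_handler(request):
    return json_response({'status': 'ok', 'items': [1, 2, 3]})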
|
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
PROJECT_ID = 'project-id'
ZONE = 'zone'
CLUSTER_ID = 'cluster-id'
TABLE_ID = 'table-id'
COLUMN_FAMILY_ID = 'column-family-id'
class TestGarbageCollectionRule(unittest2.TestCase):
def _getTargetClass(self):
from gcloud_bigtable.column_family import GarbageCollectionRule
return GarbageCollectionRule
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor_defaults(self):
gc_rule = self._makeOne()
self.assertEqual(gc_rule.max_num_versions, None)
self.assertEqual(gc_rule.max_age, None)
def test_constructor_failure(self):
with self.assertRaises(TypeError):
self._makeOne(max_num_versions=1, max_age=object())
def test___eq__max_age(self):
max_age = object()
gc_rule1 = self._makeOne(max_age=max_age)
gc_rule2 = self._makeOne(max_age=max_age)
self.assertEqual(gc_rule1, gc_rule2)
def test___eq__max_num_versions(self):
gc_rule1 = self._makeOne(max_num_versions=2)
gc_rule2 = self._makeOne(max_num_versions=2)
self.assertEqual(gc_rule1, gc_rule2)
def test___eq__type_differ(self):
gc_rule1 = self._makeOne()
gc_rule2 = object()
self.assertNotEqual(gc_rule1, gc_rule2)
def test___ne__same_value(self):
gc_rule1 = self._makeOne()
gc_rule2 = self._makeOne()
comparison_val = (gc_rule1 != gc_rule2)
self.assertFalse(comparison_val)
def test_to_pb_too_many_values(self):
# Fool the constructor by passing no values.
gc_rule = self._makeOne()
gc_rule.max_num_versions = object()
gc_rule.max_age = object()
with self.assertRaises(TypeError):
gc_rule.to_pb()
def test_to_pb_no_value(self):
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
gc_rule = self._makeOne()
pb_val = gc_rule.to_pb()
self.assertEqual(pb_val, data_pb2.GcRule())
def test_to_pb_with_max_num_versions(self):
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
max_num_versions = 1337
gc_rule = self._makeOne(max_num_versions=max_num_versions)
pb_val = gc_rule.to_pb()
self.assertEqual(pb_val,
data_pb2.GcRule(max_num_versions=max_num_versions))
def test_to_pb_with_max_age(self):
import datetime
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
from gcloud_bigtable._generated import duration_pb2
max_age = datetime.timedelta(seconds=1)
duration = duration_pb2.Duration(seconds=1)
gc_rule = self._makeOne(max_age=max_age)
pb_val = gc_rule.to_pb()
self.assertEqual(pb_val, data_pb2.GcRule(max_age=duration))
class TestGarbageCollectionRuleUnion(unittest2.TestCase):
def _getTargetClass(self):
from gcloud_bigtable.column_family import GarbageCollectionRuleUnion
return GarbageCollectionRuleUnion
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
rules = object()
rule_union = self._makeOne(rules=rules)
self.assertTrue(rule_union.rules is rules)
def test___eq__(self):
rules = object()
gc_rule1 = self._makeOne(rules=rules)
gc_rule2 = self._makeOne(rules=rules)
self.assertEqual(gc_rule1, gc_rule2)
def test___eq__type_differ(self):
gc_rule1 = self._makeOne()
gc_rule2 = object()
self.assertNotEqual(gc_rule1, gc_rule2)
def test___ne__same_value(self):
gc_rule1 = self._makeOne()
gc_rule2 = self._makeOne()
comparison_val = (gc_rule1 != gc_rule2)
self.assertFalse(comparison_val)
def test_to_pb(self):
import datetime
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
from gcloud_bigtable._generated import duration_pb2
from gcloud_bigtable.column_family import GarbageCollectionRule
max_num_versions = 42
rule1 = GarbageCollectionRule(max_num_versions=max_num_versions)
pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions)
max_age = datetime.timedelta(seconds=1)
rule2 = GarbageCollectionRule(max_age=max_age)
pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1))
rule3 = self._makeOne(rules=[rule1, rule2])
pb_rule3 = data_pb2.GcRule(
union=data_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2]))
gc_rule_pb = rule3.to_pb()
self.assertEqual(gc_rule_pb, pb_rule3)
def test_to_pb_nested(self):
import datetime
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
from gcloud_bigtable._generated import duration_pb2
from gcloud_bigtable.column_family import GarbageCollectionRule
max_num_versions1 = 42
rule1 = GarbageCollectionRule(max_num_versions=max_num_versions1)
pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions1)
max_age = datetime.timedelta(seconds=1)
rule2 = GarbageCollectionRule(max_age=max_age)
pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1))
rule3 = self._makeOne(rules=[rule1, rule2])
pb_rule3 = data_pb2.GcRule(
union=data_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2]))
max_num_versions2 = 1337
rule4 = GarbageCollectionRule(max_num_versions=max_num_versions2)
pb_rule4 = data_pb2.GcRule(max_num_versions=max_num_versions2)
rule5 = self._makeOne(rules=[rule3, rule4])
pb_rule5 = data_pb2.GcRule(
union=data_pb2.GcRule.Union(rules=[pb_rule3, pb_rule4]))
gc_rule_pb = rule5.to_pb()
self.assertEqual(gc_rule_pb, pb_rule5)
class TestGarbageCollectionRuleIntersection(unittest2.TestCase):
def _getTargetClass(self):
from gcloud_bigtable.column_family import (
GarbageCollectionRuleIntersection)
return GarbageCollectionRuleIntersection
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
rules = object()
rule_intersection = self._makeOne(rules=rules)
self.assertTrue(rule_intersection.rules is rules)
def test___eq__(self):
rules = object()
gc_rule1 = self._makeOne(rules=rules)
gc_rule2 = self._makeOne(rules=rules)
self.assertEqual(gc_rule1, gc_rule2)
def test___eq__type_differ(self):
gc_rule1 = self._makeOne()
gc_rule2 = object()
self.assertNotEqual(gc_rule1, gc_rule2)
def test___ne__same_value(self):
gc_rule1 = self._makeOne()
gc_rule2 = self._makeOne()
comparison_val = (gc_rule1 != gc_rule2)
self.assertFalse(comparison_val)
def test_to_pb(self):
import datetime
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
from gcloud_bigtable._generated import duration_pb2
from gcloud_bigtable.column_family import GarbageCollectionRule
max_num_versions = 42
rule1 = GarbageCollectionRule(max_num_versions=max_num_versions)
pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions)
max_age = datetime.timedelta(seconds=1)
rule2 = GarbageCollectionRule(max_age=max_age)
pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1))
rule3 = self._makeOne(rules=[rule1, rule2])
pb_rule3 = data_pb2.GcRule(
intersection=data_pb2.GcRule.Intersection(
rules=[pb_rule1, pb_rule2]))
gc_rule_pb = rule3.to_pb()
self.assertEqual(gc_rule_pb, pb_rule3)
def test_to_pb_nested(self):
import datetime
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
from gcloud_bigtable._generated import duration_pb2
from gcloud_bigtable.column_family import GarbageCollectionRule
max_num_versions1 = 42
rule1 = GarbageCollectionRule(max_num_versions=max_num_versions1)
pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions1)
max_age = datetime.timedelta(seconds=1)
rule2 = GarbageCollectionRule(max_age=max_age)
pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1))
rule3 = self._makeOne(rules=[rule1, rule2])
pb_rule3 = data_pb2.GcRule(
intersection=data_pb2.GcRule.Intersection(
rules=[pb_rule1, pb_rule2]))
max_num_versions2 = 1337
rule4 = GarbageCollectionRule(max_num_versions=max_num_versions2)
pb_rule4 = data_pb2.GcRule(max_num_versions=max_num_versions2)
rule5 = self._makeOne(rules=[rule3, rule4])
pb_rule5 = data_pb2.GcRule(
intersection=data_pb2.GcRule.Intersection(
rules=[pb_rule3, pb_rule4]))
gc_rule_pb = rule5.to_pb()
self.assertEqual(gc_rule_pb, pb_rule5)
class TestColumnFamily(unittest2.TestCase):
def _getTargetClass(self):
from gcloud_bigtable.column_family import ColumnFamily
return ColumnFamily
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
table = object()
gc_rule = object()
column_family = self._makeOne(COLUMN_FAMILY_ID, table, gc_rule=gc_rule)
self.assertEqual(column_family.column_family_id, COLUMN_FAMILY_ID)
self.assertTrue(column_family._table is table)
self.assertTrue(column_family.gc_rule is gc_rule)
def test_table_getter(self):
table = object()
column_family = self._makeOne(COLUMN_FAMILY_ID, table)
self.assertTrue(column_family.table is table)
def test_client_getter(self):
client = object()
table = _Table(None, client=client)
column_family = self._makeOne(COLUMN_FAMILY_ID, table)
self.assertTrue(column_family.client is client)
def test_timeout_seconds_getter(self):
timeout_seconds = 889
table = _Table(None, timeout_seconds=timeout_seconds)
column_family = self._makeOne(COLUMN_FAMILY_ID, table)
self.assertEqual(column_family.timeout_seconds, timeout_seconds)
def test_name_property(self):
table_name = 'table_name'
table = _Table(table_name)
column_family = self._makeOne(COLUMN_FAMILY_ID, table)
expected_name = table_name + '/columnFamilies/' + COLUMN_FAMILY_ID
self.assertEqual(column_family.name, expected_name)
def test___eq__(self):
column_family_id = 'column_family_id'
table = object()
column_family1 = self._makeOne(column_family_id, table)
column_family2 = self._makeOne(column_family_id, table)
self.assertEqual(column_family1, column_family2)
def test___eq__type_differ(self):
column_family1 = self._makeOne('column_family_id', None)
column_family2 = object()
self.assertNotEqual(column_family1, column_family2)
def test___ne__same_value(self):
column_family_id = 'column_family_id'
table = object()
column_family1 = self._makeOne(column_family_id, table)
column_family2 = self._makeOne(column_family_id, table)
comparison_val = (column_family1 != column_family2)
self.assertFalse(comparison_val)
def test___ne__(self):
column_family1 = self._makeOne('column_family_id1', 'table1')
column_family2 = self._makeOne('column_family_id2', 'table2')
self.assertNotEqual(column_family1, column_family2)
def _create_test_helper(self, gc_rule=None):
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
from gcloud_bigtable._generated import (
bigtable_table_service_messages_pb2 as messages_pb2)
from gcloud_bigtable._grpc_mocks import StubMock
client = _Client()
table_name = ('projects/' + PROJECT_ID + '/zones/' + ZONE +
'/clusters/' + CLUSTER_ID + '/tables/' + TABLE_ID)
table = _Table(table_name, client=client)
column_family = self._makeOne(COLUMN_FAMILY_ID, table, gc_rule=gc_rule)
# Create request_pb
if gc_rule is None:
column_family_pb = data_pb2.ColumnFamily()
else:
column_family_pb = data_pb2.ColumnFamily(gc_rule=gc_rule.to_pb())
request_pb = messages_pb2.CreateColumnFamilyRequest(
name=table_name,
column_family_id=COLUMN_FAMILY_ID,
column_family=column_family_pb,
)
# Create response_pb
response_pb = data_pb2.ColumnFamily()
# Patch the stub used by the API method.
client.table_stub = stub = StubMock(response_pb)
# Create expected_result.
expected_result = None # create() has no return value.
# Perform the method and check the result.
timeout_seconds = 4
result = column_family.create(timeout_seconds=timeout_seconds)
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'CreateColumnFamily',
(request_pb, timeout_seconds),
{},
)])
def test_create(self):
self._create_test_helper(gc_rule=None)
def test_create_with_gc_rule(self):
from gcloud_bigtable.column_family import GarbageCollectionRule
gc_rule = GarbageCollectionRule(max_num_versions=1337)
self._create_test_helper(gc_rule=gc_rule)
def _update_test_helper(self, gc_rule=None):
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
from gcloud_bigtable._grpc_mocks import StubMock
client = _Client()
table_name = ('projects/' + PROJECT_ID + '/zones/' + ZONE +
'/clusters/' + CLUSTER_ID + '/tables/' + TABLE_ID)
table = _Table(table_name, client=client)
column_family = self._makeOne(COLUMN_FAMILY_ID, table, gc_rule=gc_rule)
# Create request_pb
column_family_name = table_name + '/columnFamilies/' + COLUMN_FAMILY_ID
if gc_rule is None:
request_pb = data_pb2.ColumnFamily(name=column_family_name)
else:
request_pb = data_pb2.ColumnFamily(
name=column_family_name,
gc_rule=gc_rule.to_pb(),
)
# Create response_pb
response_pb = data_pb2.ColumnFamily()
# Patch the stub used by the API method.
client.table_stub = stub = StubMock(response_pb)
# Create expected_result.
expected_result = None # update() has no return value.
# Perform the method and check the result.
timeout_seconds = 28
result = column_family.update(timeout_seconds=timeout_seconds)
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'UpdateColumnFamily',
(request_pb, timeout_seconds),
{},
)])
def test_update(self):
self._update_test_helper(gc_rule=None)
def test_update_with_gc_rule(self):
from gcloud_bigtable.column_family import GarbageCollectionRule
gc_rule = GarbageCollectionRule(max_num_versions=1337)
self._update_test_helper(gc_rule=gc_rule)
def test_delete(self):
from gcloud_bigtable._generated import (
bigtable_table_service_messages_pb2 as messages_pb2)
from gcloud_bigtable._generated import empty_pb2
from gcloud_bigtable._grpc_mocks import StubMock
client = _Client()
table_name = ('projects/' + PROJECT_ID + '/zones/' + ZONE +
'/clusters/' + CLUSTER_ID + '/tables/' + TABLE_ID)
table = _Table(table_name, client=client)
column_family = self._makeOne(COLUMN_FAMILY_ID, table)
# Create request_pb
column_family_name = table_name + '/columnFamilies/' + COLUMN_FAMILY_ID
request_pb = messages_pb2.DeleteColumnFamilyRequest(
name=column_family_name)
# Create response_pb
response_pb = empty_pb2.Empty()
# Patch the stub used by the API method.
client.table_stub = stub = StubMock(response_pb)
# Create expected_result.
expected_result = None # delete() has no return value.
# Perform the method and check the result.
timeout_seconds = 7
result = column_family.delete(timeout_seconds=timeout_seconds)
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'DeleteColumnFamily',
(request_pb, timeout_seconds),
{},
)])
class Test__gc_rule_from_pb(unittest2.TestCase):
def _callFUT(self, gc_rule_pb):
from gcloud_bigtable.column_family import _gc_rule_from_pb
return _gc_rule_from_pb(gc_rule_pb)
def test_empty(self):
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
gc_rule_pb = data_pb2.GcRule()
self.assertEqual(self._callFUT(gc_rule_pb), None)
def test_failure(self):
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
from gcloud_bigtable._generated import duration_pb2
gc_rule_pb1 = data_pb2.GcRule(max_num_versions=1)
gc_rule_pb2 = data_pb2.GcRule(
max_age=duration_pb2.Duration(seconds=1),
)
# Since this is a oneof field, google.protobuf doesn't allow both
# to be set, so we fake it.
gc_rule_pb3 = data_pb2.GcRule()
gc_rule_pb3._fields.update(gc_rule_pb1._fields)
gc_rule_pb3._fields.update(gc_rule_pb2._fields)
with self.assertRaises(ValueError):
self._callFUT(gc_rule_pb3)
def test_max_num_versions(self):
from gcloud_bigtable.column_family import GarbageCollectionRule
orig_rule = GarbageCollectionRule(max_num_versions=1)
gc_rule_pb = orig_rule.to_pb()
result = self._callFUT(gc_rule_pb)
self.assertTrue(isinstance(result, GarbageCollectionRule))
self.assertEqual(result, orig_rule)
def test_max_age(self):
import datetime
from gcloud_bigtable.column_family import GarbageCollectionRule
orig_rule = GarbageCollectionRule(
max_age=datetime.timedelta(seconds=1))
gc_rule_pb = orig_rule.to_pb()
result = self._callFUT(gc_rule_pb)
self.assertTrue(isinstance(result, GarbageCollectionRule))
self.assertEqual(result, orig_rule)
def test_union(self):
import datetime
from gcloud_bigtable.column_family import GarbageCollectionRule
from gcloud_bigtable.column_family import GarbageCollectionRuleUnion
rule1 = GarbageCollectionRule(max_num_versions=1)
rule2 = GarbageCollectionRule(
max_age=datetime.timedelta(seconds=1))
orig_rule = GarbageCollectionRuleUnion(rules=[rule1, rule2])
gc_rule_pb = orig_rule.to_pb()
result = self._callFUT(gc_rule_pb)
self.assertTrue(isinstance(result, GarbageCollectionRuleUnion))
self.assertEqual(result, orig_rule)
def test_intersection(self):
import datetime
from gcloud_bigtable.column_family import GarbageCollectionRule
from gcloud_bigtable.column_family import (
GarbageCollectionRuleIntersection)
rule1 = GarbageCollectionRule(max_num_versions=1)
rule2 = GarbageCollectionRule(
max_age=datetime.timedelta(seconds=1))
orig_rule = GarbageCollectionRuleIntersection(rules=[rule1, rule2])
gc_rule_pb = orig_rule.to_pb()
result = self._callFUT(gc_rule_pb)
self.assertTrue(isinstance(result, GarbageCollectionRuleIntersection))
self.assertEqual(result, orig_rule)
def test_unknown_field_name(self):
from google.protobuf.descriptor import FieldDescriptor
from gcloud_bigtable._generated import (
bigtable_table_data_pb2 as data_pb2)
gc_rule_pb = data_pb2.GcRule()
fake_descriptor_name = 'not-union'
descriptor_args = (fake_descriptor_name,) + (None,) * 12
fake_descriptor = FieldDescriptor(*descriptor_args)
gc_rule_pb._fields[fake_descriptor] = None
self.assertEqual(self._callFUT(gc_rule_pb), None)
class _Client(object):
cluster_stub = None
operations_stub = None
table_stub = None
class _Table(object):
def __init__(self, name, client=None, timeout_seconds=None):
self.name = name
self.client = client
self.timeout_seconds = timeout_seconds
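# Hedged illustration (not part of the test suite): how the fakes above are
# wired together by the test helpers -- a _Client carries the stubbed
# table_stub, while _Table only needs to expose name, client and
# timeout_seconds to the ColumnFamily under test.
def _example_fakes():
    client = _Client()
    table = _Table('projects/p/zones/z/clusters/c/tables/t',
                   client=client, timeout_seconds=10)
    return table.client is client  # True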
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the guest.
This class encapsulates a libvirt domain and provides certain
higher-level APIs around the raw libvirt API. These APIs are
then used by all the other libvirt-related classes.
"""
from lxml import etree
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from nova.compute import power_state
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova import utils
from nova.virt import hardware
from nova.virt.libvirt import compat
from nova.virt.libvirt import config as vconfig
libvirt = None
LOG = logging.getLogger(__name__)
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# The DOMAIN_BLOCKED state is only valid in Xen. It means that
# the VM is running and the vCPU is idle. So, we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# The libvirt API doc says that DOMAIN_SHUTDOWN means the domain
# is being shut down. So technically the domain is still
# running. SHUTOFF is the real powered off state. But we will map
# both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
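# Minimal sketch (helper name is hypothetical): translate a raw libvirt domain
# state integer into a Nova power_state constant using the table above; the
# real code indexes LIBVIRT_POWER_STATE directly in Guest.get_info().
def _to_power_state(libvirt_state):
    return LIBVIRT_POWER_STATE.get(libvirt_state, power_state.NOSTATE)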
class Guest(object):
def __init__(self, domain):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._domain = domain
def __repr__(self):
return "<Guest %(id)d %(name)s %(uuid)s>" % {
'id': self.id,
'name': self.name,
'uuid': self.uuid
}
@property
def id(self):
return self._domain.ID()
@property
def uuid(self):
return self._domain.UUIDString()
@property
def name(self):
return self._domain.name()
@property
def _encoded_xml(self):
return encodeutils.safe_decode(self._domain.XMLDesc(0))
@classmethod
def create(cls, xml, host):
"""Create a new Guest
:param xml: XML definition of the domain to create
:param host: host.Host connection to define the guest on
:returns guest.Guest: Guest ready to be launched
"""
try:
# TODO(sahid): Host.write_instance_config should return
# an instance of Guest
domain = host.write_instance_config(xml)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a domain with XML: %s') %
encodeutils.safe_decode(xml))
return cls(domain)
def launch(self, pause=False):
"""Starts a created guest.
:param pause: Indicates whether to start and pause the guest
"""
flags = pause and libvirt.VIR_DOMAIN_START_PAUSED or 0
try:
return self._domain.createWithFlags(flags)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error launching a defined domain '
'with XML: %s') %
self._encoded_xml, errors='ignore')
def poweroff(self):
"""Stops a running guest."""
self._domain.destroy()
def inject_nmi(self):
"""Injects an NMI to a guest."""
self._domain.injectNMI()
def resume(self):
"""Resumes a suspended guest."""
self._domain.resume()
def enable_hairpin(self):
"""Enables hairpin mode for this guest."""
interfaces = self.get_interfaces()
try:
for interface in interfaces:
utils.execute(
'tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error enabling hairpin mode with XML: %s') %
self._encoded_xml, errors='ignore')
def get_interfaces(self):
"""Returns a list of all network interfaces for this domain."""
doc = None
try:
doc = etree.fromstring(self._encoded_xml)
except Exception:
return []
interfaces = []
nodes = doc.findall('./devices/interface/target')
for target in nodes:
interfaces.append(target.get('dev'))
return interfaces
def get_vcpus_info(self):
"""Returns virtual cpus information of guest.
:returns: guest.VCPUInfo
"""
vcpus = self._domain.vcpus()
if vcpus is not None:
for vcpu in vcpus[0]:
yield VCPUInfo(
id=vcpu[0], cpu=vcpu[3], state=vcpu[1], time=vcpu[2])
def delete_configuration(self):
"""Undefines a domain from hypervisor."""
try:
self._domain.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug("Error from libvirt during undefineFlags. %d"
"Retrying with undefine", self.id)
self._domain.undefine()
except AttributeError:
# Older versions of libvirt don't support undefine flags,
# trying to remove managed image
try:
if self._domain.hasManagedSaveImage(0):
self._domain.managedSaveRemove(0)
except AttributeError:
pass
self._domain.undefine()
def has_persistent_configuration(self):
"""Whether domain config is persistently stored on the host."""
return self._domain.isPersistent()
def attach_device(self, conf, persistent=False, live=False):
"""Attaches device to the guest.
:param conf: A LibvirtConfigObject of the device to attach
:param persistent: A bool to indicate whether the change is
persistent or not
:param live: A bool to indicate whether it affects the guest
in running state
"""
flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
self._domain.attachDeviceFlags(conf.to_xml(), flags=flags)
def get_disk(self, device):
"""Returns the disk mounted at device
:returns LibvirtConfigGuestDisk: mounted at device or None
"""
try:
doc = etree.fromstring(self._domain.XMLDesc(0))
except Exception:
return None
node = doc.find("./devices/disk/target[@dev='%s'].." % device)
if node is not None:
conf = vconfig.LibvirtConfigGuestDisk()
conf.parse_dom(node)
return conf
def get_all_disks(self):
"""Returns all the disks for a guest
:returns: a list of LibvirtConfigGuestDisk instances
"""
return self.get_all_devices(vconfig.LibvirtConfigGuestDisk)
def get_all_devices(self, devtype=None):
"""Returns all devices for a guest
:param devtype: a LibvirtConfigGuestDevice subclass
:returns: a list of LibvirtConfigGuestDevice instances
"""
try:
config = vconfig.LibvirtConfigGuest()
config.parse_str(
self._domain.XMLDesc(0))
except Exception:
return []
devs = []
for dev in config.devices:
if (devtype is None or
isinstance(dev, devtype)):
devs.append(dev)
return devs
def detach_device(self, conf, persistent=False, live=False):
"""Detaches device to the guest.
:param conf: A LibvirtConfigObject of the device to detach
:param persistent: A bool to indicate whether the change is
persistent or not
:param live: A bool to indicate whether it affects the guest
in running state
"""
flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
self._domain.detachDeviceFlags(conf.to_xml(), flags=flags)
def get_xml_desc(self, dump_inactive=False, dump_sensitive=False,
dump_migratable=False):
"""Returns xml description of guest.
:param dump_inactive: Dump inactive domain information
:param dump_sensitive: Dump security sensitive information
:param dump_migratable: Dump XML suitable for migration
:returns string: XML description of the guest
"""
flags = dump_inactive and libvirt.VIR_DOMAIN_XML_INACTIVE or 0
flags |= dump_sensitive and libvirt.VIR_DOMAIN_XML_SECURE or 0
flags |= dump_migratable and libvirt.VIR_DOMAIN_XML_MIGRATABLE or 0
return self._domain.XMLDesc(flags=flags)
def save_memory_state(self):
"""Saves the domain's memory state. Requires running domain.
:raises: libvirtError on error
"""
self._domain.managedSave(0)
def get_block_device(self, disk):
"""Returns a block device wrapper for disk."""
return BlockDevice(self, disk)
def set_user_password(self, user, new_pass):
"""Configures a new user password."""
self._domain.setUserPassword(user, new_pass, 0)
def _get_domain_info(self, host):
"""Returns information on Guest
:param host: a host.Host object with current
connection. Unfortunately we need to pass it
because of a workaround for libvirt versions < 1.2.11
:returns list: [state, maxMem, memory, nrVirtCpu, cpuTime]
"""
return compat.get_domain_info(libvirt, host, self._domain)
def get_info(self, host):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
:returns hardware.InstanceInfo:
"""
try:
dom_info = self._get_domain_info(host)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=self.uuid)
msg = (_('Error from libvirt while getting domain info for '
'%(instance_name)s: [Error Code %(error_code)s] %(ex)s') %
{'instance_name': self.name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
return hardware.InstanceInfo(
state=LIBVIRT_POWER_STATE[dom_info[0]],
max_mem_kb=dom_info[1],
mem_kb=dom_info[2],
num_cpu=dom_info[3],
cpu_time_ns=dom_info[4],
id=self.id)
class BlockDevice(object):
"""Wrapper around block device API"""
REBASE_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
COMMIT_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
def __init__(self, guest, disk):
self._guest = guest
self._disk = disk
def abort_job(self, async_=False, pivot=False):
"""Request to cancel any job currently running on the block.
:param async_: Request only, do not wait for completion
(renamed with a trailing underscore because `async` is a
reserved keyword in Python 3.7+)
:param pivot: Pivot to new file when ending a copy or
active commit job
"""
flags = async_ and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC or 0
flags |= pivot and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT or 0
self._guest._domain.blockJobAbort(self._disk, flags=flags)
def get_job_info(self):
"""Returns information about job currently running
:returns: BlockDeviceJobInfo or None
"""
status = self._guest._domain.blockJobInfo(self._disk, flags=0)
if status != -1:
return BlockDeviceJobInfo(
job=status.get("type", 0),
bandwidth=status.get("bandwidth", 0),
cur=status.get("cur", 0),
end=status.get("end", 0))
def rebase(self, base, shallow=False, reuse_ext=False,
copy=False, relative=False):
"""Rebases block to new base
:param shallow: Limit copy to top of source backing chain
:param reuse_ext: Reuse existing external file of a copy
:param copy: Start a copy job
:param relative: Keep backing chain referenced using relative names
"""
flags = shallow and libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW or 0
flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT or 0
flags |= copy and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY or 0
flags |= relative and libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE or 0
return self._guest._domain.blockRebase(
self._disk, base, self.REBASE_DEFAULT_BANDWIDTH, flags=flags)
def commit(self, base, top, relative=False):
"""Commit on block device
For performance during a live snapshot it reduces the disk chain
to a single disk.
:param relative: Keep backing chain referenced using relative names
"""
flags = relative and libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE or 0
return self._guest._domain.blockCommit(
self._disk, base, top, self.COMMIT_DEFAULT_BANDWIDTH, flags=flags)
def resize(self, size_kb):
"""Resizes block device to Kib size."""
self._guest._domain.blockResize(self._disk, size_kb)
def wait_for_job(self, abort_on_error=False, wait_for_job_clean=False):
"""Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
the job is complete, depending on whether the job has been
cleaned up by libvirt yet, or not.
:param abort_on_error: Whether to stop process and raise NovaException
on error (default: False)
:param wait_for_job_clean: Whether to force wait to ensure job is
finished (see bug: LP#1119173)
:returns: True if still in progress
False if completed
"""
status = self.get_job_info()
if not status and abort_on_error:
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
if wait_for_job_clean:
job_ended = status.job == 0
else:
job_ended = status.cur == status.end
return not job_ended
class VCPUInfo(object):
def __init__(self, id, cpu, state, time):
"""Structure for information about guest vcpus.
:param id: The virtual cpu number
:param cpu: The host cpu currently associated
:param state: The running state of the vcpu (0 offline, 1 running, 2
blocked on resource)
:param time: The cpu time used in nanoseconds
"""
self.id = id
self.cpu = cpu
self.state = state
self.time = time
class BlockDeviceJobInfo(object):
def __init__(self, job, bandwidth, cur, end):
"""Structure for information about running job.
:param job: The running job (0 placeholder, 1 pull,
2 copy, 3 commit, 4 active commit)
:param bandwidth: Used in MiB/s
:param cur: Indicates the position between 0 and 'end'
:param end: Indicates the position for this operation
"""
self.job = job
self.bandwidth = bandwidth
self.cur = cur
self.end = end
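# Hedged sketch (helper name is hypothetical, not part of Nova): a simple
# polling loop built on the wrappers above, waiting for a block rebase or
# commit job on one of the guest's disks to finish.
def _wait_for_block_job(guest, disk, interval=0.5):
    import time
    dev = guest.get_block_device(disk)
    while dev.wait_for_job(abort_on_error=True, wait_for_job_clean=True):
        time.sleep(interval)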
|
|
"""
This module groups and sorts Statements for presentation in downstream tools
while aggregating the statements' statistics/metrics into the groupings. While
most usage of this module will be via the top-level function
`group_and_sort_statements`, alternative usages (including custom statement
data, multiple statement grouping levels, and multiple strategies for
aggregating statement-level metrics for higher-level groupings) are supported
through the various classes (see Class Overview below).
Vocabulary
----------
An "agent-pair" is, as the name suggests, a pair of agents from a statement,
usually defined by their canonical names.
A "relation" is the basic information of a statement, with all details (such as
sites, residues, mutations, and bound conditions) stripped away. Usually this
means it is just the statement type (or verb), subject name, and object name,
though in some corner cases it is different.
Simple Example
--------------
The principal function in the module is `group_and_sort_statements`, and if you
want statements grouped into agent-pairs, then by relations, sorted by evidence
count, simply use the function with its defaults, e.g.:
.. code-block:: python
for _, ag_key, rels, ag_metrics in group_and_sort_statements(stmts):
print(ag_key)
for _, rel_key, stmt_data, rel_metrics in rels:
print('\t', rel_key)
for _, stmt_hash, stmt_obj, stmt_metrics in stmt_data:
print('\t\t', stmt_obj)
Advanced Example
----------------
Custom data and aggregation methods are supported, respectively, by using
instances of the `StmtStat` class and subclassing the BasicAggregator (or more
generally, the AggregatorMeta) API. Custom sorting is implemented by defining
and passing a `sort_by` function to `group_and_sort_statements`.
For example, if you have custom statement metrics (e.g., a value obtained by
experiment such as differential expression of subject or object genes), want
the statements grouped only to the level of relations, and want to sort the
statements and relations independently. Suppose also that your measurement
applies equally at the statement and relation level and hence you don't want
any changes applied during aggregation (e.g. averaging). This is illustrated in
the example below:
.. code-block:: python
# Define a new aggregator that doesn't apply any aggregation function to
# the data, simply taking the last metric (effectively a noop):
class NoopAggregator(BasicAggregator):
def _merge(self, metric_array):
self.values = metric_array
# Create your StmtStat using custom data dict `my_data`, a dict of values
# keyed by statement hash:
my_stat = StmtStat('my_stat', my_data, int, NoopAggregator)
# Define a custom sort function using my stat and the default available
# ev_count. In effect this will sort relations by the custom stat, and then
# secondarily sort the statements within that relation (for which my_stat
# is by design the same) using their evidence counts.
def my_sort(metrics):
return metrics['my_stat'], metrics['ev_count']
# Iterate over the results.
groups = group_and_sort_statements(stmts, sort_by=my_sort,
custom_stats=[my_stat],
grouping_level='relation')
for _, rel_key, rel_stmts, rel_metrics in groups:
print(rel_key, rel_metrics['my_stat'])
for _, stmt_hash, stmt, metrics in rel_stmts:
print('\t', stmt, metrics['ev_count'])
Class Overview
--------------
Statements can have multiple metrics associated with them, most commonly
belief, evidence counts, and source counts, although other metrics may also be
applied. Such metrics imply an order on the set of Statements, and a user
should be able to apply that order to them for sorting or filtering.
These types of metric, or "stat", are represented by `StmtStat` classes.
Statements can be grouped based on the information they represent: by their
agents (e.g. subject is MEK and object is ERK), and by their type (e.g.
Phosphorylation). These groups are represented by `StmtGroup` objects, which on
their surface behave much like `defaultdict(list)` would, though more is going
on behind the scenes. The StmtGroup class is used internally by
`group_and_sort_statements` and would only need to be used directly if defining
an alternative statement-level grouping approach (e.g., grouping statements by
subject).
Like Statements, higher-level statement groups are subject to sorting and
filtering. That requires that the `StmtStat`s be aggregated over the statements
in a group. The Aggregator classes serve this purpose, using numpy to do sums
over arrays of metrics as Statements are "included" in the `StmtGroup`. Each
`StmtStat` must declare how its data should be aggregated, as different kinds
of data aggregate differently. Custom aggregation methods can be implemented by
subclassing the `BasicAggregator` class and using an instance of the custom
class to define a `StmtStat`.
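As a further illustration, a custom aggregator may also override `_finalize` to
post-process the accumulated values once a group is complete. The sketch below
is hypothetical and not part of this module (`my_data` is assumed to be a dict
of values keyed by statement hash, as in the advanced example above); it
reports the root mean square of a metric over the statements in a group:
.. code-block:: python
    class RmsAggregator(BasicAggregator):
        # Accumulate the element-wise sum of squared metrics...
        def _merge(self, metric_array):
            self._values += metric_array ** 2
        # ...then take the root mean square once the group is finished.
        def _finalize(self):
            self._values = (self._values / self._count) ** 0.5
    my_stat = StmtStat('my_rms_stat', my_data, float, RmsAggregator)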
"""
import logging
from collections import defaultdict
from itertools import permutations
from numpy import array, zeros, maximum, concatenate, append
from indra.assemblers.english import EnglishAssembler
from indra.statements import Agent, Influence, Event, get_statement_by_name, \
Statement
logger = logging.getLogger(__name__)
db_sources = ['psp', 'cbn', 'pc', 'bel_lc', 'signor', 'biogrid', 'lincs_drug',
'tas', 'hprd', 'trrust', 'ctd', 'vhn', 'pe', 'drugbank',
'omnipath', 'conib', 'crog', 'dgi']
reader_sources = ['geneways', 'tees', 'isi', 'trips', 'rlimsp', 'medscan',
'sparser', 'eidos', 'reach']
# These are mappings where the actual INDRA source, as it appears
# in the evidence source_api is inconsistent with the colors here and
# with what comes out of the INDRA DB
internal_source_mappings = {
'bel': 'bel_lc',
'phosphoelm': 'pe',
'biopax': 'pc',
'virhostnet': 'vhn',
'phosphosite': 'psp',
}
reverse_source_mappings = {v: k for k, v in internal_source_mappings.items()}
all_sources = db_sources + reader_sources
def _get_relation_keyed_stmts(stmt_list, expand_nary=True):
"""Low level generator over a list of statements, intended for internal use.
    Non-unique grouping keys are generated for each statement. Each row
    generated will contain a tuple, beginning with a relation key,
    generally of the form (verb, ...agents...), though sometimes different for
    some kinds of Statement (e.g. ActiveForm), an agent key, which
    is always (...agents...), and the Statement object itself.
    If `expand_nary` is set to True (the default), Complexes and
Conversions will have their many agents grouped into appropriate pairs,
with each pair yielded as a separate entry IN ADDITION to an entry for the
full set of agents. So Complex(A(), B(), C()) will yield entries for:
- Complex(A(), B()),
- Complex(B(), C()),
- Complex(A(), C()), and
- Complex(A(), B(), C()).
If False, only Complex(A(), B(), C()) will be generated.
"""
def name(agent):
return 'None' if agent is None else agent.name
for s in stmt_list:
# Create a key.
verb = s.__class__.__name__
ags = s.agent_list()
rel_key = None
if verb == 'Complex':
ag_ns = {name(ag) for ag in ags}
if expand_nary:
if 1 < len(ag_ns) < 6:
for pair in permutations(ag_ns, 2):
yield (verb,) + tuple(pair), tuple(pair), s
if len(ag_ns) == 2:
continue
ag_key = tuple(sorted(ag_ns))
elif verb == 'Conversion':
subj = name(s.subj)
objs_from = tuple(sorted({name(ag) for ag in s.obj_from}))
objs_to = tuple(sorted({name(ag) for ag in s.obj_to}))
if expand_nary:
for obj in objs_from:
yield (verb, subj, objs_from, objs_to), (subj, obj), s
for obj in objs_to:
yield (verb, subj, objs_from, objs_to), (subj, obj), s
ag_key = (subj, objs_from, objs_to)
elif verb in ['ActiveForm', 'HasActivity']:
ag_name = name(ags[0])
ag_key = (ag_name,)
rel_key = (verb, ag_name, s.activity,
s.is_active if verb == 'ActiveForm' else s.has_activity)
elif verb == 'Influence':
sns, sid = s.subj.concept.get_grounding()
ons, oid = s.obj.concept.get_grounding()
skey = s.subj.concept.name if not sid \
else sid.split('/')[-1].replace('_', ' ')
okey = s.obj.concept.name if not oid \
else oid.split('/')[-1].replace('_', ' ')
ag_key = (skey, okey)
else:
ag_key = tuple([name(ag) for ag in ags])
# Set the default relation key.
if rel_key is None:
rel_key = (verb,) + ag_key
# Yield the next (default) element.
yield rel_key, ag_key, s
class StmtStat:
"""Abstraction of a metric applied to a set of statements.
    Can be instantiated via the constructor or one of two factory class methods:
- s = StmtStat(name, {hash: value, ...}, data_type, AggClass)
- [s1, ...] = \
StmtStat.from_dicts({hash: {label: value, ...}, ...}, data_type, AggClass)
- [s_ev_count, s_belief] = \
StmtStat.from_stmts([Statement(), ...], ('ev_count', 'belief'))
Note that each stat will have only one metric associated with it, so dicts
ingested by `from_dicts` will have their values broken up into separate
StmtStat instances.
Parameters
----------
name : str
The label for this data (e.g. "ev_count" or "belief")
data : dict{int: Number}
The relevant statistics as a dict keyed by hash.
data_type : type
The type of the data (e.g. `int` or `float`).
agg_class : type
A subclass of BasicAggregator which defines how these statistics will be
merged.
"""
def __init__(self, name, data, data_type, agg_class):
self.name = name
self.data = data
self.data_type = data_type
self.agg_class = agg_class
@classmethod
def from_dicts(cls, dict_data, data_type, agg_class):
"""Generate a list of StmtStat's from a dict of dicts.
Example Usage:
>> source_counts = {9623812756876: {'reach': 1, 'sparser': 2},
>> -39877587165298: {'reach': 3, 'sparser': 0}}
>> stmt_stats = StmtStat.from_dicts(source_counts, int, SumAggregator)
Parameters
----------
dict_data : dict{int: dict{str: Number}}
            A dictionary keyed by hash with dictionary elements, where each
            element gives a set of measurements for the statement, with labels
            as keys. A common example is `source_counts`.
data_type : type
The type of the data being given (e.g. `int` or `float`).
agg_class : type
A subclass of BasicAggregator which defines how these statistics
will be merged (e.g. `SumAggregator`).
"""
data_groups = defaultdict(dict)
for h, data_dict in dict_data.items():
for name, value in data_dict.items():
data_groups[name][h] = value
data_groups = dict(data_groups)
classes = []
for class_name, class_data in data_groups.items():
classes.append(cls(class_name, class_data, data_type, agg_class))
return classes
@classmethod
def from_stmts(cls, stmt_list, values=None):
"""Generate a list of StmtStat's from a list of stmts.
The stats will include "ev_count", "belief", and "ag_count" by default,
but a more limited selection may be specified using `values`.
Example usage:
>> stmt_stats = StmtStat.from_stmts(stmt_list, ('ag_count', 'belief'))
Parameters
----------
stmt_list : list[Statement]
A list of INDRA statements, from which basic stats will be derived.
values : Optional[tuple(str)]
A tuple of the names of the values to gather from the list of
statements. For example, if you already have evidence counts, you
might only want to gather belief and agent counts.
"""
type_dict = {'ev_count': {'type': int, 'agg': SumAggregator},
'belief': {'type': float, 'agg': MaxAggregator},
'ag_count': {'type': int, 'agg': SumAggregator}}
if values is None:
values = tuple(type_dict.keys())
# Iterate over statements, filling in values that may have been
# missing.
data = {k: {} for k in values}
for stmt in stmt_list:
sh = stmt.get_hash()
if 'ev_count' in values:
data['ev_count'][sh] = len(stmt.evidence)
if 'belief' in values:
data['belief'][sh] = stmt.belief
if 'ag_count' in values:
data['ag_count'][sh] = len(stmt.agent_list())
# Create the objects.
return [cls(k, d, type_dict[k]['type'], type_dict[k]['agg'])
for k, d in data.items()]
def make_standard_stats(ev_counts=None, beliefs=None, source_counts=None):
"""Generate the standard ev_counts, beliefs, and source count stats."""
stats = []
if ev_counts:
stats.append(StmtStat('ev_count', ev_counts, int, SumAggregator))
if beliefs:
stats.append(StmtStat('belief', beliefs, float, MaxAggregator))
if source_counts:
stats.extend(StmtStat.from_dicts(source_counts, int, SumAggregator))
return stats
class StmtGroup:
"""Creates higher-level stmt groupings and aggregates metrics accordingly.
Used internally by `group_and_sort_statements`.
This class manages the accumulation of statistics for statement groupings,
such as by relation or agent pair. It calculates metrics for these
higher-level groupings using metric-specific aggregators implementing the
AggregatorMeta API (e.g., MultiAggregator and any children of
BasicAggregator).
For example, evidence counts for a relation can be calculated as the sum of
the statement-level evidence counts, while the belief for the relation can
be calculated as the average or maximum of the statement-level beliefs.
The primary methods for instantiating this class are the two factory
class methods:
- from_stmt_stats
- from_dicts
See the methods for more details on their purpose and usage.
Once instantiated, the StmtGroup behaves like a defaultdict of lists, where
the keys are group-level keys, and the lists contain statements.
Statements can be iteratively added to the group via the dict-like syntax
`stmt_group[group_key].include(stmt)`. This allows the caller to generate
keys and trigger metric aggregation in a single iteration over statements.
Example usage:
.. code-block:: python
# Get ev_count, belief, and ag_count from a list of statements.
stmt_stats = StmtStat.from_stmts(stmt_list)
# Add another stat for a measure of relevance
stmt_stats.append(
StmtStat('relevance', relevance_dict, float, AveAggregator)
)
# Create the Group
sg = StmtGroup.from_stmt_stats(*stmt_stats)
# Load it full of Statements, grouped by agents.
sg.fill_from_stmt_stats()
sg.start()
for s in stmt_list:
            key = tuple(ag.get_grounding() for ag in s.agent_list())
sg[key].include(s)
sg.finish()
# Now the stats for each group are aggregated and available for use.
metrics = sg[(('FPLX', 'MEK'), ('FPLX', 'ERK'))].get_dict()
"""
@classmethod
def from_stmt_stats(cls, *stmt_stats):
"""Create a stmt group from StmtStat objects.
        Return a StmtGroup constructed from the given StmtStat objects. This
        method offers the user the most control and customizability.
        """
"""
# Organize the data into groups by aggregation class.
stat_groups = defaultdict(lambda: {'stats': defaultdict(list),
'keys': [], 'types': []})
for stat in stmt_stats:
if not isinstance(stat, StmtStat):
raise ValueError("All arguments must be `StmtStat` object.")
stat_groups[stat.agg_class]['keys'].append(stat.name)
stat_groups[stat.agg_class]['types'].append(stat.data_type)
for h, v in stat.data.items():
stat_groups[stat.agg_class]['stats'][h].append(v)
return cls(stat_groups)
@classmethod
def from_dicts(cls, ev_counts=None, beliefs=None, source_counts=None):
"""Init a stmt group from dicts keyed by hash.
Return a StmtGroup constructed from the given keyword arguments.
The dict keys of `source_counts` will be broken out into their own
StmtStat objects, so that the resulting data model is in effect a flat
        list of measurement parameters. There is some risk of name collision,
        so take care not to name any sources "ev_count" or "belief".
        """
"""
stats = make_standard_stats(ev_counts=ev_counts, beliefs=beliefs,
source_counts=source_counts)
return cls.from_stmt_stats(*stats)
def __init__(self, stat_groups):
"""In this case, init is primarily intended for internal use."""
self.__stats = {}
self.__started = False
self.__finished = False
# Check the groups and solidify them in more immutable types.
hash_set = None
self.__stmt_stats = {}
rows = []
for agg_class, info_dict in stat_groups.items():
if hash_set is None:
hash_set = set(info_dict['stats'].keys())
else:
if hash_set != set(info_dict['stats'].keys()):
raise ValueError(f"Stats from {info_dict['keys']} do "
f"not cover the same corpora of hashes.")
self.__stmt_stats[agg_class] = {
'stats': {h: array(l) for h, l in info_dict['stats'].items()},
'keys': tuple(info_dict['keys']),
'types': tuple(info_dict['types'])
}
rows.extend(info_dict['keys'])
self.__rows = tuple(rows)
def add_stats(self, *stmt_stats):
"""Add more stats to the object.
        If you have started accumulating data from statements and doing
        aggregation (i.e. if you have "started"), or if you are "finished",
        this request will lead to an error.
"""
new_stats = [s for s in stmt_stats if s.name not in self.row_set()]
if not new_stats:
return
if self.__started or self.__finished:
raise RuntimeError("Cannot add stats after accumulation has "
"started or after it has finished.")
for stat in new_stats:
if not isinstance(stat, StmtStat):
raise ValueError("All arguments must be StmtStat objects.")
if stat.agg_class in self.__stmt_stats:
self.__stmt_stats[stat.agg_class]['keys'] += (stat.name,)
self.__stmt_stats[stat.agg_class]['types'] += (stat.data_type,)
for h, v in stat.data.items():
old_arr = self.__stmt_stats[stat.agg_class]['stats'][h]
self.__stmt_stats[stat.agg_class]['stats'][h] = \
append(old_arr, v)
else:
self.__stmt_stats[stat.agg_class] = {
'stats': {h: array([v]) for h, v in stat.data.items()},
'keys': (stat.name,),
'types': (stat.data_type,)
}
self.__rows += (stat.name,)
return
def row_set(self):
"""Get a set of the rows (data labels) of the stats in this instance."""
return set(self.__rows)
def __getitem__(self, key):
if key not in self.__stats:
if not self.__started:
raise KeyError(f"Could not add key {key} before "
"accumulation started.")
if not self.__finished:
# Remember, this is passing REFERENCES to the stats dict.
self.__stats[key] = MultiAggregator(
agg_class(d['keys'], d['stats'], d['types'])
for agg_class, d in self.__stmt_stats.items())
else:
raise KeyError(f"Key \"{key}\" not found! "
f"{self.__class__.__name__} is finished.")
return self.__stats[key]
def start(self):
"""Mark the start of Statement aggregation.
This will freeze the addition of StmtStats and will enable new keyed
entries to be added and aggregated.
"""
self.__started = True
def finish(self):
"""Finish adding entries, new keys will be rejected."""
self.__finished = True
for stat_grp in self.__stats.values():
stat_grp.finish()
return
def is_finished(self):
return self.__finished
def is_started(self):
return self.__started
def get_new_instance(self):
"""Create an instance to gather another level of data."""
return self.__class__(self.__stmt_stats)
def fill_from_stmt_stats(self):
"""Use the statements stats as stats and hashes as keys.
This is used if you decide you just want to represent statements.
"""
if self.__started or self.__finished:
raise RuntimeError("Cannot fill from stats if accumulation has"
"already started or after it has finished.")
# Gather stat rows from the stmt_stats.
stat_rows = defaultdict(lambda: {'keys': tuple(), 'arr': array([]),
'types': tuple()})
for info_dict in self.__stmt_stats.values():
for h, arr in info_dict['stats'].items():
stat_rows[h]['keys'] += info_dict['keys']
stat_rows[h]['arr'] = concatenate([stat_rows[h]['arr'], arr])
stat_rows[h]['types'] += info_dict['types']
stat_rows = dict(stat_rows)
# Fill up the stats.
for h, data in stat_rows.items():
self.__stats[h] = BasicAggregator.from_array(data['keys'],
data['arr'],
data['types'])
# Mark as finished.
self.finish()
return
class AggregatorMeta:
"""Define the API for an aggregator of statement metrics.
In general, an aggregator defines the ways that different kinds of
statement metrics are merged into groups. For example, evidence counts are
aggregated by summing, as are counts for various sources. Beliefs are
aggregated over a group of statements by maximum (usually).
"""
def include(self, stmt):
"""Add the metrics from the given statement to this aggregate."""
raise NotImplementedError()
def get_dict(self):
"""Get a dictionary representation of the data in this aggregate.
Keys are those originally given to the StmtStat instances used to
build this aggregator.
"""
raise NotImplementedError()
def finish(self):
raise NotImplementedError()
class MultiAggregator(AggregatorMeta):
"""Implement the AggregatorMeta API for multiple BasicAggregator children.
Takes an iterable of BasicAggregator children.
"""
def __init__(self, basic_aggs):
self.__basic_aggs = tuple(basic_aggs)
self.__keymap = {k: stat for stat in self.__basic_aggs
for k in stat.keys()}
return
def include(self, stmt):
for basic_agg in self.__basic_aggs:
basic_agg.include(stmt)
def get_dict(self):
return {k: v for basic_agg in self.__basic_aggs
for k, v in basic_agg.get_dict().items()}
def finish(self):
for basic_agg in self.__basic_aggs:
basic_agg.finish()
def __getitem__(self, key):
return self.__keymap[key][key]
class BasicAggregator(AggregatorMeta):
"""Gathers measurements for a statement or similar entity.
By defining a child of BasicAggregator, specifically defining the
operations that gather new data and finalize that data once all the
statements are collected, one can use arbitrary statistical methods to
aggregate metrics for high-level groupings of Statements for subsequent
sorting or filtering purposes.
Parameters
----------
    keys : list[str]
        The names (labels) of the metrics handled by this aggregator, in the
        order in which they appear in each metrics array.
    stmt_metrics : dict{int: np.ndarray}
        A dictionary keyed by statement hash, where each element is an array
        of metric values ordered like `keys`.
    original_types : tuple(type)
        The type classes of each numerical value stored in the metrics
        arrays, e.g. `(int, float, int)`.
"""
def __init__(self, keys, stmt_metrics, original_types):
self._keys = keys
self._stmt_metrics = stmt_metrics
self._original_types = original_types
self._values = zeros(len(keys))
self._count = 0
self.__stmt_hashes = set()
self.__frozen = False
self.__dict = None
@classmethod
def from_array(cls, keys, arr, original_types, stmt_metrics=None):
new_cls = cls(keys, stmt_metrics, original_types)
new_cls._values = arr
return new_cls
def _finalize(self):
return
def finish(self):
self._finalize()
self.__frozen = True
def include(self, stmt):
"""Include a statement and its statistics in the group."""
if self.__frozen:
raise RuntimeError(f"No longer adding more stmt data to "
f"{self.__class__.__name__}.")
if not isinstance(stmt, Statement):
raise ValueError(f"Invalid type for addition to BasicAggregator: "
f"{type(stmt)}. Must be a Statement.")
h = stmt.get_hash()
if h in self.__stmt_hashes:
return
assert self._stmt_metrics and h in self._stmt_metrics
self._merge(self._stmt_metrics[h])
self._count += 1
self.__stmt_hashes.add(h)
def _merge(self, metric_array):
        raise NotImplementedError()
def __getitem__(self, item):
if item not in self._keys:
raise KeyError(f"Key '{item}' not found!")
idx = self._keys.index(item)
return self._values[idx].astype(self._original_types[idx])
def keys(self):
return self._keys[:]
def get_dict(self):
if not self.__frozen:
raise RuntimeError("Cannot load source dict until frozen.")
if self.__dict is None:
self.__dict = {key: value.astype(original_type)
for key, value, original_type
in zip(self._keys, self._values,
self._original_types)}
return self.__dict
class SumAggregator(BasicAggregator):
"""A stats aggregator that executes a sum."""
def _merge(self, metric_array):
self._values += metric_array
class AveAggregator(BasicAggregator):
"""A stats aggregator averages the included statement metrics."""
def _merge(self, metric_array):
self._values += metric_array
def _finalize(self):
self._values = self._values / self._count
class MaxAggregator(BasicAggregator):
"""A stats aggregator that takes the max of statement metrics."""
def _merge(self, metric_array):
self._values = maximum(self._values, metric_array)
def _get_ag_name_set_len(stmt):
return len(set(a.name if a else 'None' for a in stmt.agent_list()))
def group_and_sort_statements(stmt_list, sort_by='default', custom_stats=None,
grouping_level='agent-pair'):
"""Group statements by type and arguments, and sort by prevalence.
Parameters
----------
stmt_list : list[Statement]
A list of INDRA statements.
sort_by : str or function or None
        If str, it indicates which parameter to sort by, such as 'belief',
        'ev_count', or 'ag_count'. Those are the default options because they
        can be derived from a list of statements; however, if you pass
        `custom_stats`, you may also sort by the name of any of those stats.
        The default, 'default', is mostly a sort by ev_count but also favors
        statements with fewer agents. Alternatively, you may give a function
        that takes a single argument, a dictionary of metrics. These
        metrics are determined by the contents of the `custom_stats` passed
        as an argument (see StmtGroup for details), or else will contain
the default metrics that can be derived from the statements themselves,
namely `ev_count`, `belief`, and `ag_count`. The value may also
be None, in which case the sort function will return the
same value for all elements, and thus the original order of elements
will be preserved. This could have strange effects when statements are
grouped (i.e. when `grouping_level` is not 'statement'); such
functionality is untested and we make no guarantee that it will work.
custom_stats : list[StmtStat]
A list of custom statement statistics to be used in addition to, or upon
name conflict in place of, the default statement statistics derived from
the list of statements.
grouping_level : str
The options are 'agent-pair', 'relation', and 'statement'. These
correspond to grouping by agent pairs, agent and type relationships, and
a flat list of statements. The default is 'agent-pair'.
Returns
-------
sorted_groups : list[tuple]
A list of tuples of the form (sort_param, key, contents, metrics), where
the sort param is whatever value was calculated to sort the results,
the key is the unique key for the agent pair, relation, or statements,
and the contents are either relations, statements, or statement JSON,
        depending on the level. This structure is recursive, so each list
        of relations will also follow this structure, all the way down to
        the lowest level (statement JSON). The metrics are a dict of the
        aggregated metrics for the entry (e.g. source counts, evidence
        counts, etc.).
"""
# Validate the grouping level parameter.
if grouping_level not in ['agent-pair', 'relation', 'statement']:
raise ValueError(f"Invalid grouping level: \"{grouping_level}\".")
# Get any missing default metrics.
if custom_stats is not None:
stats = custom_stats[:]
stat_rows = {stat.name for stat in custom_stats}
else:
stats = []
stat_rows = set()
missing_rows = {'ev_count', 'belief', 'ag_count'} - stat_rows
if missing_rows:
stats += StmtStat.from_stmts(stmt_list, missing_rows)
# Init the base group.
base_group = StmtGroup.from_stmt_stats(*stats)
base_group.fill_from_stmt_stats()
# Define the sort function.
if isinstance(sort_by, str):
def _sort_func(metric):
assert isinstance(sort_by, str)
if sort_by == 'default':
return metric['ev_count'] + 1/(1 + metric['ag_count'])
return metric[sort_by]
elif sort_by is None:
def _sort_func(metric):
return 0
else:
# Check that the sort function is a valid function.
sample_dict = dict.fromkeys(base_group.row_set(), 0)
try:
n = sort_by(sample_dict)
# If the return value is not sortable, this will raise a TypeError.
n < n
except Exception as e:
raise ValueError(f"Invalid sort function: {e}")
# Assign the function.
_sort_func = sort_by
# Write a recursive method to group statement content.
def iter_rows(rows, *metric_dicts):
assert metric_dicts
for key, contents in rows:
metrics = metric_dicts[0][key].get_dict()
if len(metric_dicts) > 1:
if isinstance(contents, dict):
contents = contents.items()
contents = sorted_rows(contents, *metric_dicts[1:])
yield (_sort_func(metrics), str(key)) if sort_by else 0, \
key, contents, metrics
def sorted_rows(rows, *metric_dicts):
return sorted(iter_rows(rows, *metric_dicts), key=lambda t: t[0],
reverse=True)
# Return the sorted statements, if that's all you want.
if grouping_level == 'statement':
stmt_rows = ((s.get_hash(), s) for s in stmt_list)
return sorted_rows(stmt_rows, base_group)
    # Create objects for gathering metrics at the higher grouping levels.
relation_metrics = base_group.get_new_instance()
relation_metrics.start()
if grouping_level == 'agent-pair':
agent_pair_metrics = base_group.get_new_instance()
agent_pair_metrics.start()
# Add up the grouped statements from the metrics.
if grouping_level == 'relation':
grouped_stmts = defaultdict(list)
else:
grouped_stmts = defaultdict(lambda: defaultdict(list))
expand = (grouping_level == 'agent-pair')
for rel_key, ag_key, stmt in _get_relation_keyed_stmts(stmt_list, expand):
relation_metrics[rel_key].include(stmt)
if grouping_level == 'agent-pair':
grouped_stmts[ag_key][rel_key].append((stmt.get_hash(), stmt))
agent_pair_metrics[ag_key].include(stmt)
else:
grouped_stmts[rel_key].append((stmt.get_hash(), stmt))
# Stop filling these stmt groups. No more "new" keys.
relation_metrics.finish()
if grouping_level == 'agent-pair':
agent_pair_metrics.finish()
# Sort the rows by count and agent names.
if grouping_level == 'relation':
return sorted_rows(grouped_stmts.items(), relation_metrics,
base_group)
return sorted_rows(grouped_stmts.items(), agent_pair_metrics,
relation_metrics, base_group)
def make_stmt_from_relation_key(relation_key, agents=None):
"""Make a Statement from the relation key.
    Specifically, make a Statement object from the relation key used by
    `group_and_sort_statements`.
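    Example (an illustrative sketch with hypothetical agent names):
    >> make_stmt_from_relation_key(('Phosphorylation', 'MAP2K1', 'MAPK1'))
    Phosphorylation(MAP2K1(), MAPK1())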
"""
def make_agent(name):
if name == 'None' or name is None:
return None
return Agent(name)
verb = relation_key[0]
inps = relation_key[1:]
StmtClass = get_statement_by_name(verb)
if agents is None:
agents = []
if verb == 'Complex':
agents.extend([make_agent(name) for name in inps])
stmt = StmtClass(agents[:])
elif verb == 'Conversion':
names_from = [make_agent(name) for name in inps[1]]
names_to = [make_agent(name) for name in inps[2]]
agents.extend(names_from + names_to)
stmt = StmtClass(make_agent(inps[0]), names_from, names_to)
elif verb == 'ActiveForm' or verb == 'HasActivity':
agents.extend([make_agent(inps[0])])
stmt = StmtClass(agents[0], inps[1], inps[2])
elif verb == 'Influence':
agents.extend([make_agent(inp) for inp in inps[:2]])
stmt = Influence(*[Event(ag) for ag in agents])
elif verb == 'Association':
agents.extend([make_agent(inp) for inp in inps])
stmt = StmtClass([Event(ag) for ag in agents])
else:
agents.extend([make_agent(name) for name in inps])
stmt = StmtClass(*agents)
return stmt
def stmt_to_english(stmt):
"""Return an English assembled Statement as a sentence."""
ea = EnglishAssembler([stmt])
return ea.make_model()[:-1]
def make_string_from_relation_key(rel_key):
"""Make a Statement string via EnglishAssembler from the relation key.
Specifically, make a string from the key used by `group_and_sort_statements`
for contents grouped at the relation level.
"""
stmt = make_stmt_from_relation_key(rel_key)
return stmt_to_english(stmt)
def get_simplified_stmts(stmts):
simple_stmts = []
for rel_key, _, _ in _get_relation_keyed_stmts(stmts, expand_nary=False):
simple_stmts.append(make_stmt_from_relation_key(rel_key))
return simple_stmts
def _str_conversion_bits(tpl):
bolds = ['<b>%s</b>' % el for el in tpl]
return ', '.join(bolds[:-1]) + ', and ' + bolds[-1]
def make_top_level_label_from_names_key(names):
"""Make an english string from the tuple names."""
try:
if len(names) == 3 and isinstance(names[1], tuple): # Conversions
el_from = _str_conversion_bits(names[1])
el_to = _str_conversion_bits(names[2])
tl_label = ("<b>%s</b> converts %s to %s"
% (names[0], el_from, el_to))
else:
b_names = ['<b>%s</b>' % name for name in names]
if len(names) == 1:
tl_label = b_names[0]
elif len(names) == 2: # Singleton Modifications
if names[0] is None or names[0] == 'None':
tl_label = b_names[1] + " is modified"
else:
tl_label = b_names[0] + " affects " + b_names[1]
elif names[1] == "activity": # ActiveForms
if names[2] or names[2] == "True":
tl_label = b_names[0] + " is active"
else:
tl_label = b_names[0] + " is not active"
else: # Large Complexes
tl_label = b_names[0] + " affects "
tl_label += ", ".join(b_names[1:-1]) + ', and ' + b_names[-1]
return tl_label
except Exception as e:
logger.error("Could not handle: %s" % str(names))
raise e
def standardize_counts(counts):
"""Standardize hash-based counts dicts to be int-keyed."""
standardized_counts = {}
for k, v in counts.items():
try:
int_k = int(k)
standardized_counts[int_k] = v
except ValueError:
logger.warning('Could not convert statement hash %s to int' % k)
return standardized_counts
def get_available_ev_counts(stmts):
return {stmt.get_hash(): len(stmt.evidence) for stmt in stmts}
def get_available_beliefs(stmts):
return {stmt.get_hash(): stmt.belief for stmt in stmts}
def get_available_source_counts(stmts):
return {stmt.get_hash(): _get_available_ev_source_counts(stmt.evidence)
for stmt in stmts}
def _get_available_ev_source_counts(evidences):
counts = _get_initial_source_counts()
for ev in evidences:
sa = internal_source_mappings.get(ev.source_api, ev.source_api)
try:
counts[sa] += 1
except KeyError:
continue
return counts
def _get_initial_source_counts():
return {s: 0 for s in all_sources}
|
|
from collections import deque
from queue import PriorityQueue
from node import Node
from board import Board
from time import process_time
class Solver(object):
"""Solves a puzzle using one of the following methods:
BFS --> Breadth First Search
DFS --> Depth First Search
AST --> A-star search
IDA --> Ida-star search
"""
def __init__(self, method, initialState):
self.method = method # method used to solve puzzle
self.state = Node(initialState) # instance of State class
#self.tree = self.state # tree starting from initial configuration
if self.method == 'bfs':
self.frontier = deque([self.state], None)
elif self.method == 'dfs':
self.frontier = [self.state] # list of states to be explored
elif self.method == 'ast':
self.frontier = PriorityQueue()
self.frontier.put(self.state)
elif self.method == 'ida':
self.frontier = [self.state]
            self.threshold = 1
self.initialState = Node(initialState)
self.explored = set() # list of states already explored
self.goal = Node(list(range(len(initialState.split(',')))))
self.pathToGoal = [] # something like ['Up', 'Left', 'Left']
self.costOfPath = 0
self.nodesExpanded = 0
self.fringeSize = 1
self.maxFringeSize = 0
self.searchDepth = 0
self.maxSearchDepth = 0
self.runningTime = 0.0
self.maxRamUsage = 0.0
self.start = process_time()
def solve(self):
"""Main method for solving puzzle"""
if self.method == 'bfs':
retVal = self.bfs()
elif self.method == 'dfs':
retVal = self.dfs()
elif self.method == 'ast':
retVal = self.ast()
elif self.method == 'ida':
retVal = self.ida()
while retVal is not True:
self.threshold = self.threshold + 1
self.frontier = [self.initialState]
self.explored = set()
self.nodesExpanded = 0
self.fringeSize = 1
retVal = self.ida()
else:
raise ValueError('Possible methods are dfs, bfs, ast, ida')
if not retVal:
raise RuntimeError('Solver didn\'t reach final state')
self.runningTime = process_time() - self.start
        self.maxRamUsage = 0  # resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
def bfs(self):
while len(self.frontier) > 0:
self.state = self.frontier.popleft()
#print("Current State: " + str(self.state.board.values))
self.fringeSize -= 1
self.explored.add(str(self.state.board.values))
if self.state.testEqual(self.goal):
self.searchDepth = self.state.depth
self.costOfPath = self.state.depth
self.pathToGoal = self.getPathToGoal()
return True
for neighbour in self.state.neighbours():
#if not neighbour.belongs(self.frontier) and not neighbour.belongs(self.explored):
if str(neighbour.board.values) not in self.explored:
self.frontier.append(neighbour)
self.explored.add(str(neighbour.board.values))
self.fringeSize += 1
if neighbour.depth > self.maxSearchDepth:
self.maxSearchDepth = neighbour.depth
self.nodesExpanded += 1
if self.fringeSize > self.maxFringeSize:
self.maxFringeSize = self.fringeSize
def dfs(self):
while len(self.frontier) > 0:
self.state = self.frontier.pop()
#print("Current State:\n" + str(self.state))
self.fringeSize -= 1
self.explored.add(str(self.state.board.values))
if self.state.testEqual(self.goal):
self.searchDepth = self.state.depth
self.costOfPath = self.state.depth
self.pathToGoal = self.getPathToGoal()
return True
neighbours = reversed(self.state.neighbours())
for neighbour in neighbours:
#if not neighbour.belongs(self.frontier) and not neighbour.belongs(self.explored):
if str(neighbour.board.values) not in self.explored:
self.frontier.append(neighbour)
self.explored.add(str(neighbour.board.values))
self.fringeSize += 1
if neighbour.depth > self.maxSearchDepth:
self.maxSearchDepth = neighbour.depth
self.nodesExpanded += 1
if self.fringeSize > self.maxFringeSize:
self.maxFringeSize = self.fringeSize
def ast(self):
while self.frontier.qsize() > 0:
self.state = self.frontier.get()
#print("Current State:\n" + str(self.state))
self.fringeSize -= 1
self.explored.add(str(self.state.board.values))
if self.state.testEqual(self.goal):
self.searchDepth = self.state.depth
self.costOfPath = self.state.depth
self.pathToGoal = self.getPathToGoal()
return True
neighbours = self.state.neighbours()
for neighbour in neighbours:
if str(neighbour.board.values) not in self.explored:
neighbour.heuristics = neighbour.depth + neighbour.board.manhattanDist()
self.frontier.put(neighbour)
self.explored.add(str(neighbour.board.values))
self.fringeSize += 1
if neighbour.depth > self.maxSearchDepth:
self.maxSearchDepth = neighbour.depth
self.nodesExpanded += 1
if self.fringeSize > self.maxFringeSize:
self.maxFringeSize = self.fringeSize
def ida(self):
while len(self.frontier) > 0:
self.state = self.frontier.pop()
#print("Current State:\n" + str(self.state))
self.fringeSize = len(self.frontier)
self.explored.add(str(self.state.board.values))
if self.state.depth > self.maxSearchDepth:
self.maxSearchDepth = self.state.depth
if self.state.testEqual(self.goal):
self.searchDepth = self.state.depth
self.costOfPath = self.state.depth
self.pathToGoal = self.getPathToGoal()
return True
neighbours = reversed(self.state.neighbours())
for neighbour in neighbours:
#if not neighbour.belongs(self.frontier) and not neighbour.belongs(self.explored):
if str(neighbour.board.values) not in self.explored:
neighbour.heuristics = neighbour.depth + neighbour.board.manhattanDist()
if neighbour.heuristics <= self.threshold:
self.frontier.append(neighbour)
self.explored.add(str(neighbour.board.values))
self.fringeSize = len(self.frontier)
self.nodesExpanded += 1
if self.fringeSize > self.maxFringeSize:
self.maxFringeSize = self.fringeSize
def writeResults(self):
f = open('output.txt', 'w')
s = "path_to_goal: " + str(self.pathToGoal) + "\n"
s += "cost_of_path: " + str(self.costOfPath) + "\n"
s += "nodes_expanded: " + str(self.nodesExpanded) + "\n"
s += "fringe_size: " + str(self.fringeSize) + "\n"
s += "max_fringe_size: " + str(self.maxFringeSize) + "\n"
s += "search_depth: " + str(self.searchDepth) + "\n"
s += "max_search_depth: " + str(self.maxSearchDepth) + "\n"
s += "running_time: " + str(self.runningTime) + "\n"
s += "max_ram_usage: " + str(self.maxRamUsage)
f.write(s)
#print(s)
f.close()
def getPathToGoal(self):
cState = self.state
path = []
while cState.action is not None:
path.append(cState.action)
cState = cState.parent
return path[::-1]
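# Example usage (a minimal sketch; assumes the initial board is given as a
# comma-separated string of tile values, with 0 for the blank tile):
#
#   solver = Solver('bfs', '1,2,5,3,4,0,6,7,8')
#   solver.solve()
#   solver.writeResults()  # writes path_to_goal, cost_of_path, etc. to output.txt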
|
|
#### PATTERN | COMMONSENSE #########################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from builtins import str, bytes, dict, int
from builtins import map, zip, filter
from builtins import object, range
from codecs import BOM_UTF8
from itertools import chain
from functools import cmp_to_key
from io import open
try:
# Python 2
from urllib import urlopen
except ImportError:
# Python 3
from urllib.request import urlopen
from .__init__ import Graph, Node, Edge, bfs
from .__init__ import WEIGHT, CENTRALITY, EIGENVECTOR, BETWEENNESS
import os
import sys
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
if sys.version > "3":
BOM_UTF8 = str(BOM_UTF8.decode("utf-8"))
else:
BOM_UTF8 = BOM_UTF8.decode("utf-8")
#### COMMONSENSE SEMANTIC NETWORK ##################################################################
#--- CONCEPT ---------------------------------------------------------------------------------------
class Concept(Node):
def __init__(self, *args, **kwargs):
""" A concept in the sematic network.
"""
Node.__init__(self, *args, **kwargs)
self._properties = None
@property
def halo(self, depth=2):
""" Returns the concept halo: a list with this concept + surrounding concepts.
This is useful to reason more fluidly about the concept,
since the halo will include latent properties linked to nearby concepts.
"""
return self.flatten(depth=depth)
@property
def properties(self):
""" Returns the top properties in the concept halo, sorted by betweenness centrality.
The return value is a list of concept id's instead of Concepts (for performance).
"""
if self._properties is None:
g = self.graph.copy(nodes=self.halo)
p = (n for n in g.nodes if n.id in self.graph.properties)
p = [n.id for n in reversed(sorted(p, key=lambda n: n.centrality))]
self._properties = p
return self._properties
def halo(concept, depth=2):
return concept.flatten(depth=depth)
def properties(concept, depth=2, centrality=BETWEENNESS):
g = concept.graph.copy(nodes=halo(concept, depth))
p = (n for n in g.nodes if n.id in concept.graph.properties)
p = [n.id for n in reversed(sorted(p, key=lambda n: getattr(n, centrality)))]
return p
#--- RELATION --------------------------------------------------------------------------------------
class Relation(Edge):
def __init__(self, *args, **kwargs):
""" A relation between two concepts, with an optional context.
For example, "Felix is-a cat" is in the "media" context, "tiger is-a cat" in "nature".
"""
self.context = kwargs.pop("context", None)
Edge.__init__(self, *args, **kwargs)
#--- HEURISTICS ------------------------------------------------------------------------------------
# Similarity between concepts is measured using a featural approach:
# a comparison of the features/properties that are salient in each concept's halo.
# Commonsense.similarity() takes an optional "heuristic" parameter to tweak this behavior.
# It is a tuple of two functions:
# 1) function(concept) returns a list of salient properties (or other),
# 2) function(edge) returns the cost to traverse this edge (0.0-1.0).
COMMONALITY = (
# Similarity heuristic that only traverses relations between properties.
lambda concept: concept.properties,
lambda edge: 1 - int(edge.context == "properties" and \
edge.type != "is-opposite-of"))
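# As an illustration only (not used in this module), an alternative heuristic
# could keep the same salient properties but also allow traversal of
# "is-opposite-of" relations between properties:
#ALL_PROPERTY_RELATIONS = (
#    lambda concept: concept.properties,
#    lambda edge: 1 - int(edge.context == "properties"))
# It would be passed as Commonsense.similarity(..., heuristic=ALL_PROPERTY_RELATIONS).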
#--- COMMONSENSE -----------------------------------------------------------------------------------
class Commonsense(Graph):
def __init__(self, data=os.path.join(MODULE, "commonsense.csv"), **kwargs):
""" A semantic network of commonsense, using different relation types:
- is-a,
- is-part-of,
- is-opposite-of,
- is-property-of,
- is-related-to,
- is-same-as,
- is-effect-of.
"""
Graph.__init__(self, **kwargs)
self._properties = None
# Load data from the given path,
# a CSV-file of (concept1, relation, concept2, context, weight)-items.
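        # For example, a single (hypothetical) row might read:
        #   "tiger","is-a","cat","nature",7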
if data is not None:
s = open(data, encoding = 'utf-8').read()
s = s.strip(BOM_UTF8)
s = ((v.strip("\"") for v in r.split(",")) for r in s.splitlines())
for concept1, relation, concept2, context, weight in s:
self.add_edge(concept1, concept2,
type = relation,
context = context,
weight = min(int(weight) * 0.1, 1.0))
@property
def concepts(self):
return self.nodes
@property
def relations(self):
return self.edges
@property
def properties(self):
""" Yields all concepts that are properties (i.e., adjectives).
For example: "cold is-property-of winter" => "cold".
"""
if self._properties is None:
#self._properties = set(e.node1.id for e in self.edges if e.type == "is-property-of")
self._properties = (e for e in self.edges if e.context == "properties")
self._properties = set(chain(*((e.node1.id, e.node2.id) for e in self._properties)))
return self._properties
def add_node(self, id, *args, **kwargs):
""" Returns a Concept (Node subclass).
"""
self._properties = None
kwargs.setdefault("base", Concept)
return Graph.add_node(self, id, *args, **kwargs)
def add_edge(self, id1, id2, *args, **kwargs):
""" Returns a Relation between two concepts (Edge subclass).
"""
self._properties = None
kwargs.setdefault("base", Relation)
return Graph.add_edge(self, id1, id2, *args, **kwargs)
def remove(self, x):
self._properties = None
Graph.remove(self, x)
def similarity(self, concept1, concept2, k=3, heuristic=COMMONALITY):
""" Returns the similarity of the given concepts,
by cross-comparing shortest path distance between k concept properties.
A given concept can also be a flat list of properties, e.g. ["creepy"].
The given heuristic is a tuple of two functions:
1) function(concept) returns a list of salient properties,
2) function(edge) returns the cost for traversing this edge (0.0-1.0).
"""
if isinstance(concept1, str):
concept1 = self[concept1]
if isinstance(concept2, str):
concept2 = self[concept2]
if isinstance(concept1, Node):
concept1 = heuristic[0](concept1)
if isinstance(concept2, Node):
concept2 = heuristic[0](concept2)
if isinstance(concept1, list):
concept1 = [isinstance(n, Node) and n or self[n] for n in concept1]
if isinstance(concept2, list):
concept2 = [isinstance(n, Node) and n or self[n] for n in concept2]
h = lambda id1, id2: heuristic[1](self.edge(id1, id2))
w = 0.0
for p1 in concept1[:k]:
for p2 in concept2[:k]:
p = self.shortest_path(p1, p2, heuristic=h)
w += 1.0 / (p is None and 1e10 or len(p))
return w / k
def nearest_neighbors(self, concept, concepts=[], k=3):
""" Returns the k most similar concepts from the given list.
"""
return sorted(concepts, key=lambda candidate: self.similarity(concept, candidate, k), reverse=True)
similar = neighbors = nn = nearest_neighbors
def taxonomy(self, concept, depth=3, fringe=2):
""" Returns a list of concepts that are descendants of the given concept, using "is-a" relations.
Creates a subgraph of "is-a" related concepts up to the given depth,
then takes the fringe (i.e., leaves) of the subgraph.
"""
def traversable(node, edge):
# Follow parent-child edges.
return edge.node2 == node and edge.type == "is-a"
if not isinstance(concept, Node):
concept = self[concept]
g = self.copy(nodes=concept.flatten(depth, traversable))
g = g.fringe(depth=fringe)
g = [self[n.id] for n in g if n != concept]
return g
field = semantic_field = taxonomy
#g = Commonsense()
#print(g.nn("party", g.field("animal")))
#print(g.nn("creepy", g.field("animal")))
#### COMMONSENSE DATA ##############################################################################
#--- NODEBOX.NET/PERCEPTION ------------------------------------------------------------------------
def download(path=os.path.join(MODULE, "commonsense.csv"), threshold=50):
""" Downloads commonsense data from http://nodebox.net/perception.
Saves the data as commonsense.csv which can be the input for Commonsense.load().
"""
s = "http://nodebox.net/perception?format=txt&robots=1"
s = urlopen(s).read()
s = s.decode("utf-8")
s = s.replace("\\'", "'")
# Group relations by author.
a = {}
for r in ([v.strip("'") for v in r.split(", ")] for r in s.split("\n")):
if len(r) == 7:
a.setdefault(r[-2], []).append(r)
# Iterate authors sorted by number of contributions.
# 1) Authors with 50+ contributions can define new relations and context.
# 2) Authors with 50- contributions (or robots) can only reinforce existing relations.
a = sorted(a.items(), key=cmp_to_key(lambda v1, v2: len(v2[1]) - len(v1[1])))
r = {}
for author, relations in a:
if author == "" or author.startswith("robots@"):
continue
if len(relations) < threshold:
break
# Sort latest-first (we prefer more recent relation types).
relations = sorted(relations, key=cmp_to_key(lambda r1, r2: r1[-1] > r2[-1]))
# 1) Define new relations.
for concept1, relation, concept2, context, weight, author, date in relations:
id = (concept1, relation, concept2)
if id not in r:
r[id] = [None, 0]
if r[id][0] is None and context is not None:
r[id][0] = context
for author, relations in a:
# 2) Reinforce existing relations.
for concept1, relation, concept2, context, weight, author, date in relations:
id = (concept1, relation, concept2)
if id in r:
r[id][1] += int(weight)
# Export CSV-file.
s = []
for (concept1, relation, concept2), (context, weight) in r.items():
s.append("\"%s\",\"%s\",\"%s\",\"%s\",%s" % (
concept1, relation, concept2, context, weight))
f = open(path, "w", encoding = 'utf-8')
f.write(BOM_UTF8)
f.write("\n".join(s))
f.close()
def json():
""" Returns a JSON-string with the data from commonsense.csv.
Each relation is encoded as a [concept1, relation, concept2, context, weight] list.
"""
f = lambda s: s.replace("'", "\\'").encode("utf-8")
s = []
g = Commonsense()
for e in g.edges:
s.append("\n\t['%s', '%s', '%s', '%s', %.2f]" % (
f(e.node1.id),
f(e.type),
f(e.node2.id),
f(e.context),
e.weight
))
return "commonsense = [%s];" % ", ".join(s)
#download("commonsense.csv", threshold=50)
#open("commonsense.js", "w", encoding = 'utf-8').write(json())
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Environment configuration object for Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from tensorflow.core.protobuf import config_pb2
# A list of the property names in RunConfig that the user is allowed to change.
_DEFAULT_REPLACEABLE_LIST = [
'model_dir',
'tf_random_seed',
'save_summary_steps',
'save_checkpoints_steps',
'save_checkpoints_secs',
'session_config',
'keep_checkpoint_max',
'keep_checkpoint_every_n_hours',
'log_step_count_steps'
]
_SAVE_CKPT_ERR = (
'`save_checkpoints_steps` and `save_checkpoints_secs` cannot be both set.'
)
def _validate_save_ckpt_with_replaced_keys(new_copy, replaced_keys):
"""Validates the save ckpt properties."""
# Ensure one (and only one) of save_steps and save_secs is not None.
# Also, if user sets one save ckpt property, say steps, the other one (secs)
# should be set as None to improve usability.
save_steps = new_copy.save_checkpoints_steps
save_secs = new_copy.save_checkpoints_secs
if ('save_checkpoints_steps' in replaced_keys and
'save_checkpoints_secs' in replaced_keys):
# If user sets both properties explicitly, we need to error out if both
# are set or neither of them are set.
if save_steps is not None and save_secs is not None:
raise ValueError(_SAVE_CKPT_ERR)
elif 'save_checkpoints_steps' in replaced_keys and save_steps is not None:
new_copy._save_checkpoints_secs = None # pylint: disable=protected-access
elif 'save_checkpoints_secs' in replaced_keys and save_secs is not None:
new_copy._save_checkpoints_steps = None # pylint: disable=protected-access
def _validate_properties(run_config):
"""Validates the properties."""
def _validate(property_name, cond, message):
property_value = getattr(run_config, property_name)
if property_value is not None and not cond(property_value):
raise ValueError(message)
_validate('model_dir', lambda dir: dir,
message='model_dir should be non-empty')
_validate('save_summary_steps', lambda steps: steps >= 0,
message='save_summary_steps should be >= 0')
_validate('save_checkpoints_steps', lambda steps: steps >= 0,
message='save_checkpoints_steps should be >= 0')
_validate('save_checkpoints_secs', lambda secs: secs >= 0,
message='save_checkpoints_secs should be >= 0')
_validate('session_config',
lambda sc: isinstance(sc, config_pb2.ConfigProto),
message='session_config must be instance of ConfigProto')
_validate('keep_checkpoint_max', lambda keep_max: keep_max >= 0,
message='keep_checkpoint_max should be >= 0')
_validate('keep_checkpoint_every_n_hours', lambda keep_hours: keep_hours > 0,
message='keep_checkpoint_every_n_hours should be > 0')
_validate('log_step_count_steps', lambda num_steps: num_steps > 0,
message='log_step_count_steps should be > 0')
_validate('tf_random_seed', lambda seed: isinstance(seed, six.integer_types),
message='tf_random_seed must be integer.')
class TaskType(object):
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
class RunConfig(object):
"""This class specifies the configurations for an `Estimator` run."""
def __init__(self):
self._model_dir = None
self._tf_random_seed = 1
self._save_summary_steps = 100
self._save_checkpoints_secs = 600
self._save_checkpoints_steps = None
self._session_config = None
self._keep_checkpoint_max = 5
self._keep_checkpoint_every_n_hours = 10000
self._log_step_count_steps = 100
_validate_properties(self)
@property
def cluster_spec(self):
return None
@property
def evaluation_master(self):
return ''
@property
def is_chief(self):
return True
@property
def master(self):
return ''
@property
def num_ps_replicas(self):
return 0
@property
def num_worker_replicas(self):
return 1
@property
def task_id(self):
return 0
@property
def task_type(self):
return TaskType.WORKER
@property
def tf_random_seed(self):
return self._tf_random_seed
@property
def save_summary_steps(self):
return self._save_summary_steps
@property
def save_checkpoints_secs(self):
return self._save_checkpoints_secs
@property
def session_config(self):
return self._session_config
@property
def save_checkpoints_steps(self):
return self._save_checkpoints_steps
@property
def keep_checkpoint_max(self):
return self._keep_checkpoint_max
@property
def keep_checkpoint_every_n_hours(self):
return self._keep_checkpoint_every_n_hours
@property
def log_step_count_steps(self):
return self._log_step_count_steps
@property
def model_dir(self):
return self._model_dir
def replace(self, **kwargs):
"""Returns a new instance of `RunConfig` replacing specified properties.
Only the properties in the following list are allowed to be replaced:
- `model_dir`.
- `tf_random_seed`,
- `save_summary_steps`,
- `save_checkpoints_steps`,
- `save_checkpoints_secs`,
- `session_config`,
- `keep_checkpoint_max`,
- `keep_checkpoint_every_n_hours`,
- `log_step_count_steps`,
In addition, either `save_checkpoints_steps` or `save_checkpoints_secs`
can be set (should not be both).
Args:
**kwargs: keyword named properties with new values.
Raises:
ValueError: If any property name in `kwargs` does not exist or is not
allowed to be replaced, or both `save_checkpoints_steps` and
`save_checkpoints_secs` are set.
Returns:
a new instance of `RunConfig`.
"""
return self._replace(
allowed_properties_list=_DEFAULT_REPLACEABLE_LIST, **kwargs)
def _replace(self, allowed_properties_list=None, **kwargs):
"""See `replace`.
N.B.: This implementation assumes that for key named "foo", the underlying
property the RunConfig holds is "_foo" (with one leading underscore).
Args:
allowed_properties_list: The property name list allowed to be replaced.
**kwargs: keyword named properties with new values.
Raises:
ValueError: If any property name in `kwargs` does not exist or is not
allowed to be replaced, or both `save_checkpoints_steps` and
`save_checkpoints_secs` are set.
Returns:
a new instance of `RunConfig`.
"""
new_copy = copy.deepcopy(self)
allowed_properties_list = allowed_properties_list or []
for key, new_value in six.iteritems(kwargs):
if key in allowed_properties_list:
setattr(new_copy, '_' + key, new_value)
continue
raise ValueError(
'Replacing {} is not supported. Allowed properties are {}.'.format(
key, allowed_properties_list))
_validate_save_ckpt_with_replaced_keys(new_copy, kwargs.keys())
_validate_properties(new_copy)
return new_copy
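# Example usage (a minimal sketch): switch from time-based to step-based
# checkpointing; `replace` clears the conflicting `save_checkpoints_secs`
# default automatically.
#
#   config = RunConfig().replace(model_dir='/tmp/model',
#                                save_checkpoints_steps=1000)
#   assert config.save_checkpoints_secs is None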
|
|
import traceback
import time
import io
from copy import copy
from PyQt5 import QtGui, QtCore, QtWidgets, uic
from settings import tmsettings
from functools import partial
from const import *
from event import TelepatContextAddEvent, TelepatContextUpdateEvent, ExceptionEvent
from conneditor import ConnectionEditor
from contextitem import ContextItem
from models.context import Context
from models.model import Model
from modelitem import ModelItem
from workers import ContextsWorker, SchemaWorker, ApplicationsWorker, RegisterWorker, UsersWorker
from telepat.transportnotification import NOTIFICATION_TYPE_ADDED, NOTIFICATION_TYPE_DELETED, NOTIFICATION_TYPE_UPDATED
import console
class TelepatManager(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(TelepatManager, self).__init__(parent)
self.applications = []
uic.loadUi('telepatmanager.ui', self)
console.set_widget(self.loggerWidget)
self.actionConnect.triggered.connect(self.openConnection)
self.actionRefresh.triggered.connect(self.refreshContexts)
self.actionEditApp.triggered.connect(self.editApplication)
self.actionShowNameId.toggled.connect(self.showNameId)
self.contextsTreeView.clicked.connect(self.itemSelected)
self.filterLineEdit.textChanged.connect(self.filterChanged)
# Set up the UI
self.actionRefresh.setEnabled(False)
self.loggerWidget.setFont(QtGui.QFont(QtGui.QFontDatabase.systemFont(QtGui.QFontDatabase.FixedFont)))
self.setupHistoryMenu()
self.setupSplitters()
self.setupAppsCombobox()
self.treeViewLayout.setContentsMargins(0, 0, 0, 0)
self.stackedWidget.setContentsMargins(0, 0, 0, 0)
self.setUnifiedTitleAndToolBarOnMac(True)
self.contexts_model = QtGui.QStandardItemModel()
self.proxy = QtCore.QSortFilterProxyModel(self)
self.proxy.setSourceModel(self.contexts_model)
self.proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.contextsTreeView.setModel(self.proxy)
console.log("Application started")
def setupHistoryMenu(self):
if tmsettings.value("recentConnections"):
self.menu_history.setEnabled(True)
for connection_dict in tmsettings.value("recentConnections"):
action = QtWidgets.QAction(connection_dict["url"], self.menu_history)
action.triggered.connect(partial(self.openConnection, connection_dict))
self.menu_history.addAction(action)
else:
self.menu_history.setEnabled(False)
def setupSplitters(self):
if tmsettings.value("consoleSplitterSize"):
self.consoleSplitter.restoreState(tmsettings.value("consoleSplitterSize"))
else:
self.consoleSplitter.setSizes([self.height() - 200, 200])
if tmsettings.value("treeViewSplitterSize"):
self.treeViewSplitter.restoreState(tmsettings.value("treeViewSplitterSize"))
else:
self.treeViewSplitter.setSizes([200, self.width() - 200])
def setupAppsCombobox(self):
widget = QtWidgets.QWidget()
layout = QtWidgets.QHBoxLayout(widget)
self.appsCombobox = QtWidgets.QComboBox(self)
self.appsCombobox.setDisabled(True)
self.actionEditApp.setDisabled(True)
self.appsCombobox.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding))
self.appsCombobox.currentIndexChanged.connect(self.currentAppChanged)
layout.addWidget(QtWidgets.QLabel("Application:"))
layout.addWidget(self.appsCombobox)
self.applicationToolbar.insertWidget(self.actionEditApp, widget)
def openConnection(self, connection_dict=None):
self.connectionEditor = ConnectionEditor(self, connection_dict)
self.connectionEditor.success.connect(self.login_success)
self.connectionEditor.show()
def refreshContexts(self):
def contexts_success(contexts_list):
telepat = QtCore.QCoreApplication.instance().telepat_instance
telepat.on_update_context = self.on_update_context
telepat.on_add_context = self.on_add_context
application = self.applications[self.appsCombobox.currentIndex()]
self.contexts_model.setHorizontalHeaderLabels(["Contexts"])
self.actionRefresh.setEnabled(True)
for ctx in contexts_list:
item = ContextItem(ctx)
item.setEditable(False)
for key in application.schema:
subitem = ModelItem(key, Model(application.schema[key].to_json()))
subitem.setEditable(False)
item.appendRow(subitem)
self.contexts_model.appendRow(item)
def contexts_failed(err_code, msg):
self.actionRefresh.setEnabled(True)
QtWidgets.QMessageBox.critical(self, "Contexts retrieving error", "Error {0}: {1}".format(err_code, msg))
self.actionRefresh.setEnabled(False)
self.contexts_model.clear()
self.contexts_worker = ContextsWorker(self)
self.contexts_worker.success.connect(contexts_success)
self.contexts_worker.failed.connect(contexts_failed)
self.contexts_worker.log.connect(console.log)
self.contexts_worker.start()
def getUsers(self):
def users_success(users_list):
self.app_users = users_list
def users_failed(err_code, err_msg):
QtWidgets.QMessageBox.critical(self, "Cannot get application's users list", "Error {0}: {1}".format(err_code, err_msg))
self.users_worker = UsersWorker()
self.users_worker.success.connect(users_success)
self.users_worker.failed.connect(users_failed)
self.users_worker.log.connect(console.log)
self.users_worker.start()
def editApplication(self):
def schema_success(app_schema):
import json
print(json.dumps(app_schema.to_json()))
def schema_failed(err_code, err_msg):
QtWidgets.QMessageBox.critical(self, "Schema retrieving error", "Error {0}: {1}".format(err_code, msg))
self.schema_worker = SchemaWorker()
self.schema_worker.success.connect(schema_success)
self.schema_worker.failed.connect(schema_failed)
self.schema_worker.log.connect(console.log)
self.schema_worker.start()
def on_update_context(self, context, notification):
event = TelepatContextUpdateEvent(context, notification)
QtWidgets.QApplication.postEvent(self, event)
def on_add_context(self, context, notification):
event = TelepatContextAddEvent(context, notification)
QtWidgets.QApplication.postEvent(self, event)
def currentAppChanged(self, index):
telepat = QtCore.QCoreApplication.instance().telepat_instance
app = self.applications[index]
telepat.app_id = app["id"]
telepat.api_key = app["keys"][0]
self.registerDevice()
self.getUsers()
self.refreshContexts()
def filterChanged(self):
self.proxy.setFilterRegExp(self.filterLineEdit.text())
def itemSelected(self, index):
item = self.contexts_model.itemFromIndex(index.model().mapToSource(index))
if type(item) == ContextItem:
self.stackedWidget.setCurrentIndex(0)
self.tableView.editObject(item.context)
elif type(item) == ModelItem:
self.stackedWidget.setCurrentIndex(1)
self.modelBrowser.browseModel(item.parent().context, item.text(), self.app_users)
def showNameId(self):
i = 0
while self.contexts_model.item(i):
self.contexts_model.item(i).show_name(self.actionShowNameId.isChecked())
i += 1
def registerDevice(self):
def register_success():
pass
def register_failed(err_code, msg):
QtWidgets.QMessageBox.critical(self, "Failed to retrieve applications", "Error {0}: {1}".format(err_code, msg))
self.register_worker = RegisterWorker(self)
self.register_worker.success.connect(register_success)
self.register_worker.failed.connect(register_failed)
self.register_worker.log.connect(console.log)
self.register_worker.start()
def login_success(self):
def apps_success(apps_list):
self.applications = apps_list
for app in self.applications:
self.appsCombobox.addItem("{0} ({1})".format(app.name, app.id))
self.appsCombobox.setDisabled(False)
self.actionEditApp.setDisabled(False)
def apps_failed(err_code, msg):
QtWidgets.QMessageBox.critical(self, "Failed to retrieve applications", "Error {0}: {1}".format(err_code, msg))
self.apps_worker = ApplicationsWorker(self)
self.apps_worker.success.connect(apps_success)
self.apps_worker.failed.connect(apps_failed)
self.apps_worker.log.connect(console.log)
self.apps_worker.start()
def process_context_add_event(self, event):
application = self.applications[self.appsCombobox.currentIndex()]
context = event.obj
if context.application_id != application["id"]:
return
item = ContextItem(Context(context.to_json()))
item.setEditable(False)
for key in application.schema:
subitem = ModelItem(key, Model(application.schema[key].to_json()))
subitem.setEditable(False)
item.appendRow(subitem)
self.contexts_model.appendRow(item)
def process_context_update_event(self, event):
context = event.obj
i = 0
while self.contexts_model.item(i):
if context.id == self.contexts_model.item(i).context.id:
if event.notification.notification_type == NOTIFICATION_TYPE_UPDATED:
self.contexts_model.item(i).context = Context(event.obj.to_json())
self.tableView.editObject(self.contexts_model.item(i).context)
break
i += 1
def event(self, event):
if isinstance(event, ExceptionEvent):
event.callback()
elif isinstance(event, TelepatContextUpdateEvent):
self.process_context_update_event(event)
elif isinstance(event, TelepatContextAddEvent):
self.process_context_add_event(event)
return super(TelepatManager, self).event(event)
def excepthook(self, excType, excValue, tracebackobj):
def show_message():
notice = \
"""A fatal error occured. Please report this issue on the\n"""\
"""project's GitHub page or via email at [email protected]\n"""
separator = '-' * 80
errmsg = '%s: \n%s' % (str(excType), str(excValue))
timeString = time.strftime("%Y-%m-%d, %H:%M:%S")
tbinfofile = io.StringIO()
traceback.print_tb(tracebackobj, None, tbinfofile)
tbinfofile.seek(0)
tbinfo = tbinfofile.read()
sections = [separator, timeString, separator, errmsg, separator, tbinfo]
msg = '\n'.join(sections)
QtWidgets.QMessageBox.critical(None, "Fatal error", str(notice)+str(msg))
event = ExceptionEvent(TM_EVENT_EXCEPTION)
event.callback = show_message
QtWidgets.QApplication.postEvent(self, event)
def closeEvent(self, event):
tmsettings.setValue("consoleSplitterSize", self.consoleSplitter.saveState())
tmsettings.setValue("treeViewSplitterSize", self.treeViewSplitter.saveState())
super(TelepatManager, self).closeEvent(event)
|
|
import re, random, py
from rpython.rlib.rsre import rsre_core, rsre_char
from rpython.rlib.rsre.rpy import get_code, VERSION
def get_code_and_re(regexp):
return get_code(regexp), re.compile(regexp)
def test_get_code_repetition():
c1 = get_code(r"a+")
c2 = get_code(r"a+")
assert c1 == c2
class TestMatch:
def test_or(self):
r = get_code(r"a|bc|def")
assert rsre_core.match(r, "a")
assert rsre_core.match(r, "bc")
assert rsre_core.match(r, "def")
assert not rsre_core.match(r, "ghij")
def test_any(self):
r = get_code(r"ab.cd")
assert rsre_core.match(r, "abXcdef")
assert not rsre_core.match(r, "ab\ncdef")
assert not rsre_core.match(r, "abXcDef")
def test_any_repetition(self):
r = get_code(r"ab.*cd")
assert rsre_core.match(r, "abXXXXcdef")
assert rsre_core.match(r, "abcdef")
assert not rsre_core.match(r, "abX\nXcdef")
assert not rsre_core.match(r, "abXXXXcDef")
def test_any_all(self):
r = get_code(r"(?s)ab.cd")
assert rsre_core.match(r, "abXcdef")
assert rsre_core.match(r, "ab\ncdef")
assert not rsre_core.match(r, "ab\ncDef")
def test_any_all_repetition(self):
r = get_code(r"(?s)ab.*cd")
assert rsre_core.match(r, "abXXXXcdef")
assert rsre_core.match(r, "abcdef")
assert rsre_core.match(r, "abX\nXcdef")
assert not rsre_core.match(r, "abX\nXcDef")
def test_assert(self):
r = get_code(r"abc(?=def)(.)")
res = rsre_core.match(r, "abcdefghi")
assert res is not None and res.get_mark(1) == 4
assert not rsre_core.match(r, "abcdeFghi")
def test_assert_not(self):
r = get_code(r"abc(?!def)(.)")
res = rsre_core.match(r, "abcdeFghi")
assert res is not None and res.get_mark(1) == 4
assert not rsre_core.match(r, "abcdefghi")
def test_lookbehind(self):
r = get_code(r"([a-z]*)(?<=de)")
assert rsre_core.match(r, "ade")
res = rsre_core.match(r, "adefg")
assert res is not None and res.get_mark(1) == 3
assert not rsre_core.match(r, "abc")
assert not rsre_core.match(r, "X")
assert not rsre_core.match(r, "eX")
def test_negative_lookbehind(self):
def found(s):
res = rsre_core.match(r, s)
assert res is not None
return res.get_mark(1)
r = get_code(r"([a-z]*)(?<!dd)")
assert found("ade") == 3
assert found("adefg") == 5
assert found("abcdd") == 4
assert found("abddd") == 3
assert found("adddd") == 2
assert found("ddddd") == 1
assert found("abXde") == 2
def test_at(self):
r = get_code(r"abc$")
assert rsre_core.match(r, "abc")
assert not rsre_core.match(r, "abcd")
assert not rsre_core.match(r, "ab")
def test_repeated_set(self):
r = get_code(r"[a0x]+f")
assert rsre_core.match(r, "a0af")
assert not rsre_core.match(r, "a0yaf")
def test_category(self):
r = get_code(r"[\sx]")
assert rsre_core.match(r, "x")
assert rsre_core.match(r, " ")
assert not rsre_core.match(r, "n")
def test_groupref(self):
r = get_code(r"(xx+)\1+$") # match non-prime numbers of x
assert not rsre_core.match(r, "xx")
assert not rsre_core.match(r, "xxx")
assert rsre_core.match(r, "xxxx")
assert not rsre_core.match(r, "xxxxx")
assert rsre_core.match(r, "xxxxxx")
assert not rsre_core.match(r, "xxxxxxx")
assert rsre_core.match(r, "xxxxxxxx")
assert rsre_core.match(r, "xxxxxxxxx")
def test_groupref_ignore(self):
r = get_code(r"(?i)(xx+)\1+$") # match non-prime numbers of x
assert not rsre_core.match(r, "xX")
assert not rsre_core.match(r, "xxX")
assert rsre_core.match(r, "Xxxx")
assert not rsre_core.match(r, "xxxXx")
assert rsre_core.match(r, "xXxxxx")
assert not rsre_core.match(r, "xxxXxxx")
assert rsre_core.match(r, "xxxxxxXx")
assert rsre_core.match(r, "xxxXxxxxx")
def test_groupref_exists(self):
r = get_code(r"((a)|(b))c(?(2)d)$")
assert not rsre_core.match(r, "ac")
assert rsre_core.match(r, "acd")
assert rsre_core.match(r, "bc")
assert not rsre_core.match(r, "bcd")
#
r = get_code(r"((a)|(b))c(?(2)d|e)$")
assert not rsre_core.match(r, "ac")
assert rsre_core.match(r, "acd")
assert not rsre_core.match(r, "ace")
assert not rsre_core.match(r, "bc")
assert not rsre_core.match(r, "bcd")
assert rsre_core.match(r, "bce")
def test_in_ignore(self):
r = get_code(r"(?i)[a-f]")
assert rsre_core.match(r, "b")
assert rsre_core.match(r, "C")
assert not rsre_core.match(r, "g")
r = get_code(r"(?i)[a-f]+$")
assert rsre_core.match(r, "bCdEf")
assert not rsre_core.match(r, "g")
assert not rsre_core.match(r, "aaagaaa")
def test_not_literal(self):
r = get_code(r"[^a]")
assert rsre_core.match(r, "A")
assert not rsre_core.match(r, "a")
r = get_code(r"[^a]+$")
assert rsre_core.match(r, "Bx123")
assert not rsre_core.match(r, "--a--")
def test_not_literal_ignore(self):
r = get_code(r"(?i)[^a]")
assert rsre_core.match(r, "G")
assert not rsre_core.match(r, "a")
assert not rsre_core.match(r, "A")
r = get_code(r"(?i)[^a]+$")
assert rsre_core.match(r, "Gx123")
assert not rsre_core.match(r, "--A--")
def test_repeated_single_character_pattern(self):
r = get_code(r"foo(?:(?<=foo)x)+$")
assert rsre_core.match(r, "foox")
def test_flatten_marks(self):
r = get_code(r"a(b)c((d)(e))+$")
res = rsre_core.match(r, "abcdedede")
assert res.flatten_marks() == [0, 9, 1, 2, 7, 9, 7, 8, 8, 9]
assert res.flatten_marks() == [0, 9, 1, 2, 7, 9, 7, 8, 8, 9]
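# flatten_marks() appears to return [match_start, match_end] followed by the
# (start, end) span of each capture group in order: here the whole match is
# (0, 9), group 1 "(b)" is (1, 2), group 2 "((d)(e))" keeps its last
# repetition (7, 9), and groups 3 and 4 are (7, 8) and (8, 9).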
def test_bug1(self):
# REPEAT_ONE inside REPEAT
r = get_code(r"(?:.+)?B")
assert rsre_core.match(r, "AB") is not None
r = get_code(r"(?:AA+?)+B")
assert rsre_core.match(r, "AAAB") is not None
r = get_code(r"(?:AA+)+?B")
assert rsre_core.match(r, "AAAB") is not None
r = get_code(r"(?:AA+?)+?B")
assert rsre_core.match(r, "AAAB") is not None
# REPEAT inside REPEAT
r = get_code(r"(?:(?:xy)+)?B")
assert rsre_core.match(r, "xyB") is not None
r = get_code(r"(?:xy(?:xy)+?)+B")
assert rsre_core.match(r, "xyxyxyB") is not None
r = get_code(r"(?:xy(?:xy)+)+?B")
assert rsre_core.match(r, "xyxyxyB") is not None
r = get_code(r"(?:xy(?:xy)+?)+?B")
assert rsre_core.match(r, "xyxyxyB") is not None
def test_assert_group(self):
r = get_code(r"abc(?=(..)f)(.)")
res = rsre_core.match(r, "abcdefghi")
assert res is not None
assert res.span(2) == (3, 4)
assert res.span(1) == (3, 5)
def test_assert_not_group(self):
r = get_code(r"abc(?!(de)f)(.)")
res = rsre_core.match(r, "abcdeFghi")
assert res is not None
assert res.span(2) == (3, 4)
# this I definitely classify as Horrendously Implementation Dependent.
# CPython answers (3, 5).
assert res.span(1) == (-1, -1)
def test_match_start(self):
r = get_code(r"^ab")
assert rsre_core.match(r, "abc")
assert not rsre_core.match(r, "xxxabc", start=3)
assert not rsre_core.match(r, "xx\nabc", start=3)
#
r = get_code(r"(?m)^ab")
assert rsre_core.match(r, "abc")
assert not rsre_core.match(r, "xxxabc", start=3)
assert rsre_core.match(r, "xx\nabc", start=3)
def test_match_end(self):
r = get_code("ab")
assert rsre_core.match(r, "abc")
assert rsre_core.match(r, "abc", end=333)
assert rsre_core.match(r, "abc", end=3)
assert rsre_core.match(r, "abc", end=2)
assert not rsre_core.match(r, "abc", end=1)
assert not rsre_core.match(r, "abc", end=0)
assert not rsre_core.match(r, "abc", end=-1)
def test_match_bug1(self):
r = get_code(r'(x??)?$')
assert rsre_core.match(r, "x")
def test_match_bug2(self):
r = get_code(r'(x??)??$')
assert rsre_core.match(r, "x")
def test_match_bug3(self):
if VERSION == "2.7.5":
py.test.skip("pattern fails to compile with exactly 2.7.5 "
"(works on 2.7.3 and on 2.7.trunk though)")
r = get_code(r'([ax]*?x*)?$')
assert rsre_core.match(r, "aaxaa")
def test_bigcharset(self):
for i in range(100):
chars = [unichr(random.randrange(0x100, 0xD000))
for n in range(random.randrange(1, 25))]
pattern = u'[%s]' % (u''.join(chars),)
r = get_code(pattern)
for c in chars:
assert rsre_core.match(r, c)
for i in range(200):
c = unichr(random.randrange(0x0, 0xD000))
res = rsre_core.match(r, c)
if c in chars:
assert res is not None
else:
assert res is None
def test_simple_match_1(self):
r = get_code(r"ab*bbbbbbbc")
print r
match = rsre_core.match(r, "abbbbbbbbbcdef")
assert match
assert match.match_end == 11
def test_empty_maxuntil(self):
r = get_code("\\{\\{((?:.*?)+)\\}\\}")
match = rsre_core.match(r, "{{a}}{{b}}")
assert match.group(1) == "a"
def test_fullmatch_1(self):
r = get_code(r"ab*c")
assert not rsre_core.fullmatch(r, "abbbcdef")
assert rsre_core.fullmatch(r, "abbbc")
def test_fullmatch_2(self):
r = get_code(r"a(b*?)")
match = rsre_core.fullmatch(r, "abbb")
assert match.group(1) == "bbb"
assert not rsre_core.fullmatch(r, "abbbc")
def test_fullmatch_3(self):
r = get_code(r"a((bp)*?)c")
match = rsre_core.fullmatch(r, "abpbpbpc")
assert match.group(1) == "bpbpbp"
def test_fullmatch_4(self):
r = get_code(r"a((bp)*)c")
match = rsre_core.fullmatch(r, "abpbpbpc")
assert match.group(1) == "bpbpbp"
def test_fullmatch_assertion(self):
r = get_code(r"(?=a).b")
assert rsre_core.fullmatch(r, "ab")
r = get_code(r"(?!a)..")
assert not rsre_core.fullmatch(r, "ab")
def test_range_ignore(self):
from rpython.rlib.unicodedata import unicodedb
rsre_char.set_unicode_db(unicodedb)
#
r = get_code(u"[\U00010428-\U0001044f]", re.I)
assert r.count(27) == 1 # OPCODE_RANGE
r[r.index(27)] = 32 # => OPCODE_RANGE_IGNORE
assert rsre_core.match(r, u"\U00010428")
|
|
import sys
import os
import types
import jsonpickle
import collections
import tensorflow as tf
ADAM = 'adam'
SGD = 'sgd'
RMSPROP = 'rmsprop'
ADADELTA = 'adadelta'
ADAGRAD = 'adagrad'
MOMENTUM = 'momentum'
NESTEROV = 'nesterov'
class Optimizer(object):
"""Optimizer class to encapsulate (all) optimizers from its creation.
This is required to enable delayed-build of the optimizer.
"""
def __init__(self, optimizer_name=None, initial_lr=0.1,
step_interval=sys.maxint, rate=1.0, staircase=True):
"""Creates an optimizer with its default hyperparams.
Note: momentum-based optimizers (RMSProp, Momentum, Nesterov) should
set their momentum explicitly.
Parameters
----------
optimizer_name: str
The optimizer to use. Use one of the constants defined in this module (e.g. ADAM) or the corresponding string such as 'adam'.
initial_lr: float
The initial learning rate > 0.
step_interval: int, optional
The number of steps when to decay the learning rate.
Use sys.maxint to use no decay.
rate: float, optional
The decay rate.
staircase: Boolean, optional
Whether to use staircase decay (default) or not.
"""
assert initial_lr > 0, "Learning rate must be positive."
assert step_interval > 0, "Decay step interval must be > 0."
assert rate > 0 and rate <= 1, "Decay rate must be in range (0, 1]."
self._optimizer_name = optimizer_name.lower() if optimizer_name else None
self._initial_lr = initial_lr
# set decay
self._decay = {"step_interval": step_interval,
"rate": rate,
"staircase": staircase}
# set default hyper-params
self._hyper = {}
self.set_hyperparams()
def set_hyperparams(self, eps=1e-8, beta1=0.9, beta2=0.999, momentum=0.0,
decay=0.9, rho=0.95, init_accu_val=0.1):
"""Sets the hyper parameters. Choose the related ones to your defined
learning algorithm. All others will be ignored. This function resets
all not-specified values to its defaults.
eps: float, optional
The fuzz factor. Value is typically close to 0.
beta1: float, optional
The exponential decay rate for the 1st moment estimates.
Value is typically close to 1.
beta2: float, optional
The exponential decay rate for the 2nd moment estimates.
Value is typically close to 1.
momentum: float, optional
The momentum to use.
decay: float, optional
Discounting factor for the history/coming gradient.
rho: float, optional
The rho-decay rate.
init_accu_val: float, optional
Starting value for the accumulators, must be positive
"""
assert eps >= 0, "Epsilon must be >= 0 (usually very small)."
assert beta1 > 0 and beta1 < 1, "Beta1 must be in range (0, 1)."
assert beta2 > 0 and beta2 < 1, "Beta2 must be in range (0, 1)."
assert momentum >= 0, "Momentum must be >= 0."
assert decay >= 0, "Decay must be >= 0."
assert rho >= 0, "Rho must be >= 0."
assert init_accu_val >= 0, "Accumulator value must be >= 0."
self._hyper = {"rho": rho,
"eps": eps,
"init_accu_val": init_accu_val,
"beta1": beta1,
"beta2": beta2,
"decay": decay,
"momentum": momentum}
def build(self, global_step):
"""Actually builds the optimizer including the learning rate decay
if it was configured.
Parameters
----------
global_step: int or tf.Variable
The global step counter.
Returns
----------
Tuple (optimizer, learning_rate) of the created optimizer.
"""
assert self.name is not None, \
"Specify an optimizer name or load() an optimizer from file."
if self.uses_decay:
# Decay the learning rate exponentially based on the number of steps
lr = tf.train.exponential_decay(self.initial_lr,
global_step,
self.decay["step_interval"],
self.decay["rate"],
staircase=self.decay["staircase"])
else:
lr = self.initial_lr
if self.name == SGD:
opt = tf.train.GradientDescentOptimizer(lr)
elif self.name == ADAM:
opt = tf.train.AdamOptimizer(lr,
beta1=self._hyper["beta1"],
beta2=self._hyper["beta2"],
epsilon=self._hyper["eps"])
elif self.name == RMSPROP:
opt = tf.train.RMSPropOptimizer(lr,
decay=self._hyper["decay"],
momentum=self._hyper["momentum"],
epsilon=self._hyper["eps"])
elif self.name == ADADELTA:
opt = tf.train.AdadeltaOptimizer(lr,
rho=self._hyper["rho"],
epsilon=self._hyper["eps"])
elif self.name == ADAGRAD:
opt = tf.train.AdagradOptimizer(lr,
initial_accumulator_value=self._hyper["init_accu_val"])
elif self.name == MOMENTUM:
opt = tf.train.MomentumOptimizer(lr,
momentum=self._hyper["momentum"],
use_nesterov=False)
elif self.name == NESTEROV:
opt = tf.train.MomentumOptimizer(lr,
momentum=self._hyper["momentum"],
use_nesterov=True)
else:
raise ValueError("Unknown optimizer. Contributors welcome...")
return opt, lr
def save(self, filepath):
"""Saves the optimizer parameters to the specifiec path as JSON.
Parameters
----------
filepath: str
The file path.
"""
# check and create dirs
if not os.path.exists(os.path.dirname(filepath)):
subdirs = os.path.dirname(filepath)
if subdirs is not None and subdirs != '':
os.makedirs(subdirs)
with open(filepath, 'wb') as f:
json = jsonpickle.encode(self)
f.write(json)
def load(self, filepath):
"""Load the optimizer parameters from the specifiec path as JSON.
Parameters
----------
filepath: str
The file path.
"""
with open(filepath, 'r') as f:
json = f.read()
model = jsonpickle.decode(json)
self.__dict__.update(model.__dict__)
def print_params(self):
"""Shows the model parameters."""
params = self.__dict__.copy()
def trim_prefix(text, prefix):
# trim underscore prefix
return text[text.startswith(prefix) and len(prefix):]
print(">>> Optimizer:")
for name, value in params.iteritems():
print("{:16} -> {}".format(trim_prefix(name, '_'), value))
@property
def name(self):
"""Gets the optimizers name."""
return self._optimizer_name
@property
def initial_lr(self):
"""Gets the initial learning rate."""
return self._initial_lr
@property
def decay(self):
"""Gets the (exponential) decay properties."""
return self._decay
@property
def uses_decay(self):
"""Indicates whether (exponential) decay is used or not."""
return not (self.decay["step_interval"] == sys.maxint or
self.decay["rate"] == 1)
@property
def hyperparams(self):
"""Gets the hyper parameters for the optimizer.
Only the hyper params relevant for this optimizer are used.
"""
return self._hyper
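# Minimal usage sketch (assuming the TF 0.x-era API used throughout this
# module; "loss" and the hyperparameter values are illustrative only):
#
#   global_step = tf.Variable(0, trainable=False, name="global_step")
#   optimizer = Optimizer(ADAM, initial_lr=0.001,
#                         step_interval=10000, rate=0.96)
#   optimizer.set_hyperparams(beta1=0.9, beta2=0.999, eps=1e-8)
#   opt, lr = optimizer.build(global_step)
#   train_op = opt.minimize(loss, global_step=global_step)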
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers
in a multi-GPU environment.
Note that this function provides a synchronization point across all towers.
Parameters
----------
tower_grads: List of lists of (gradient, variable) tuples
The tower gradients. The outer list is over individual gradients.
The inner list is over the gradient calculation for each tower.
Returns
----------
average_grads: List of pairs of (gradient, variable)
The gradients where the gradient has been averaged
across all towers.
"""
with tf.name_scope("avg_grads"):
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
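# Shape sketch of the expected input (illustrative names): with 2 towers and
# 2 variables,
#   tower_grads = [[(g0_gpu0, v0), (g1_gpu0, v1)],   # gradients from GPU 0
#                  [(g0_gpu1, v0), (g1_gpu1, v1)]]   # gradients from GPU 1
# zip(*tower_grads) then yields ((g0_gpu0, v0), (g0_gpu1, v0)), ..., so each
# variable's gradients are stacked along a new leading "tower" axis and
# averaged over it.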
def inverse_sigmoid_decay(initial_value, global_step, decay_rate=1000.0,
name=None):
"""Applies inverse sigmoid decay to the decay variable (learning rate).
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an inverse sigmoid decay
function to a provided initial variable value. It requires a `global_step`
value to compute the decayed variable value. You can just pass a TensorFlow
variable that you increment at each training step.
The function returns the decayed variable value. It is computed as:
For example, with initial_value = 1.0 and decay_rate = 10000.0:
1.0 * (10000.0 / (10000.0 + exp(global_step / 10000.0)))
```python
decayed_var = decay_variable *
decay_rate / (decay_rate + exp(global_step / decay_rate))
```
Rough Infos | Value @ t=0 | (Real) decay start | Reaches Zero
-------------------------------------------------------------------------
decay_rate: 10.0 | 0.9 | -40 | 100
decay_rate: 100.0 | 0.985 | -20 | 1,100
decay_rate: 1000.0 | 1.0 | 2,000 | 12,000
decay_rate: 10000.0 | 1.0 | 50,000 | 110,000
Parameters
----------
initial_value: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial variable value to decay.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
decay_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The decay rate >> 1.
name: String. Optional name of the operation. Defaults to
'InvSigmoidDecay'
Returns
----------
A scalar `Tensor` of the same type as `decay_variable`. The decayed
variable value (such as learning rate).
"""
assert decay_rate > 1, "The decay_rate has to be >> 1."
with tf.op_scope([initial_value, global_step, decay_rate],
name, "InvSigmoidDecay") as name:
initial_value = tf.convert_to_tensor(initial_value, name="initial_value")
dtype = initial_value.dtype
global_step = tf.cast(global_step, dtype)
decay_rate = tf.cast(decay_rate, dtype)
denom = decay_rate + tf.exp(global_step / decay_rate)
return tf.mul(initial_value, decay_rate / denom, name=name)
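# Rough numeric check of the formula above (illustrative, initial_value = 1.0,
# decay_rate = 1000.0):
#   global_step =     0  ->  1000 / (1000 + e**0)   ~= 0.999
#   global_step =  5000  ->  1000 / (1000 + e**5)   ~= 0.871
#   global_step = 10000  ->  1000 / (1000 + e**10)  ~= 0.043
# so the value stays near initial_value for a while and then falls off
# sigmoidally, matching the table in the docstring.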
|
|
from __future__ import print_function
import json
from fds.galaxy_fds_client_exception import GalaxyFDSClientException
class FDSLifecycleConfig(dict):
'''
lifecycle config like this:
{
"rules": [
{
"id": 0,
"enabled": true,
"prefix": "log",
"actions": {
"nonCurrentVersionExpiration": {
"days": 7
},
"expiration": {
"days":30
}
}
},
{
"enabled": true,
"prefix": "images",
"actions": {
"nonCurrentVersionExpiration": {
"days": 7
},
"expiration": {
"days":30
},
"abortIncompleteMultipartUpload": {
"days":7
}
}
}
]
}
'''
def __init__(self, json={}):
dict.__init__(self, json)
self._rules = []
for rule in self.get('rules', []):
self._rules.append(FDSLifecycleRule(rule))
self['rules'] = self._rules
@property
def rules(self):
return self._rules
def get_rule_by_prefix(self, prefix):
for rule in self.rules:
if rule.prefix == prefix:
return rule
return None
def get_rule_by_object_name(self, object_name, enabled_rule_only=False):
'''
Get rule by object name
:param object_name:
:param enabled_rule_only:
:return: The first matching rule (only one rule is returned even if more than one rule matches), or None if nothing matches.
'''
for rule in self.rules:
if object_name.startswith(rule.prefix) and (rule.enabled or not enabled_rule_only):
return rule
return None
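# Example (illustrative object names, using the config from the class
# docstring): matching is a plain prefix test on the object name, so
#   config.get_rule_by_object_name('log/2018-01-01.txt')  -> the "log" rule
#   config.get_rule_by_object_name('images/a.png')        -> the "images" rule
#   config.get_rule_by_object_name('other/file')          -> None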
class FDSLifecycleRule(dict):
def __init__(self, json={}):
dict.__init__(self, json)
self._actions = {}
for name, action in self.get('actions', {}).items():
if name == 'abortIncompleteMultipartUpload':
self._actions[name] = FDSAbortIncompleteMultipartUpload(action)
elif name == 'expiration':
self._actions[name] = FDSExpiration(action)
elif name == 'nonCurrentVersionExpiration':
self._actions[name] = FDSNonCurrentVersionExpiration(action)
elif name == 'lifeCycleStorageClass':
self._actions[name] = FDSLifecycleStorageClass(action)
else:
raise GalaxyFDSClientException("invalid action type: " + name)
self['actions'] = self._actions
@property
def id(self):
return self.get('id', None)
@property
def enabled(self):
return self.get('enabled', False)
@enabled.setter
def enabled(self, enabled):
self['enabled'] = enabled
@property
def prefix(self):
return self.get('prefix', None)
@prefix.setter
def prefix(self, prefix):
self['prefix'] = prefix
@property
def actions(self):
return self._actions
def update_action(self, action):
self.actions[action.name] = action
class FDSExpiration(dict):
name = 'expiration'
def __init__(self, json):
dict.__init__(self, json)
@property
def days(self):
return self.get('days', 0)
@days.setter
def days(self, days):
self['days'] = days
class FDSNonCurrentVersionExpiration(dict):
name = 'nonCurrentVersionExpiration'
def __init__(self, json):
dict.__init__(self, json)
@property
def days(self):
return self.get('days', 0)
@days.setter
def days(self, days):
self['days'] = days
class FDSAbortIncompleteMultipartUpload(dict):
name = 'abortIncompleteMultipartUpload'
@property
def days(self):
return self.get('days', 0)
@days.setter
def days(self, days):
self['days'] = days
class FDSLifecycleStorageClass(dict):
name = 'lifeCycleStorageClass'
@property
def days(self):
return self.get('days', 0)
@days.setter
def days(self, days):
self['days'] = days
@property
def storage_class(self):
return self.get('storageClass', "")
@storage_class.setter
def storage_class(self, storage_class):
self['storageClass'] = storage_class
if __name__ == '__main__':
lifecycle_config = FDSLifecycleConfig()
rule1 = FDSLifecycleRule()
rule1.enabled = True
rule1.prefix = 'test'
action1 = FDSExpiration({'days': 30})
rule1.update_action(action1)
print(json.dumps(rule1, sort_keys=True))
lifecycle_config.rules.append(rule1)
print(json.dumps(lifecycle_config, sort_keys=True))
lifecycle_config.rules.append(rule1)
print(json.dumps(lifecycle_config, sort_keys=True))
jsonstr = ''' {
"rules": [
{
"enabled": true,
"prefix": "log",
"actions": {
"nonCurrentVersionExpiration": {
"days": 7
},
"expiration": {
"days":30.7
}
}
},
{
"enabled": true,
"prefix": "images",
"actions": {
"nonCurrentVersionExpiration": {
"days": 7
},
"expiration": {
"days":30
},
"abortIncompleteMultipartUpload": {
"days":7
},
"lifeCycleStorageClass": {
"days":2,
"storageClass":"ARCHIVE"
}
}
}
]
}
'''
print(jsonstr)
print(json.dumps(json.loads(jsonstr), sort_keys=True))
print(json.dumps(FDSLifecycleConfig(json.loads(jsonstr)), sort_keys=True))
|
|
from django.template.base import TemplateSyntaxError
from django.template.context import Context
from django.template.loader_tags import BlockContext, BlockNode
from django.test import SimpleTestCase
from ..utils import SilentAttrClass, SilentGetItemClass, SomeClass, setup
basic_templates = {
'basic-syntax01': 'something cool',
'basic-syntax02': '{{ headline }}',
'basic-syntax03': '{{ first }} --- {{ second }}',
}
class BasicSyntaxTests(SimpleTestCase):
@setup(basic_templates)
def test_basic_syntax01(self):
"""
Plain text should go through the template parser untouched.
"""
output = self.engine.render_to_string('basic-syntax01')
self.assertEqual(output, "something cool")
@setup(basic_templates)
def test_basic_syntax02(self):
"""
Variables should be replaced with their value in the current
context
"""
output = self.engine.render_to_string('basic-syntax02', {'headline': 'Success'})
self.assertEqual(output, 'Success')
@setup(basic_templates)
def test_basic_syntax03(self):
"""
More than one replacement variable is allowed in a template
"""
output = self.engine.render_to_string('basic-syntax03', {"first": 1, "second": 2})
self.assertEqual(output, '1 --- 2')
@setup({'basic-syntax04': 'as{{ missing }}df'})
def test_basic_syntax04(self):
"""
Fail silently when a variable is not found in the current context
"""
output = self.engine.render_to_string('basic-syntax04')
if self.engine.string_if_invalid:
self.assertEqual(output, 'asINVALIDdf')
else:
self.assertEqual(output, 'asdf')
@setup({'basic-syntax06': '{{ multi word variable }}'})
def test_basic_syntax06(self):
"""
A variable may not contain more than one word
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('basic-syntax06')
@setup({'basic-syntax07': '{{ }}'})
def test_basic_syntax07(self):
"""
Raise TemplateSyntaxError for empty variable tags.
"""
with self.assertRaisesMessage(TemplateSyntaxError, 'Empty variable tag on line 1'):
self.engine.get_template('basic-syntax07')
@setup({'basic-syntax08': '{{ }}'})
def test_basic_syntax08(self):
"""
Raise TemplateSyntaxError for empty variable tags.
"""
with self.assertRaisesMessage(TemplateSyntaxError, 'Empty variable tag on line 1'):
self.engine.get_template('basic-syntax08')
@setup({'basic-syntax09': '{{ var.method }}'})
def test_basic_syntax09(self):
"""
Attribute syntax allows a template to call an object's attribute
"""
output = self.engine.render_to_string('basic-syntax09', {'var': SomeClass()})
self.assertEqual(output, 'SomeClass.method')
@setup({'basic-syntax10': '{{ var.otherclass.method }}'})
def test_basic_syntax10(self):
"""
Multiple levels of attribute access are allowed.
"""
output = self.engine.render_to_string('basic-syntax10', {'var': SomeClass()})
self.assertEqual(output, 'OtherClass.method')
@setup({'basic-syntax11': '{{ var.blech }}'})
def test_basic_syntax11(self):
"""
Fail silently when a variable's attribute isn't found.
"""
output = self.engine.render_to_string('basic-syntax11', {'var': SomeClass()})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'basic-syntax12': '{{ var.__dict__ }}'})
def test_basic_syntax12(self):
"""
Raise TemplateSyntaxError when trying to access a variable
beginning with an underscore.
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('basic-syntax12')
# Raise TemplateSyntaxError when trying to access a variable
# containing an illegal character.
@setup({'basic-syntax13': "{{ va>r }}"})
def test_basic_syntax13(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('basic-syntax13')
@setup({'basic-syntax14': "{{ (var.r) }}"})
def test_basic_syntax14(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('basic-syntax14')
@setup({'basic-syntax15': "{{ sp%am }}"})
def test_basic_syntax15(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('basic-syntax15')
@setup({'basic-syntax16': "{{ eggs! }}"})
def test_basic_syntax16(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('basic-syntax16')
@setup({'basic-syntax17': "{{ moo? }}"})
def test_basic_syntax17(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('basic-syntax17')
@setup({'basic-syntax18': "{{ foo.bar }}"})
def test_basic_syntax18(self):
"""
Attribute syntax allows a template to call a dictionary key's
value.
"""
output = self.engine.render_to_string('basic-syntax18', {"foo": {"bar": "baz"}})
self.assertEqual(output, "baz")
@setup({'basic-syntax19': "{{ foo.spam }}"})
def test_basic_syntax19(self):
"""
Fail silently when a variable's dictionary key isn't found.
"""
output = self.engine.render_to_string('basic-syntax19', {"foo": {"bar": "baz"}})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'basic-syntax20': "{{ var.method2 }}"})
def test_basic_syntax20(self):
"""
Fail silently when accessing a non-simple method
"""
output = self.engine.render_to_string('basic-syntax20', {'var': SomeClass()})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'basic-syntax20b': "{{ var.method5 }}"})
def test_basic_syntax20b(self):
"""
Don't silence a TypeError if it was raised inside a callable.
"""
template = self.engine.get_template('basic-syntax20b')
with self.assertRaises(TypeError):
template.render(Context({'var': SomeClass()}))
# Don't get confused when parsing something that is almost, but not
# quite, a template tag.
@setup({'basic-syntax21': "a {{ moo %} b"})
def test_basic_syntax21(self):
output = self.engine.render_to_string('basic-syntax21')
self.assertEqual(output, "a {{ moo %} b")
@setup({'basic-syntax22': "{{ moo #}"})
def test_basic_syntax22(self):
output = self.engine.render_to_string('basic-syntax22')
self.assertEqual(output, "{{ moo #}")
@setup({'basic-syntax23': "{{ moo #} {{ cow }}"})
def test_basic_syntax23(self):
"""
Treat "moo #} {{ cow" as the variable. Not ideal, but costly to work
around, so this triggers an error.
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('basic-syntax23')
@setup({'basic-syntax24': "{{ moo\n }}"})
def test_basic_syntax24(self):
"""
Embedded newlines make it not-a-tag.
"""
output = self.engine.render_to_string('basic-syntax24')
self.assertEqual(output, "{{ moo\n }}")
# Literal strings are permitted inside variables, mostly for i18n
# purposes.
@setup({'basic-syntax25': '{{ "fred" }}'})
def test_basic_syntax25(self):
output = self.engine.render_to_string('basic-syntax25')
self.assertEqual(output, "fred")
@setup({'basic-syntax26': r'{{ "\"fred\"" }}'})
def test_basic_syntax26(self):
output = self.engine.render_to_string('basic-syntax26')
self.assertEqual(output, "\"fred\"")
@setup({'basic-syntax27': r'{{ _("\"fred\"") }}'})
def test_basic_syntax27(self):
output = self.engine.render_to_string('basic-syntax27')
self.assertEqual(output, "\"fred\"")
# #12554 -- Make sure a silent_variable_failure Exception is
# suppressed on dictionary and attribute lookup.
@setup({'basic-syntax28': "{{ a.b }}"})
def test_basic_syntax28(self):
output = self.engine.render_to_string('basic-syntax28', {'a': SilentGetItemClass()})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'basic-syntax29': "{{ a.b }}"})
def test_basic_syntax29(self):
output = self.engine.render_to_string('basic-syntax29', {'a': SilentAttrClass()})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
# Something that starts like a number but has an extra lookup works
# as a lookup.
@setup({'basic-syntax30': "{{ 1.2.3 }}"})
def test_basic_syntax30(self):
output = self.engine.render_to_string(
'basic-syntax30',
{"1": {"2": {"3": "d"}}}
)
self.assertEqual(output, 'd')
@setup({'basic-syntax31': "{{ 1.2.3 }}"})
def test_basic_syntax31(self):
output = self.engine.render_to_string(
'basic-syntax31',
{"1": {"2": ("a", "b", "c", "d")}},
)
self.assertEqual(output, 'd')
@setup({'basic-syntax32': "{{ 1.2.3 }}"})
def test_basic_syntax32(self):
output = self.engine.render_to_string(
'basic-syntax32',
{"1": (("x", "x", "x", "x"), ("y", "y", "y", "y"), ("a", "b", "c", "d"))},
)
self.assertEqual(output, 'd')
@setup({'basic-syntax33': "{{ 1.2.3 }}"})
def test_basic_syntax33(self):
output = self.engine.render_to_string(
'basic-syntax33',
{"1": ("xxxx", "yyyy", "abcd")},
)
self.assertEqual(output, 'd')
@setup({'basic-syntax34': "{{ 1.2.3 }}"})
def test_basic_syntax34(self):
output = self.engine.render_to_string(
'basic-syntax34',
{"1": ({"x": "x"}, {"y": "y"}, {"z": "z", "3": "d"})}
)
self.assertEqual(output, 'd')
# Numbers are numbers even if their digits are in the context.
@setup({'basic-syntax35': "{{ 1 }}"})
def test_basic_syntax35(self):
output = self.engine.render_to_string('basic-syntax35', {"1": "abc"})
self.assertEqual(output, '1')
@setup({'basic-syntax36': "{{ 1.2 }}"})
def test_basic_syntax36(self):
output = self.engine.render_to_string('basic-syntax36', {"1": "abc"})
self.assertEqual(output, '1.2')
@setup({'basic-syntax37': '{{ callable }}'})
def test_basic_syntax37(self):
"""
Call methods in the top level of the context.
"""
output = self.engine.render_to_string('basic-syntax37', {"callable": lambda: "foo bar"})
self.assertEqual(output, 'foo bar')
@setup({'basic-syntax38': '{{ var.callable }}'})
def test_basic_syntax38(self):
"""
Call methods returned from dictionary lookups.
"""
output = self.engine.render_to_string('basic-syntax38', {"var": {"callable": lambda: "foo bar"}})
self.assertEqual(output, 'foo bar')
@setup({'template': '{% block content %}'})
def test_unclosed_block(self):
msg = "Unclosed tag on line 1: 'block'. Looking for one of: endblock."
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string('template')
@setup({'template': '{% if a %}'})
def test_unclosed_block2(self):
msg = "Unclosed tag on line 1: 'if'. Looking for one of: elif, else, endif."
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string('template')
@setup({'tpl-str': '%s', 'tpl-percent': '%%', 'tpl-weird-percent': '% %s'})
def test_ignores_strings_that_look_like_format_interpolation(self):
output = self.engine.render_to_string('tpl-str')
self.assertEqual(output, '%s')
output = self.engine.render_to_string('tpl-percent')
self.assertEqual(output, '%%')
output = self.engine.render_to_string('tpl-weird-percent')
self.assertEqual(output, '% %s')
class BlockContextTests(SimpleTestCase):
def test_repr(self):
block_context = BlockContext()
block_context.add_blocks({'content': BlockNode('content', [])})
self.assertEqual(
repr(block_context),
"<BlockContext: blocks=defaultdict(<class 'list'>, "
"{'content': [<Block Node: content. Contents: []>]})>",
)
|
|
#!/usr/bin/env python
from PIL import Image
import math
import sys
import heapq
from collections import deque
import control
cmdRight = None
cmdLeft = None
cmdDown = None
cmdUp = None
cmdStop = None
def setupDeviceProxy(devicePath):
control.setup(devicePath)
global cmdRight
global cmdLeft
global cmdDown
global cmdUp
global cmdStop
shouldLogToConsole = (devicePath is None)
def _tmpCmdRight(d):
if shouldLogToConsole:
print "R %d" % d
else:
control.cmdRight(d)
def _tmpCmdLeft(d):
if shouldLogToConsole:
print "L %d" % d
else:
control.cmdLeft(d)
def _tmpCmdUp(d):
if shouldLogToConsole:
print "U %d" % d
else:
control.cmdUp(d)
def _tmpCmdDown(d):
if shouldLogToConsole:
print "D %d" % d
else:
control.cmdDown(d)
def _tmpCmdStop():
if not shouldLogToConsole:
control.cmdStop()
cmdRight = _tmpCmdRight
cmdLeft = _tmpCmdLeft
cmdUp = _tmpCmdUp
cmdDown = _tmpCmdDown
cmdStop = _tmpCmdStop
def distance(p1, p2):
x = abs(p1[0] - p2[0])
y = abs(p1[1] - p2[1])
return math.sqrt(x*x + y*y)
def isBlack(p):
return p[0] == 0 and p[1] == 0 and p[2] == 0
def gatherBlackPoints(image):
pixels = image.load()
width, height = image.size
result = []
for x in range(width):
for y in range(height):
if isBlack(pixels[x,y]):
result.append((x,y))
return result
def closestToGroup(group, points):
""" returns index of 'g' in group and index of 'p' in points that are closet to each other
"""
# really doesn't make sense to call this with empty lists
if len(group) == 0 and len(points) == 0:
return -1
gIdx = 0
pIdx = 0
for g in range(len(group)):
for p in range(len(points)):
if distance(group[gIdx], points[pIdx]) > distance(group[g], points[p]):
gIdx = g
pIdx = p
return (gIdx, pIdx)
def clamp(minval, value, maxval):
if value < minval:
return minval
elif value > maxval:
return maxval
else:
return value
def distForPointsSequence(points):
accum = 0
for i in range(len(points)-1):
accum += distance(points[i], points[i+1])
return accum
def placePointInGroup(group, gIdx, p):
""" p needs to be placed in group before or after gIdx
it's important to compare with both the previous and next gIdx +- 1 items in the mix
"""
if len(group) == 0 or len(group) == 1:
group.append(p)
return
# compare [gIdx-1,p,gIdx,gIdx+1] and [gIdx-1,gIdx,p,gIdx+1]
beforeSubset = group[clamp(0,gIdx-1,gIdx) : gIdx+2]
afterSubset = list(beforeSubset)
if gIdx == 0:
beforeSubset.insert(0, p)
afterSubset.insert(1, p)
else:
beforeSubset.insert(1, p)
afterSubset.insert(2, p)
beforeDist = distForPointsSequence(beforeSubset)
afterDist = distForPointsSequence(afterSubset)
if beforeDist < afterDist:
group.insert(gIdx, p)
else:
group.insert(gIdx+1, p)
def orderPoints(points):
if len(points) == 0 or len(points) == 1: return points
group = [points.pop()]
while len(points) > 0:
gIdx,pIdx = closestToGroup(group, points)
p = points.pop(pIdx)
placePointInGroup(group, gIdx, p)
return group
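# orderPoints is a greedy nearest-neighbour ordering: it seeds the group with
# one point, then repeatedly pulls the remaining point that is closest to any
# point already in the group and lets placePointInGroup decide on which side
# of that group point to insert it. The result is a short, but not
# necessarily optimal, drawing order for the pen.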
### begin solid drawing
def newNxM(width,height,value=None):
""" Create a width by height array. Indexable using: result[x][y]
"""
return [ [value for j in range(height)] for i in range(width) ]
def pointsAroundP(P, width, height):
""" Return a list of points surround P provided that P is within the bounds of the area
"""
Px,Py = P
if not(Px >= 0 and Px < width and Py >= 0 and Py < height):
return []
result = [
(Px-1, Py),
(Px+1, Py),
(Px, Py-1),
(Px, Py+1)
]
result = [p for p in result if p[0] >= 0 and p[0] < width and p[1] >= 0 and p[1] < height]
return result
def distFromAtoB(A,B):
Ax,Ay = A
Bx,By = B
return abs(Ax-Bx) + abs(Ay-By)
def lineLength(line):
return distFromAtoB(line.S, line.E)
class Line:
""" All of these lines run horizontally
"""
def __init__(self, S, E):
self.adjacents = []  # kept as a heap ordered by distance to the start point; depends on how we insert/visit nodes in the bitmap
self.S = S
self.E = E
def __hash__(self):
return hash((self.S, self.E))
def connect(self, line):
if not self.isConnected(line):
lineDist = distFromAtoB(self.S, line.S)
heapq.heappush(self.adjacents, (lineDist, line))
line.connect(self)
def isConnected(self, line):
return line in [L[1] for L in self.adjacents]
def containsPoint(self, point):
Px,Py = point
Sx,Sy = self.S
Ex,Ey = self.E
return (Py == Sy and Py == Ey and Px >= Sx and Px <= Ex)
def points(self):
Sx,Sy = self.S
Ex,Ey = self.E
for x in range(Sx, Ex+1):
yield (x, Sy)
def pointToLine(lines, point):
""" given lines in a row and a point determine if the point falls within in range of any line
"""
for l in lines:
if l.containsPoint(point):
return l
return None
def imageToHorizontalLines(image):
"""
pointToLine[pointTuple] -> Line
rowToLines[rowNumber] -> [Line]
"""
pixels = image.load()
width, height = image.size
# the (S,E) pairs of lines that will be drawn on the device
rowToLines = []
lines = []
for y in range(height):
currentLines = []
S = None
E = None
for x in range(width):
if S is None: # searching
if isBlack(pixels[x,y]):
S = (x,y)
E = (x,y)
else:
# white pixel is a continue
pass
else: # collecting
if isBlack(pixels[x,y]):
# continue we are looking for the end
# update the E to the current known end
E = (x,y)
else:
# we hit a white pixel while we were collecting so the previous pixel is the end
# place the (S,E) pair in the lines list
# move back to a searching state
currentLines.append( Line(S,E) )
S = None
E = None
if S and E: # last pixel in the row was black and we didn't get a chance to save it off
currentLines.append( Line(S,E) )
rowToLines.append(currentLines)
lines.extend(currentLines)
# now connect the lines to each other
for r1 in range(1, len(rowToLines)):
r0Lines = rowToLines[r1 - 1]
r1Lines = rowToLines[r1]
for l1 in r1Lines:
for Px,Py in l1.points():
aboveP = (Px,Py-1)
l2 = pointToLine(r0Lines, aboveP)
if l2:
l1.connect(l2)
return lines
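# Sketch of what this produces (illustrative 4x2 bitmap, X = black pixel):
#   row 0:  X X . X   ->  Line((0,0),(1,0)), Line((3,0),(3,0))
#   row 1:  . X X .   ->  Line((1,1),(2,1)), connected to the first row-0
#                         line because column 1 overlaps it.
# Each black run in a row becomes one horizontal Line, and lines in adjacent
# rows that share a column are linked via connect() for the later DFS walk.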
def drawCmdsFromPath(path):
""" Assumes a grid based (non-diagonal) path
"""
cmds = []
for i in range(len(path)-1):
P1x,P1y = path[i]
P2x,P2y = path[i+1]
cmd = None
if P1x == P2x:
cmd = cmdUp if P2y < P1y else cmdDown
else: # P1y == P2y
cmd = cmdLeft if P2x < P1x else cmdRight
if len(cmds) == 0:
cmds.append( (cmd, 1) )
else:
prevCmd = cmds[-1]
if prevCmd[0] == cmd:
cmds.pop()
cmds.append( (cmd, prevCmd[1] + 1) )
else:
cmds.append( (cmd, 1) )
for c,i in cmds:
c(i)
def pathFromAtoB(image, A, B):
""" Find a path from A to B that falls within the shaded area
"""
pixels = image.load()
width, height = image.size
Ax,Ay = A
Bx,By = B
previous = newNxM(width, height)
dist = newNxM(width, height, value=float("inf"))
dist[Ax][Ay] = 0
previous[Ax][Ay] = A
# just a little dijkstra from A to B
queue = []
heapq.heappush(queue, (0, A))
while len(queue) > 0:
item = heapq.heappop(queue)[1]
if item == B:
# all done
break
else:
points = pointsAroundP(item, width, height)
for p in points:
Px,Py = p
# stay within the bounds
if not isBlack(pixels[Px,Py]): continue
if previous[Px][Py]: # seen this point before see if traveling to p via item is cheaper
# A->item->p < A->p?
alt = dist[item[0]][item[1]] + 1
if alt < dist[Px][Py]:
dist[Px][Py] = alt
previous[Px][Py] = item
else: # new points that we should enqueue
distAtoP = dist[item[0]][item[1]] + 1
previous[Px][Py] = item
dist[Px][Py] = distAtoP
heapq.heappush(queue, (distAtoP, p) )
p = B
result = [B]
while p != A:
Px,Py = p
prev = previous[Px][Py]
result.append(prev)
p = prev
result.reverse()
return result
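# Note: with unit edge costs on a 4-connected grid this Dijkstra search
# behaves like a breadth-first search restricted to black pixels, so the
# returned path is a shortest grid path that stays inside the shaded area.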
def drawSolid(image):
result = imageToHorizontalLines(image)
# DFS visiting of adjacent lines
stack = [result[0]]
while len(stack) > 0:
line = stack.pop()
cmdRight(lineLength(line))
cmdLeft(lineLength(line))
# setup the next set of lines to draw
tmp = []
while len(line.adjacents) > 0:
adjacent = heapq.heappop(line.adjacents)[1]
tmp.append(adjacent)
# remove line from adjacent so it doesn't get queued up again
adjacent.adjacents = [x for x in adjacent.adjacents if x[1] != line]
heapq.heapify(adjacent.adjacents)
# reverse tmp so that we have the closest one on the top of the stack
stack.extend(reversed(tmp))
# position draw for next run through the loop
if len(stack) > 0:
last = stack[-1]
path = pathFromAtoB(image, line.S, last.S)
drawCmdsFromPath(path)
cmdStop()
### end solid drawing
def main():
import argparse
parser = argparse.ArgumentParser(description='Draws a black and white image on the Etch-a-Sketch')
parser.add_argument("image", metavar="IMAGE",type=str, help="Path to an image file")
parser.add_argument("--device", help="Path to Arduino such as /dev/tty.usbmodem1411. If DEVICE is not specified drawing commands will be logged to stdout.", type=str)
args = parser.parse_args()
setupDeviceProxy(args.device)
image = Image.open(args.image)
drawSolid(image)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file implements the main function for the output submitter, which uses
the OutputSubmitter class in the lib directory."""
import optparse
import os
import re
import sys
from lib import code_jam_login
from lib import constants
from lib import contest_manager
from lib import data_manager
from lib import error
from lib import google_login
from lib import output_submitter
from lib import user_status
from lib import utils
def main():
"""Main function for the output submitter script.
This script receives three positional arguments, the problem name, the
input size and the submit id.
"""
try:
# Create an option parser and use it to parse the supplied arguments.
program_version = 'GCJ solution submitter {0}'.format(
constants.VERSION)
parser = optparse.OptionParser(usage='%prog [options] problem input id',
version=program_version)
parser.add_option('-l', '--login', action='store_true', dest='renew_cookie',
help='Ignore the stored cookie and log in again')
parser.add_option('-p', '--passwd', action='store', dest='password',
help=('Password used to log in. You will be prompted for '
'a password if one is required and this flag is '
'left empty and there is no password in the '
'configuration files'))
parser.add_option('-f', '--force', action='store_true', dest='force',
help=('Skip check to verify if there is a running timer '
'and there is no submission if the input is large'))
parser.add_option('-d', '--data-directory', action='store',
dest='data_directory',
help=('Directory with the I/O files and main source '
'files [default: ./source]'))
parser.add_option('-o', '--output-name', action='store', dest='output_name',
help='Name of the file with the solution\'s output')
parser.add_option('-a', '--add-source', action='append',
dest='extra_sources',
help='Add EXTRA_SOURCE to the submitted source files',
metavar='EXTRA_SOURCE')
parser.add_option('-z', '--zip-sources', action='store_true',
dest='zip_sources',
help=('Put the source files into a zip file before '
'submitting'))
parser.add_option('--ignore-zip', action='store_true', dest='ignore_zip',
help=('Ignore source zip files not specified directly '
'using the -a option'))
parser.add_option('--ignore-default-source', action='store_true',
dest='ignore_def_source',
help=('Ignore files in the default source directory, '
'except for those specified using the -a option'))
parser.add_option('--gzip-content', action='store_true',
dest='gzip_content',
help=('Send the output and source code using gzip '
'encoding (faster)'))
parser.add_option('--nogzip-content', action='store_false',
dest='gzip_content',
help=('Send the output and sources using plain encoding '
'(slower)'))
parser.add_option('--base_dir', action='store', dest='base_dir',
help=('Base directory used to parametrize configuration '
'file paths'))
parser.set_defaults(renew_login=False, force=False, gzip_content=True,
zip_sources=False, ignore_zip=False,
ignore_def_source=False,
base_dir=os.path.dirname(os.path.realpath(__file__)))
options, args = parser.parse_args()
# Store the script location in a runtime constant, so it can be used by
# the library to locate the configuration files.
constants.SetRuntimeConstant('base_dir', options.base_dir)
# Check that the number of arguments is valid.
if len(args) != 3:
raise error.OptionError('need 3 positional arguments')
# Check that the problem identifier is valid.
problem_name = args[0]
# Check that the submit id is a valid identifier.
id = args[2]
if not re.match(r'^\w+$', id):
raise error.OptionError('invalid id {0}, can only have numbers, letters '
'and underscores'.format(id))
# Check that the contest has been initialized.
if not contest_manager.IsInitialized():
raise error.ConfigurationError(
'Contest is not initialized, please initialize the contest before '
'trying to submit output files.\n')
# Read user and input information from the config file.
try:
current_config = data_manager.ReadData()
host = current_config['host']
user = current_config['user']
except KeyError as e:
raise error.ConfigurationError(
'Cannot find all required user data in the configuration files: {0}. '
'Please fill the missing fields in the user configuration '
'file.\n'.format(e))
# Read current contest information from the config file.
try:
middleware_tokens = current_config['middleware_tokens']
cookie = None if options.renew_cookie else current_config['cookie']
contest_id = current_config['contest_id']
problems = current_config['problems']
except KeyError as e:
raise error.ConfigurationError(
'Cannot find all required contest data in configuration files: {0}. '
'Reinitializing the contest might solve this error.\n'.format(e))
# Calculate the problem index and check if it is inside the range.
try:
problem_index = [p["name"] for p in problems].index(problem_name)
except ValueError:
raise error.UserError(
'Cannot find problem {0}.\n'.format(problem_name))
# Get the problem specification and the targeted I/O set from it.
problem = problems[problem_index]
io_set_name = args[1].lower()
io_set = utils.GetProblemIoSetByName(problem, io_set_name)
if io_set is None:
raise error.UserError(
'Input type {0} not found for problem {1}, available types are: '
'{2}.\n'.format(io_set_name, problem['name'],
', '.join(io_set['name']
for io_set in problem['io_sets'])))
input_id = io_set['input_id']
# Get the needed middleware tokens to submit solutions and check for running
# attempts.
try:
get_initial_values_token = middleware_tokens['GetInitialValues']
user_status_token = middleware_tokens['GetUserStatus']
submit_output_token = middleware_tokens['SubmitAnswer']
except KeyError as e:
raise error.ConfigurationError(
'Cannot find {0} token in configuration file. Reinitializing the '
'contest might solve this error.\n'.format(e))
# Get the data directory from the options, if not defined, get it from the
# configuration, using './source' as the default value if not found. In the
# same way, get the output filename format and the main source code filename
# format.
data_directory = (options.data_directory or
current_config.get('data_directory', './source'))
output_name_format = (options.output_name or
current_config.get('output_name_format',
'{problem}-{input}-{id}.out'))
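# For example (illustrative values): with the default format
# '{problem}-{input}-{id}.out', problem "A", input "small" and id "0", the
# output is looked up as A-small-0.out inside the data directory.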
source_names_format = current_config.get('source_names_format')
# There is no sensible default for the main source, so exit with error if no
# value is found and it wasn't ignored.
if not options.ignore_def_source and source_names_format is None:
raise error.UserError(
'No format found for the default sources file name. Specify '
'"source_name_format" in the configuration file or ignore it passing '
'--ignore-default-source.\n')
# Generate the output file name using the specified format.
try:
output_basename = output_name_format.format(
problem=problem["name"], input=io_set_name, id=id)
output_filename = os.path.normpath(os.path.join(data_directory,
output_basename))
except KeyError as e:
raise error.ConfigurationError(
'Invalid output name format {0}, {1} is an invalid key, only use '
'"problem", "input" and "id".\n'.format(input_name_format, e))
# Create the list with all the source files and add the default source file
# if it was requested.
source_names = []
if not options.ignore_def_source:
try:
# Process each source name in the source formats list.
for source_name_format in source_names_format:
# Generate the source file name using the specified format and append
# it to the source list.
def_source_basename = source_name_format.format(
problem=problem["name"], input=io_set_name, id=id)
def_source_filename = os.path.normpath(os.path.join(
data_directory, def_source_basename))
source_names.append(def_source_filename)
except KeyError as e:
raise error.ConfigurationError(
'Invalid source name format {0}, {1} is an invalid key, only '
'use "problem", "input" and "id".\n'.format(source_name_format, e))
# Append any extra source file to the source list, normalizing their paths
# for the current operative system.
if options.extra_sources is not None:
for extra_source_format in options.extra_sources:
extra_source_file = extra_source_format.format(problem=problem["name"],
input=io_set_name, id=id)
source_names.append(os.path.normpath(extra_source_file))
# Print message indicating that an output is going to be submitted.
print '-' * 79
print '{0} output for "{1}" at "{2}"'.format(
io_set['difficulty_name'].capitalize(), problem['name'],
output_filename)
print '-' * 79
# Renew the cookie if the user requested a new login or the cookie has
# expired.
if google_login.CookieHasExpired(cookie):
print 'Cookie has expired, logging into the Code Jam servers...'
cookie = None
if not cookie or options.renew_cookie:
cookie = code_jam_login.Login(options.password)
# Get the contest status and check if it is accepting submissions.
contest_status = contest_manager.GetContestStatus(
host, cookie, get_initial_values_token, contest_id)
if not options.force and not contest_manager.CanSubmit(contest_status):
raise error.UserError('Cannot submit solutions to this contest; it is neither '
'active nor in practice mode.\n')
# All problem inputs have public answers in practice mode.
io_set_public = (io_set['public'] or
contest_status == contest_manager.PRACTICE)
# Get the user status and check if it is participating or not.
current_user_status = user_status.GetUserStatus(
host, cookie, user_status_token, contest_id, problems)
if (contest_status == contest_manager.ACTIVE and
current_user_status is not None):
# Check that there is a running timer for this problem input.
problem_inputs = current_user_status.problem_inputs
problem_input_state = problem_inputs[problem_index][input_id]
if not options.force and problem_input_state.current_attempt == -1:
raise error.UserError(
'You cannot submit {0}-{1}, the timer expired or you did not '
'download this input.\n'.format(problem["name"], io_set_name))
# Ask for confirmation if user is trying to resubmit a non-public output.
if not io_set_public and problem_input_state.submitted:
submit_message = ('You already have submitted an output for {0}-{1}. '
'Resubmitting will override the previous one.'.format(
problem["name"], io_set_name))
utils.AskConfirmationOrDie(submit_message, 'Submit', options.force)
print 'Submitting new output and source files.'
else:
print 'Submitting output and source files.'
else:
print 'Submitting practice output and source files.'
  # If the contest is running and no source file is being included, show a
  # warning indicating that the user might be disqualified because of this.
  # This confirmation cannot be skipped by using --force.
if contest_status == contest_manager.ACTIVE and not source_names:
    submit_message = ('You did not include source code files for this '
                      'attempt. Submitting output files without source code '
                      'can lead to disqualification.')
utils.AskConfirmationOrDie(submit_message, 'Are you sure', False)
print 'Submitting without source files.'
# Create the output submitter and send the files.
submitter = output_submitter.OutputSubmitter(
host, cookie, submit_output_token, contest_id, problem['id'])
submitter.Submit(input_id, output_filename, source_names, io_set_public,
gzip_body=options.gzip_content, zip_sources=options.zip_sources,
add_ignored_zips=not options.ignore_zip)
except error.OptionError as e:
parser.print_usage()
program_basename = os.path.basename(sys.argv[0])
sys.stderr.write('{0}: error: {1}\n'.format(program_basename, e))
sys.exit(1)
except error.UserError as e:
sys.stderr.write(str(e))
sys.exit(1)
except error.CommandlineError as e:
sys.stderr.write('{0}: {1}'.format(e.__class__.__name__, e))
sys.exit(1)
if __name__ == '__main__':
main()
|
|
"""Implementation of beam search with penalties."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest
from layers import common_layers
# Assuming EOS_ID is 1
EOS_ID = 1
# Default value for INF
INF = 1. * 1e7
# TODO(b/124008972): Refactor the code and add unit tests.
def merge_beam_dim(tensor):
"""Reshapes first two dimensions in to single dimension.
Args:
tensor: Tensor to reshape of shape [A, B, ...]
Returns:
Reshaped tensor of shape [A*B, ...]
"""
shape = common_layers.shape_list(tensor)
shape[0] *= shape[1] # batch -> batch * beam_size
shape.pop(1) # Remove beam dim
return tf.reshape(tensor, shape)
def unmerge_beam_dim(tensor, batch_size, beam_size):
"""Reshapes first dimension back to [batch_size, beam_size].
Args:
tensor: Tensor to reshape of shape [batch_size*beam_size, ...]
batch_size: Tensor, original batch size.
beam_size: int, original beam size.
Returns:
Reshaped tensor of shape [batch_size, beam_size, ...]
"""
shape = common_layers.shape_list(tensor)
new_shape = [batch_size] + [beam_size] + shape[1:]
return tf.reshape(tensor, new_shape)
def expand_to_beam_size(tensor, beam_size):
"""Tiles a given tensor by beam_size.
Args:
tensor: tensor to tile [batch_size, ...]
beam_size: How much to tile the tensor by.
Returns:
Tiled tensor [batch_size, beam_size, ...]
"""
tensor = tf.expand_dims(tensor, axis=1)
tile_dims = [1] * tensor.shape.ndims
tile_dims[1] = beam_size
return tf.tile(tensor, tile_dims)
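# A minimal usage sketch (not called anywhere in this module) showing how the
# three shape helpers above fit together. The batch size, beam size and hidden
# size below are made-up values for illustration; assumes the TF 1.x APIs used
# throughout this file.
def _example_beam_shape_helpers():
  x = tf.zeros([4, 7])                           # [batch_size=4, hidden=7]
  tiled = expand_to_beam_size(x, beam_size=3)    # -> [4, 3, 7]
  merged = merge_beam_dim(tiled)                 # -> [12, 7]
  restored = unmerge_beam_dim(merged, batch_size=4, beam_size=3)  # -> [4, 3, 7]
  return tiled, merged, restored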
def get_state_shape_invariants(tensor):
"""Returns the shape of the tensor but sets middle dims to None."""
shape = tensor.shape.as_list()
for i in range(1, len(shape) - 1):
shape[i] = None
return tf.TensorShape(shape)
def compute_batch_indices(batch_size, beam_size):
"""Computes the i'th coordinate that contains the batch index for gathers.
Batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which
batch the beam item is in. This will create the i of the i,j coordinate
needed for the gather.
Args:
batch_size: Batch size
beam_size: Size of the beam.
Returns:
batch_pos: [batch_size, beam_size] tensor of ids
"""
batch_pos = tf.range(batch_size * beam_size) // beam_size
batch_pos = tf.reshape(batch_pos, [batch_size, beam_size])
return batch_pos
def _create_make_unique(inputs):
"""Replaces the lower bits of each element with iota.
The iota is used to derive the index, and also serves the purpose to
make each element unique to break ties.
Args:
inputs: A tensor with rank of 2 and dtype of tf.float32.
[batch_size, original_size].
Returns:
A tensor after element wise transformation, with dtype the same as inputs.
[batch_size, original_size].
Raises:
ValueError: If the rank of the input tensor does not equal 2.
"""
if inputs.shape.ndims != 2:
raise ValueError("Input of top_k_with_unique must be rank-2 "
"but got: %s" % inputs.shape)
height = inputs.shape[0]
width = inputs.shape[1]
zeros = tf.zeros([height, width], dtype=tf.int32)
# Count_mask is used to mask away the low order bits to ensure that every
# element is distinct.
log2_ceiling = int(math.ceil(math.log(int(width), 2)))
next_power_of_two = 1 << log2_ceiling
count_mask = ~(next_power_of_two - 1)
count_mask_r0 = tf.constant(count_mask)
count_mask_r2 = tf.fill([height, width], count_mask_r0)
# Smallest_normal is the bit representation of the smallest positive normal
# floating point number. The sign is zero, exponent is one, and the fraction
# is zero.
smallest_normal = 1 << 23
smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32)
smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0)
# Low_bit_mask is used to mask away the sign bit when computing the absolute
# value.
low_bit_mask = ~(1 << 31)
low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32)
low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0)
iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0),
[height, 1])
# Compare the absolute value with positive zero to handle negative zero.
input_r2 = tf.bitcast(inputs, tf.int32)
abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2)
if_zero_r2 = tf.equal(abs_r2, zeros)
smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or(
input_r2, smallest_normal_r2)
input_no_zeros_r2 = tf.where(
if_zero_r2, smallest_normal_preserving_sign_r2, input_r2)
# Discard the low-order bits and replace with iota.
and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2)
or_r2 = tf.bitwise.bitwise_or(and_r2, iota)
return tf.bitcast(or_r2, tf.float32)
def _create_topk_unique(inputs, k):
"""Creates the top k values in sorted order with indices.
Args:
inputs: A tensor with rank of 2. [batch_size, original_size].
k: An integer, number of top elements to select.
Returns:
topk_r2: A tensor, the k largest elements. [batch_size, k].
topk_indices_r2: A tensor, indices of the top k values. [batch_size, k].
"""
height = inputs.shape[0]
width = inputs.shape[1]
neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
ones = tf.ones([height, width], dtype=tf.float32)
neg_inf_r2 = ones * neg_inf_r0
inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)
# Select the current largest value k times and keep them in topk_r2. The
# selected largest values are marked as the smallest value to avoid being
# selected again.
tmp = inputs
topk_r2 = tf.zeros([height, k], dtype=tf.float32)
for i in range(k):
kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),
[height, 1])
topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
tmp = tf.where(ge_r2, neg_inf_r2, inputs)
log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
next_power_of_two = 1 << log2_ceiling
count_mask = next_power_of_two - 1
mask_r0 = tf.constant(count_mask)
mask_r2 = tf.fill([height, k], mask_r0)
topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
return topk_r2, topk_indices_r2
def top_k_with_unique(inputs, k):
"""Finds the values and indices of the k largests entries.
Instead of doing sort like tf.nn.top_k, this function finds the max value
k times. The running time is proportional to k, which is be faster when k
is small. The current implementation supports only inputs of rank 2.
  In addition, iota is used to replace the lower bits of each element; this
  makes the selection more stable when there are equal elements. The
  overhead is that the output values are approximated.
Args:
inputs: A tensor with rank of 2. [batch_size, original_size].
k: An integer, number of top elements to select.
Returns:
top_values: A tensor, the k largest elements in sorted order.
[batch_size, k].
indices: A tensor, indices of the top_values. [batch_size, k].
"""
unique_inputs = _create_make_unique(tf.cast(inputs, tf.float32))
top_values, indices = _create_topk_unique(unique_inputs, k)
top_values = tf.cast(top_values, inputs.dtype)
return top_values, indices
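# A brief usage sketch (illustrative only; the scores below are made up). The
# returned values are approximations of the inputs because the low-order bits
# are replaced by iota, and ties are broken deterministically.
def _example_top_k_with_unique():
  scores = tf.constant([[0.1, 0.9, 0.9, 0.3]])
  top_values, indices = top_k_with_unique(scores, k=2)
  return top_values, indices  # approximately [[0.9, 0.9]] and the two winning indices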
def compute_topk_scores_and_seq(sequences,
scores,
scores_to_gather,
flags,
beam_size,
prefix="default"):
"""Given sequences and scores, will gather the top k=beam size sequences.
  This function is used to grow the alive and finished sets. It takes sequences,
scores, and flags, and returns the top k from sequences, scores_to_gather,
and flags based on the values in scores.
This method permits easy introspection using tfdbg. It adds three named ops
that are prefixed by `prefix`:
- _topk_seq: the tensor for topk_seq returned by this method.
- _topk_flags: the tensor for topk_finished_flags returned by this method.
  - _topk_scores: the tensor for topk_gathered_scores returned by this method.
Args:
sequences: Tensor of sequences that we need to gather from.
[batch_size, beam_size, seq_length]
scores: Tensor of scores for each sequence in sequences.
[batch_size, beam_size]. We will use these to compute the topk.
scores_to_gather: Tensor of scores for each sequence in sequences.
[batch_size, beam_size]. We will return the gathered scores from here.
Scores to gather is different from scores because for grow_alive, we will
need to return log_probs, while for grow_finished, we will need to return
the length penalized scores.
flags: Tensor of bools for sequences that say whether a sequence has reached
EOS or not
beam_size: int
prefix: string that will prefix unique names for the ops run.
Returns:
Tuple of
(topk_seq [batch_size, beam_size, decode_length],
topk_gathered_scores [batch_size, beam_size],
topk_finished_flags[batch_size, beam_size],
topk_indexes)
"""
_, topk_indexes = top_k_with_unique(scores, k=beam_size)
# Gather up the highest scoring sequences. For each operation added, give
# it a concrete name to simplify observing these operations with tfdbg.
# Clients can capture these tensors by watching these node names.
topk_seq = tf.batch_gather(sequences, topk_indexes, prefix + "_topk_seq")
topk_flags = tf.batch_gather(flags, topk_indexes, prefix + "_topk_flags")
topk_gathered_scores = tf.batch_gather(scores_to_gather, topk_indexes,
prefix + "_topk_scores")
return topk_seq, topk_gathered_scores, topk_flags, topk_indexes
def beam_search(symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
states=None,
kv_encdecs=None,
eos_id=EOS_ID,
stop_early=True):
"""Beam search with length penalties.
Requires a function that can take the currently decoded symbols and return
the logits for the next symbol. The implementation is inspired by
https://arxiv.org/abs/1609.08144.
When running, the beam search steps can be visualized by using tfdbg to watch
the operations generating the output ids for each beam step. These operations
have the pattern:
(alive|finished)_topk_(seq,scores)
Operations marked `alive` represent the new beam sequences that will be
processed in the next step. Operations marked `finished` represent the
completed beam sequences, which may be padded with 0s if no beams finished.
Operations marked `seq` store the full beam sequence for the time step.
Operations marked `scores` store the sequence's final log scores.
  The beam search steps will be processed sequentially in order, so when
  capturing tensors observed from these operations, clients can make
  assumptions about which step is being recorded.
  WARNING: Assumes the 2nd dimension of tensors in `states` is not invariant;
  this means that the shape of the 2nd dimension of these tensors will not be
  available (i.e. set to None) inside symbols_to_logits_fn.
Args:
symbols_to_logits_fn: Interface to the model, to provide logits.
      Should take [batch_size, decoded_ids] and return [batch_size, vocab_size].
initial_ids: Ids to start off the decoding, this will be the first thing
handed to symbols_to_logits_fn (after expanding to beam size)
[batch_size]
beam_size: Size of the beam.
decode_length: Number of steps to decode for.
vocab_size: Size of the vocab, must equal the size of the logits returned by
symbols_to_logits_fn
alpha: alpha for length penalty.
states: dict (possibly nested) of decoding states.
kv_encdecs: A dict, representing the key and value for encoder-decoder
attention used by decoding (inference).
eos_id: ID for end of sentence.
stop_early: a boolean - stop once best sequence is provably determined.
Returns:
Tuple of
(decoded beams [batch_size, beam_size, decode_length]
decoding probabilities [batch_size, beam_size])
"""
batch_size = common_layers.shape_list(initial_ids)[0]
# Assume initial_ids are prob 1.0
initial_log_probs = tf.constant([[0.] + [-INF] * (beam_size - 1)])
# Expand to beam_size (batch_size, beam_size)
alive_log_probs = tf.tile(initial_log_probs, [batch_size, 1])
# Expand each batch and state to beam_size
alive_seq = expand_to_beam_size(initial_ids, beam_size)
alive_seq = tf.expand_dims(alive_seq, axis=2) # (batch_size, beam_size, 1)
alive_seq = tf.tile(alive_seq, [1, 1, decode_length + 1])
if states:
states = nest.map_structure(
lambda state: expand_to_beam_size(state, beam_size), states)
else:
states = {}
# Finished will keep track of all the sequences that have finished so far
# Finished log probs will be negative infinity in the beginning
# finished_flags will keep track of booleans
finished_seq = tf.zeros(common_layers.shape_list(alive_seq), tf.int32)
# Setting the scores of the initial to negative infinity.
finished_scores = tf.ones([batch_size, beam_size]) * -INF
finished_flags = tf.zeros([batch_size, beam_size], tf.bool)
def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq,
curr_scores, curr_finished):
"""Given sequences and scores, will gather the top k=beam size sequences.
Args:
finished_seq: Current finished sequences.
[batch_size, beam_size, current_decoded_length]
finished_scores: scores for each of these sequences.
[batch_size, beam_size]
finished_flags: finished bools for each of these sequences.
[batch_size, beam_size]
curr_seq: current topk sequence that has been grown by one position.
[batch_size, beam_size, current_decoded_length]
curr_scores: scores for each of these sequences. [batch_size, beam_size]
curr_finished: Finished flags for each of these sequences.
[batch_size, beam_size]
Returns:
Tuple of
(Topk sequences based on scores,
log probs of these sequences,
Finished flags of these sequences)
"""
# Set the scores of the unfinished seq in curr_seq to large negative
# values
curr_scores += (1. - tf.to_float(curr_finished)) * -INF
# concatenating the sequences and scores along beam axis
curr_finished_seq = tf.concat([finished_seq, curr_seq], axis=1)
curr_finished_scores = tf.concat([finished_scores, curr_scores], axis=1)
curr_finished_flags = tf.concat([finished_flags, curr_finished], axis=1)
return compute_topk_scores_and_seq(
curr_finished_seq, curr_finished_scores, curr_finished_scores,
curr_finished_flags, beam_size, "grow_finished")
def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished):
"""Given sequences and scores, will gather the top k=beam size sequences.
Args:
curr_seq: current topk sequence that has been grown by one position.
[batch_size, beam_size, i+1]
curr_scores: scores for each of these sequences. [batch_size, beam_size]
curr_log_probs: log probs for each of these sequences.
[batch_size, beam_size]
curr_finished: Finished flags for each of these sequences.
[batch_size, beam_size]
Returns:
Tuple of
(Topk sequences based on scores,
log probs of these sequences,
Finished flags of these sequences)
"""
# Set the scores of the finished seq in curr_seq to large negative
# values
curr_scores += tf.to_float(curr_finished) * -INF
return compute_topk_scores_and_seq(curr_seq, curr_scores, curr_log_probs,
curr_finished, beam_size, "grow_alive")
def grow_topk(i, alive_seq, alive_log_probs, states):
r"""Inner beam search loop.
    This function takes the current alive sequences and grows them to topk
    sequences where k = 2*beam. We use 2*beam because we could have beam_size
    sequences that hit <EOS>, leaving no alive sequences to continue. With
    2*beam_size this cannot happen. This relies on the assumption that the
    vocab size is > beam size; if that is true, we'll have at least beam_size
    non-<EOS> extensions if we extract the next top 2*beam words.
    The length penalty is ((5 + len(decode)) / 6) ^ alpha, and log probs are
    divided by it; please refer to https://arxiv.org/abs/1609.08144.
Args:
i: loop index
alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
alive_log_probs: probabilities of these sequences. [batch_size, beam_size]
states: dict (possibly nested) of decoding states.
Returns:
Tuple of
(Topk sequences extended by the next word,
The log probs of these sequences,
The scores with length penalty of these sequences,
Flags indicating which of these sequences have finished decoding,
dict of transformed decoding states,
Topk beam index)
"""
# Get the logits for all the possible next symbols
if states:
flat_ids = tf.reshape(
tf.slice(alive_seq, [0, 0, i], [batch_size, beam_size, 1]),
[batch_size * beam_size, -1])
else:
flat_ids = tf.reshape(alive_seq, [batch_size * beam_size, -1])
# (batch_size * beam_size, decoded_length)
if states:
flat_states = nest.map_structure(merge_beam_dim, states)
flat_logits, flat_states = symbols_to_logits_fn(
flat_ids, i, flat_states, kv_encdecs)
states = nest.map_structure(
lambda t: unmerge_beam_dim(t, batch_size, beam_size), flat_states)
else:
flat_logits = symbols_to_logits_fn(flat_ids, i)
logits = tf.reshape(flat_logits, [batch_size, beam_size, -1])
# Convert logits to normalized log probs
candidate_log_probs = common_layers.log_prob_from_logits(logits)
# Multiply the probabilities by the current probabilities of the beam.
# (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1)
log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
length_penalty = tf.pow(((5. + tf.to_float(i + 1)) / 6.), alpha)
curr_scores = log_probs / length_penalty
    # Flatten out (beam_size, vocab_size) probs into a list of possibilities.
flat_curr_scores = tf.reshape(curr_scores, [-1, beam_size * vocab_size])
topk_scores, topk_ids = top_k_with_unique(flat_curr_scores, k=beam_size * 2)
# Recovering the log probs because we will need to send them back
topk_log_probs = topk_scores * length_penalty
# Work out what beam the top probs are in.
topk_beam_index = topk_ids // vocab_size
topk_ids %= vocab_size # Unflatten the ids
# Gather up the most probable 2*beams both for the ids and
# finished_in_alive bools
topk_seq = tf.batch_gather(alive_seq, topk_beam_index)
# Update the most probable alive
indices = tf.reshape(
tf.one_hot(i + 1, decode_length + 1, dtype=topk_seq.dtype),
[1, 1, decode_length + 1])
topk_seq += tf.expand_dims(topk_ids, axis=2) * indices
topk_finished = tf.equal(topk_ids, eos_id)
return (topk_seq, topk_log_probs, topk_scores, topk_finished, states,
topk_beam_index)
def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags, states):
"""Inner beam search loop.
    There are three groups of tensors: alive, finished, and topk.
    The alive group contains information about the current alive sequences.
    The topk group contains information about alive + topk current decoded words.
    The finished group contains information about finished sentences, that is,
    the ones that have decoded to <EOS>. These are what we return.
    The general beam search algorithm is as follows:
    While we haven't terminated (see the termination condition):
      1. Grow the current alive to get beam*2 topk sequences.
      2. Among the topk, keep the top beam_size ones that haven't reached EOS
         as the new alive set.
      3. Among the topk, keep the top beam_size ones that have reached EOS as
         the new finished set.
      Repeat
    To keep things simple with fixed-size tensors, we will end up inserting
    unfinished sequences into finished at the beginning. To prevent that, we
    add negative INF to the score of the unfinished sequences so that when a
    true finished sequence does appear, it will have a higher score than all
    the unfinished ones.
Args:
i: loop index
alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
alive_log_probs: probabilities of the beams. [batch_size, beam_size]
finished_seq: Current finished sequences.
[batch_size, beam_size, i+1]
finished_scores: scores for each of these sequences.
[batch_size, beam_size]
finished_flags: finished bools for each of these sequences.
[batch_size, beam_size]
states: dict (possibly nested) of decoding states.
Returns:
Tuple of
(Incremented loop index
New alive sequences,
Log probs of the alive sequences,
New finished sequences,
Scores of the new finished sequences,
        Flags indicating which sequences in finished have reached EOS,
dict of final decoding states)
"""
# Each inner loop, we carry out three steps:
# 1. Get the current topk items.
# 2. Extract the ones that have finished and haven't finished
# 3. Recompute the contents of finished based on scores.
(topk_seq, topk_log_probs, topk_scores, topk_finished, states,
first_selector) = grow_topk(i, alive_seq, alive_log_probs, states)
alive_seq, alive_log_probs, _, second_selector = grow_alive(
topk_seq, topk_scores, topk_log_probs, topk_finished)
selector = tf.batch_gather(first_selector, second_selector)
if states:
states = nest.map_structure(
lambda state: tf.batch_gather(state, selector), states)
finished_seq, finished_scores, finished_flags, _ = grow_finished(
finished_seq, finished_scores, finished_flags, topk_seq, topk_scores,
topk_finished)
return (i + 1, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags, states)
def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq,
finished_scores, unused_finished_in_finished, unused_states):
"""Checking termination condition.
    We terminate when we have decoded up to decode_length or the lowest scoring
    item in finished has a greater score than the highest-prob item in alive
    divided by the max length penalty.
Args:
i: loop index
alive_log_probs: probabilities of the beams. [batch_size, beam_size]
finished_scores: scores for each of these sequences.
[batch_size, beam_size]
Returns:
Bool.
"""
max_length_penalty = tf.pow(((5. + tf.to_float(decode_length)) / 6.), alpha)
# The best possible score of the most likely alive sequence.
lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty
if not stop_early:
# by considering the min score (in the top N beams) we ensure that
# the decoder will keep decoding until there is at least one beam
# (in the top N) that can be improved (w.r.t. the alive beams).
# any unfinished beam will have score -INF - thus the min
# will always be -INF if there is at least one unfinished beam -
# which means the bound_is_met condition cannot be true in this case.
lowest_score_of_finished_in_finished = tf.reduce_min(finished_scores)
else:
# by taking the max score we only care about the first beam;
# as soon as this first beam cannot be beaten from the alive beams
# the beam decoder can stop.
# similarly to the above, if the top beam is not completed, its
# finished_score is -INF, thus it will not activate the
# bound_is_met condition. (i.e., decoder will keep going on).
      # note we need to find the max for every sequence separately - so, we need
# to keep the batch dimension (see axis=1)
lowest_score_of_finished_in_finished = tf.reduce_max(finished_scores,
axis=1)
bound_is_met = tf.reduce_all(
tf.greater(lowest_score_of_finished_in_finished,
lower_bound_alive_scores))
return tf.logical_and(
tf.less(i, decode_length), tf.logical_not(bound_is_met))
(_, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags, _) = tf.while_loop(
_is_finished,
inner_loop, [
tf.constant(0), alive_seq, alive_log_probs, finished_seq,
finished_scores, finished_flags, states
],
shape_invariants=[
tf.TensorShape([]),
tf.TensorShape([batch_size, beam_size, decode_length + 1]),
alive_log_probs.get_shape(),
tf.TensorShape([batch_size, beam_size, decode_length + 1]),
finished_scores.get_shape(),
finished_flags.get_shape(),
nest.map_structure(lambda state: state.get_shape(), states),
],
parallel_iterations=1,
back_prop=False)
alive_seq.set_shape((None, beam_size, None))
finished_seq.set_shape((None, beam_size, None))
  # Accounting for a corner case: it's possible that no sequence in alive for a
  # particular batch item ever reached EOS. In that case, we should just copy
  # the contents of alive for that batch item. If tf.reduce_any(finished_flags, 1)
  # is 0 for a batch index, no sequence for that batch index reached EOS. We
  # need to do the same for the scores as well.
finished_seq = tf.where(
tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)
finished_scores = tf.where(
tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)
return finished_seq, finished_scores
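# A self-contained toy sketch (illustrative, not part of the original module):
# runs beam_search with a dummy model whose logits always favour token 2. The
# vocab size, decode length and the dummy scoring rule are assumptions made for
# this demo only.
def _example_beam_search_usage():
  batch_size, vocab_size = 2, 5

  def _dummy_symbols_to_logits_fn(ids, i):
    # Favour token 2 regardless of the prefix decoded so far or the step i.
    del i  # unused in this toy model
    return tf.tile(tf.constant([[0., 0., 10., 0., 0.]]), [tf.shape(ids)[0], 1])

  initial_ids = tf.zeros([batch_size], dtype=tf.int32)
  decoded, scores = beam_search(
      _dummy_symbols_to_logits_fn,
      initial_ids,
      beam_size=3,
      decode_length=4,
      vocab_size=vocab_size,
      alpha=0.6)
  return decoded, scores  # [batch, beam, decode_length + 1] and [batch, beam]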
|
|
from sympy import (
Symbol, diff, Derivative, Rational, roots, S, sqrt, hyper,
cos, gamma, conjugate, factorial, pi, oo, zoo, binomial, RisingFactorial,
legendre, assoc_legendre, chebyshevu, chebyshevt, chebyshevt_root, chebyshevu_root,
laguerre, assoc_laguerre, laguerre_poly, hermite, gegenbauer, jacobi, jacobi_normalized)
from sympy.utilities.pytest import raises
x = Symbol('x')
def test_jacobi():
n = Symbol("n")
a = Symbol("a")
b = Symbol("b")
assert jacobi(0, a, b, x) == 1
assert jacobi(1, a, b, x) == a/2 - b/2 + x*(a/2 + b/2 + 1)
assert jacobi(n, a, a, x) == RisingFactorial(
a + 1, n)*gegenbauer(n, a + S(1)/2, x)/RisingFactorial(2*a + 1, n)
assert jacobi(n, a, -a, x) == ((-1)**a*(-x + 1)**(-a/2)*(x + 1)**(a/2)*assoc_legendre(n, a, x)*
factorial(-a + n)*gamma(a + n + 1)/(factorial(a + n)*gamma(n + 1)))
assert jacobi(n, -b, b, x) == ((-x + 1)**(b/2)*(x + 1)**(-b/2)*assoc_legendre(n, b, x)*
gamma(-b + n + 1)/gamma(n + 1))
assert jacobi(n, 0, 0, x) == legendre(n, x)
assert jacobi(n, S.Half, S.Half, x) == RisingFactorial(
S(3)/2, n)*chebyshevu(n, x)/factorial(n + 1)
assert jacobi(n, -S.Half, -S.Half, x) == RisingFactorial(
S(1)/2, n)*chebyshevt(n, x)/factorial(n)
X = jacobi(n, a, b, x)
assert isinstance(X, jacobi)
assert jacobi(n, a, b, -x) == (-1)**n*jacobi(n, b, a, x)
assert jacobi(n, a, b, 0) == 2**(-n)*gamma(a + n + 1)*hyper(
(-b - n, -n), (a + 1,), -1)/(factorial(n)*gamma(a + 1))
assert jacobi(n, a, b, 1) == RisingFactorial(a + 1, n)/factorial(n)
m = Symbol("m", positive=True)
assert jacobi(m, a, b, oo) == oo*RisingFactorial(a + b + m + 1, m)
assert conjugate(jacobi(m, a, b, x)) == \
jacobi(m, conjugate(a), conjugate(b), conjugate(x))
assert diff(jacobi(n, a, b, x), n) == Derivative(jacobi(n, a, b, x), n)
assert diff(jacobi(n, a, b, x), x) == \
(a/2 + b/2 + n/2 + S(1)/2)*jacobi(n - 1, a + 1, b + 1, x)
assert jacobi_normalized(n, a, b, x) == \
(jacobi(n, a, b, x)/sqrt(2**(a + b + 1)*gamma(a + n + 1)*gamma(b + n + 1)
/((a + b + 2*n + 1)*factorial(n)*gamma(a + b + n + 1))))
def test_gegenbauer():
n = Symbol("n")
a = Symbol("a")
assert gegenbauer(0, a, x) == 1
assert gegenbauer(1, a, x) == 2*a*x
assert gegenbauer(2, a, x) == -a + x**2*(2*a**2 + 2*a)
assert gegenbauer(3, a, x) == \
x**3*(4*a**3/3 + 4*a**2 + 8*a/3) + x*(-2*a**2 - 2*a)
assert gegenbauer(-1, a, x) == 0
assert gegenbauer(n, S(1)/2, x) == legendre(n, x)
assert gegenbauer(n, 1, x) == chebyshevu(n, x)
assert gegenbauer(n, -1, x) == 0
X = gegenbauer(n, a, x)
assert isinstance(X, gegenbauer)
assert gegenbauer(n, a, -x) == (-1)**n*gegenbauer(n, a, x)
assert gegenbauer(n, a, 0) == 2**n*sqrt(pi) * \
gamma(a + n/2)/(gamma(a)*gamma(-n/2 + S(1)/2)*gamma(n + 1))
assert gegenbauer(n, a, 1) == gamma(2*a + n)/(gamma(2*a)*gamma(n + 1))
assert gegenbauer(n, Rational(3, 4), -1) == zoo
m = Symbol("m", positive=True)
assert gegenbauer(m, a, oo) == oo*RisingFactorial(a, m)
assert conjugate(gegenbauer(n, a, x)) == gegenbauer(n, conjugate(a), conjugate(x))
assert diff(gegenbauer(n, a, x), n) == Derivative(gegenbauer(n, a, x), n)
assert diff(gegenbauer(n, a, x), x) == 2*a*gegenbauer(n - 1, a + 1, x)
def test_legendre():
raises(ValueError, lambda: legendre(-1, x))
assert legendre(0, x) == 1
assert legendre(1, x) == x
assert legendre(2, x) == ((3*x**2 - 1)/2).expand()
assert legendre(3, x) == ((5*x**3 - 3*x)/2).expand()
assert legendre(4, x) == ((35*x**4 - 30*x**2 + 3)/8).expand()
assert legendre(5, x) == ((63*x**5 - 70*x**3 + 15*x)/8).expand()
assert legendre(6, x) == ((231*x**6 - 315*x**4 + 105*x**2 - 5)/16).expand()
assert legendre(10, -1) == 1
assert legendre(11, -1) == -1
assert legendre(10, 1) == 1
assert legendre(11, 1) == 1
assert legendre(10, 0) != 0
assert legendre(11, 0) == 0
assert roots(legendre(4, x), x) == {
sqrt(Rational(3, 7) - Rational(2, 35)*sqrt(30)): 1,
-sqrt(Rational(3, 7) - Rational(2, 35)*sqrt(30)): 1,
sqrt(Rational(3, 7) + Rational(2, 35)*sqrt(30)): 1,
-sqrt(Rational(3, 7) + Rational(2, 35)*sqrt(30)): 1,
}
n = Symbol("n")
X = legendre(n, x)
assert isinstance(X, legendre)
assert legendre(-n, x) == legendre(n - 1, x)
assert legendre(n, -x) == (-1)**n*legendre(n, x)
assert conjugate(legendre(n, x)) == legendre(n, conjugate(x))
assert diff(legendre(n, x), x) == \
n*(x*legendre(n, x) - legendre(n - 1, x))/(x**2 - 1)
assert diff(legendre(n, x), n) == Derivative(legendre(n, x), n)
def test_assoc_legendre():
Plm = assoc_legendre
Q = sqrt(1 - x**2)
assert Plm(0, 0, x) == 1
assert Plm(1, 0, x) == x
assert Plm(1, 1, x) == -Q
assert Plm(2, 0, x) == (3*x**2 - 1)/2
assert Plm(2, 1, x) == -3*x*Q
assert Plm(2, 2, x) == 3*Q**2
assert Plm(3, 0, x) == (5*x**3 - 3*x)/2
assert Plm(3, 1, x).expand() == (( 3*(1 - 5*x**2)/2 ).expand() * Q).expand()
assert Plm(3, 2, x) == 15*x * Q**2
assert Plm(3, 3, x) == -15 * Q**3
# negative m
assert Plm(1, -1, x) == -Plm(1, 1, x)/2
assert Plm(2, -2, x) == Plm(2, 2, x)/24
assert Plm(2, -1, x) == -Plm(2, 1, x)/6
assert Plm(3, -3, x) == -Plm(3, 3, x)/720
assert Plm(3, -2, x) == Plm(3, 2, x)/120
assert Plm(3, -1, x) == -Plm(3, 1, x)/12
n = Symbol("n")
m = Symbol("m")
X = Plm(n, m, x)
assert isinstance(X, assoc_legendre)
assert Plm(n, 0, x) == legendre(n, x)
raises(ValueError, lambda: Plm(-1, 0, x))
raises(ValueError, lambda: Plm(0, 1, x))
assert conjugate(assoc_legendre(n, m, x)) == \
assoc_legendre(n, conjugate(m), conjugate(x))
def test_chebyshev():
assert chebyshevt(0, x) == 1
assert chebyshevt(1, x) == x
assert chebyshevt(2, x) == 2*x**2 - 1
assert chebyshevt(3, x) == 4*x**3 - 3*x
for n in range(1, 4):
for k in range(n):
z = chebyshevt_root(n, k)
assert chebyshevt(n, z) == 0
raises(ValueError, lambda: chebyshevt_root(n, n))
for n in range(1, 4):
for k in range(n):
z = chebyshevu_root(n, k)
assert chebyshevu(n, z) == 0
raises(ValueError, lambda: chebyshevu_root(n, n))
n = Symbol("n")
X = chebyshevt(n, x)
assert isinstance(X, chebyshevt)
assert chebyshevt(n, -x) == (-1)**n*chebyshevt(n, x)
assert chebyshevt(-n, x) == chebyshevt(n, x)
assert chebyshevt(n, 0) == cos(pi*n/2)
assert chebyshevt(n, 1) == 1
assert conjugate(chebyshevt(n, x)) == chebyshevt(n, conjugate(x))
assert diff(chebyshevt(n, x), x) == n*chebyshevu(n - 1, x)
X = chebyshevu(n, x)
assert isinstance(X, chebyshevu)
assert chebyshevu(n, -x) == (-1)**n*chebyshevu(n, x)
assert chebyshevu(-n, x) == -chebyshevu(n - 2, x)
assert chebyshevu(n, 0) == cos(pi*n/2)
assert chebyshevu(n, 1) == n + 1
assert conjugate(chebyshevu(n, x)) == chebyshevu(n, conjugate(x))
assert diff(chebyshevu(n, x), x) == \
(-x*chebyshevu(n, x) + (n + 1)*chebyshevt(n + 1, x))/(x**2 - 1)
def test_hermite():
assert hermite(0, x) == 1
assert hermite(1, x) == 2*x
assert hermite(2, x) == 4*x**2 - 2
assert hermite(3, x) == 8*x**3 - 12*x
assert hermite(4, x) == 16*x**4 - 48*x**2 + 12
assert hermite(6, x) == 64*x**6 - 480*x**4 + 720*x**2 - 120
n = Symbol("n")
assert hermite(n, x) == hermite(n, x)
assert hermite(n, -x) == (-1)**n*hermite(n, x)
assert hermite(-n, x) == hermite(-n, x)
assert conjugate(hermite(n, x)) == hermite(n, conjugate(x))
assert diff(hermite(n, x), x) == 2*n*hermite(n - 1, x)
assert diff(hermite(n, x), n) == Derivative(hermite(n, x), n)
def test_laguerre():
n = Symbol("n")
# Laguerre polynomials:
assert laguerre(0, x) == 1
assert laguerre(1, x) == -x + 1
assert laguerre(2, x) == x**2/2 - 2*x + 1
assert laguerre(3, x) == -x**3/6 + 3*x**2/2 - 3*x + 1
X = laguerre(n, x)
assert isinstance(X, laguerre)
assert laguerre(n, 0) == 1
assert conjugate(laguerre(n, x)) == laguerre(n, conjugate(x))
assert diff(laguerre(n, x), x) == -assoc_laguerre(n - 1, 1, x)
def test_assoc_laguerre():
n = Symbol("n")
m = Symbol("m")
alpha = Symbol("alpha")
# generalized Laguerre polynomials:
assert assoc_laguerre(0, alpha, x) == 1
assert assoc_laguerre(1, alpha, x) == -x + alpha + 1
assert assoc_laguerre(2, alpha, x).expand() == \
(x**2/2 - (alpha + 2)*x + (alpha + 2)*(alpha + 1)/2).expand()
assert assoc_laguerre(3, alpha, x).expand() == \
(-x**3/6 + (alpha + 3)*x**2/2 - (alpha + 2)*(alpha + 3)*x/2 +
(alpha + 1)*(alpha + 2)*(alpha + 3)/6).expand()
# Test the lowest 10 polynomials with laguerre_poly, to make sure it works:
for i in range(10):
assert assoc_laguerre(i, 0, x).expand() == laguerre_poly(i, x)
X = assoc_laguerre(n, m, x)
assert isinstance(X, assoc_laguerre)
assert assoc_laguerre(n, 0, x) == laguerre(n, x)
assert assoc_laguerre(n, alpha, 0) == binomial(alpha + n, alpha)
assert diff(assoc_laguerre(n, alpha, x), x) == \
-assoc_laguerre(n - 1, alpha + 1, x)
#k = Dummy("k")
#assert diff(assoc_laguerre(n, alpha, x), alpha) == Sum(assoc_laguerre(k, alpha, x)/(-alpha + n), (k, 0, n - 1))
assert conjugate(assoc_laguerre(n, alpha, x)) == \
assoc_laguerre(n, conjugate(alpha), conjugate(x))
|
|
"""Asynchronous Advantage Actor-Critic (A3C) algorithm for reinforcement learning."""
from deepchem.models import TensorGraph
from deepchem.models.tensorgraph import TFWrapper
from deepchem.models.tensorgraph.layers import Feature, Weights, Label, Layer
import numpy as np
import tensorflow as tf
import collections
import copy
import multiprocessing
import os
import re
import threading
class A3CLoss(Layer):
"""This layer computes the loss function for A3C."""
def __init__(self, value_weight, entropy_weight, **kwargs):
super(A3CLoss, self).__init__(**kwargs)
self.value_weight = value_weight
self.entropy_weight = entropy_weight
def create_tensor(self, **kwargs):
reward, action, prob, value, advantage = [
layer.out_tensor for layer in self.in_layers
]
prob = prob + np.finfo(np.float32).eps
log_prob = tf.log(prob)
    policy_loss = -tf.reduce_mean(
        advantage * tf.reduce_sum(action * log_prob, axis=1))
value_loss = tf.reduce_mean(tf.square(reward - value))
entropy = -tf.reduce_mean(tf.reduce_sum(prob * log_prob, axis=1))
self.out_tensor = policy_loss + self.value_weight * value_loss - self.entropy_weight * entropy
return self.out_tensor
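# A small NumPy sketch (illustrative only, with made-up numbers) of the three
# terms combined by A3CLoss above: policy loss, value loss and an entropy bonus.
def _example_a3c_loss_terms(value_weight=1.0, entropy_weight=0.01):
  eps = np.finfo(np.float32).eps
  prob = np.array([[0.7, 0.3]]) + eps    # action probabilities from the policy
  action = np.array([[1.0, 0.0]])        # one-hot action actually taken
  advantage = np.array([0.5])
  reward, value = np.array([1.0]), np.array([0.8])
  log_prob = np.log(prob)
  policy_loss = -np.mean(advantage * np.sum(action * log_prob, axis=1))
  value_loss = np.mean(np.square(reward - value))
  entropy = -np.mean(np.sum(prob * log_prob, axis=1))
  return policy_loss + value_weight * value_loss - entropy_weight * entropy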
class A3C(object):
"""
Implements the Asynchronous Advantage Actor-Critic (A3C) algorithm for reinforcement learning.
The algorithm is described in Mnih et al, "Asynchronous Methods for Deep Reinforcement Learning"
(https://arxiv.org/abs/1602.01783). This class requires the policy to output two quantities:
a vector giving the probability of taking each action, and an estimate of the value function for
the current state. It optimizes both outputs at once using a loss that is the sum of three terms:
1. The policy loss, which seeks to maximize the discounted reward for each action.
2. The value loss, which tries to make the value estimate match the actual discounted reward
that was attained at each step.
3. An entropy term to encourage exploration.
This class only supports environments with discrete action spaces, not continuous ones. The
"action" argument passed to the environment is an integer, giving the index of the action to perform.
This class supports Generalized Advantage Estimation as described in Schulman et al., "High-Dimensional
Continuous Control Using Generalized Advantage Estimation" (https://arxiv.org/abs/1506.02438).
This is a method of trading off bias and variance in the advantage estimate, which can sometimes
  improve the rate of convergence. Use the advantage_lambda parameter to adjust the tradeoff.
This class supports Hindsight Experience Replay as described in Andrychowicz et al., "Hindsight
Experience Replay" (https://arxiv.org/abs/1707.01495). This is a method that can enormously
accelerate learning when rewards are very rare. It requires that the environment state contains
information about the goal the agent is trying to achieve. Each time it generates a rollout, it
processes that rollout twice: once using the actual goal the agent was pursuing while generating
it, and again using the final state of that rollout as the goal. This guarantees that half of
all rollouts processed will be ones that achieved their goals, and hence received a reward.
To use this feature, specify use_hindsight=True to the constructor. The environment must have
a method defined as follows:
def apply_hindsight(self, states, actions, goal):
...
return new_states, rewards
The method receives the list of states generated during the rollout, the action taken for each one,
and a new goal state. It should generate a new list of states that are identical to the input ones,
except specifying the new goal. It should return that list of states, and the rewards that would
have been received for taking the specified actions from those states.
"""
def __init__(self,
env,
policy,
max_rollout_length=20,
discount_factor=0.99,
advantage_lambda=0.98,
value_weight=1.0,
entropy_weight=0.01,
optimizer=None,
model_dir=None,
use_hindsight=False):
"""Create an object for optimizing a policy.
Parameters
----------
env: Environment
the Environment to interact with
policy: Policy
the Policy to optimize. Its create_layers() method must return a map containing the
keys 'action_prob' and 'value', corresponding to the action probabilities and value estimate
max_rollout_length: int
the maximum length of rollouts to generate
discount_factor: float
      the discount factor to use when computing rewards
    advantage_lambda: float
      the parameter for Generalized Advantage Estimation, which trades off
      bias and variance in the advantage estimate
    value_weight: float
a scale factor for the value loss term in the loss function
entropy_weight: float
a scale factor for the entropy term in the loss function
optimizer: TFWrapper
a callable object that creates the optimizer to use. If None, a default optimizer is used.
model_dir: str
the directory in which the model will be saved. If None, a temporary directory will be created.
use_hindsight: bool
if True, use Hindsight Experience Replay
"""
self._env = env
self._policy = policy
self.max_rollout_length = max_rollout_length
self.discount_factor = discount_factor
self.advantage_lambda = advantage_lambda
self.value_weight = value_weight
self.entropy_weight = entropy_weight
self.use_hindsight = use_hindsight
self._state_is_list = isinstance(env.state_shape[0], collections.Sequence)
if optimizer is None:
self._optimizer = TFWrapper(
tf.train.AdamOptimizer, learning_rate=0.001, beta1=0.9, beta2=0.999)
else:
self._optimizer = optimizer
(self._graph, self._features, self._rewards, self._actions,
self._action_prob, self._value, self._advantages) = self._build_graph(
None, 'global', model_dir)
with self._graph._get_tf("Graph").as_default():
self._session = tf.Session()
self._rnn_states = self._graph.rnn_zero_states
def _build_graph(self, tf_graph, scope, model_dir):
"""Construct a TensorGraph containing the policy and loss calculations."""
state_shape = self._env.state_shape
if not self._state_is_list:
state_shape = [state_shape]
features = [Feature(shape=[None] + list(s)) for s in state_shape]
policy_layers = self._policy.create_layers(features)
action_prob = policy_layers['action_prob']
value = policy_layers['value']
rewards = Weights(shape=(None,))
advantages = Weights(shape=(None,))
actions = Label(shape=(None, self._env.n_actions))
loss = A3CLoss(
self.value_weight,
self.entropy_weight,
in_layers=[rewards, actions, action_prob, value, advantages])
graph = TensorGraph(
batch_size=self.max_rollout_length,
use_queue=False,
graph=tf_graph,
model_dir=model_dir)
for f in features:
graph._add_layer(f)
graph.add_output(action_prob)
graph.add_output(value)
graph.set_loss(loss)
graph.set_optimizer(self._optimizer)
with graph._get_tf("Graph").as_default():
with tf.variable_scope(scope):
graph.build()
return graph, features, rewards, actions, action_prob, value, advantages
def fit(self,
total_steps,
max_checkpoints_to_keep=5,
checkpoint_interval=600,
restore=False):
"""Train the policy.
Parameters
----------
total_steps: int
the total number of time steps to perform on the environment, across all rollouts
on all threads
max_checkpoints_to_keep: int
the maximum number of checkpoint files to keep. When this number is reached, older
files are deleted.
checkpoint_interval: float
the time interval at which to save checkpoints, measured in seconds
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
"""
with self._graph._get_tf("Graph").as_default():
step_count = [0]
workers = []
threads = []
for i in range(multiprocessing.cpu_count()):
workers.append(_Worker(self, i))
self._session.run(tf.global_variables_initializer())
if restore:
self.restore()
for worker in workers:
thread = threading.Thread(
name=worker.scope,
target=lambda: worker.run(step_count, total_steps))
threads.append(thread)
thread.start()
variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='global')
saver = tf.train.Saver(variables, max_to_keep=max_checkpoints_to_keep)
checkpoint_index = 0
while True:
threads = [t for t in threads if t.isAlive()]
if len(threads) > 0:
threads[0].join(checkpoint_interval)
checkpoint_index += 1
saver.save(
self._session, self._graph.save_file, global_step=checkpoint_index)
if len(threads) == 0:
break
def predict(self, state, use_saved_states=True, save_states=True):
"""Compute the policy's output predictions for a state.
If the policy involves recurrent layers, this method can preserve their internal
states between calls. Use the use_saved_states and save_states arguments to specify
how it should behave.
Parameters
----------
state: array
the state of the environment for which to generate predictions
use_saved_states: bool
if True, the states most recently saved by a previous call to predict() or select_action()
will be used as the initial states. If False, the internal states of all recurrent layers
will be set to all zeros before computing the predictions.
save_states: bool
if True, the internal states of all recurrent layers at the end of the calculation
will be saved, and any previously saved states will be discarded. If False, the
states at the end of the calculation will be discarded, and any previously saved
states will be kept.
Returns
-------
the array of action probabilities, and the estimated value function
"""
if not self._state_is_list:
state = [state]
with self._graph._get_tf("Graph").as_default():
feed_dict = self._create_feed_dict(state, use_saved_states)
tensors = [self._action_prob.out_tensor, self._value.out_tensor]
if save_states:
tensors += self._graph.rnn_final_states
results = self._session.run(tensors, feed_dict=feed_dict)
if save_states:
self._rnn_states = results[2:]
return results[:2]
def select_action(self,
state,
deterministic=False,
use_saved_states=True,
save_states=True):
"""Select an action to perform based on the environment's state.
If the policy involves recurrent layers, this method can preserve their internal
states between calls. Use the use_saved_states and save_states arguments to specify
how it should behave.
Parameters
----------
state: array
the state of the environment for which to select an action
deterministic: bool
if True, always return the best action (that is, the one with highest probability).
If False, randomly select an action based on the computed probabilities.
use_saved_states: bool
if True, the states most recently saved by a previous call to predict() or select_action()
will be used as the initial states. If False, the internal states of all recurrent layers
will be set to all zeros before computing the predictions.
save_states: bool
if True, the internal states of all recurrent layers at the end of the calculation
will be saved, and any previously saved states will be discarded. If False, the
states at the end of the calculation will be discarded, and any previously saved
states will be kept.
Returns
-------
the index of the selected action
"""
if not self._state_is_list:
state = [state]
with self._graph._get_tf("Graph").as_default():
feed_dict = self._create_feed_dict(state, use_saved_states)
tensors = [self._action_prob.out_tensor]
if save_states:
tensors += self._graph.rnn_final_states
results = self._session.run(tensors, feed_dict=feed_dict)
probabilities = results[0]
if save_states:
self._rnn_states = results[1:]
if deterministic:
return probabilities.argmax()
else:
return np.random.choice(
np.arange(self._env.n_actions), p=probabilities[0])
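  # Brief usage sketch (illustrative; 'env' and 'policy' stand for concrete
  # Environment and Policy objects from the surrounding framework):
  #   a3c = A3C(env, policy)
  #   a3c.fit(total_steps=100000)
  #   action = a3c.select_action(env.state, deterministic=True)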
def restore(self):
"""Reload the model parameters from the most recent checkpoint file."""
last_checkpoint = tf.train.latest_checkpoint(self._graph.model_dir)
if last_checkpoint is None:
raise ValueError('No checkpoint found')
with self._graph._get_tf("Graph").as_default():
variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='global')
saver = tf.train.Saver(variables)
saver.restore(self._session, last_checkpoint)
def _create_feed_dict(self, state, use_saved_states):
"""Create a feed dict for use by predict() or select_action()."""
feed_dict = dict((f.out_tensor, np.expand_dims(s, axis=0))
for f, s in zip(self._features, state))
if use_saved_states:
rnn_states = self._rnn_states
else:
rnn_states = self._graph.rnn_zero_states
for (placeholder, value) in zip(self._graph.rnn_initial_states, rnn_states):
feed_dict[placeholder] = value
return feed_dict
class _Worker(object):
"""A Worker object is created for each training thread."""
def __init__(self, a3c, index):
self.a3c = a3c
self.index = index
self.scope = 'worker%d' % index
self.env = copy.deepcopy(a3c._env)
self.env.reset()
self.graph, self.features, self.rewards, self.actions, self.action_prob, self.value, self.advantages = a3c._build_graph(
a3c._graph._get_tf('Graph'), self.scope, None)
self.rnn_states = self.graph.rnn_zero_states
with a3c._graph._get_tf("Graph").as_default():
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
self.scope)
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
'global')
gradients = tf.gradients(self.graph.loss.out_tensor, local_vars)
grads_and_vars = list(zip(gradients, global_vars))
self.train_op = a3c._graph._get_tf('Optimizer').apply_gradients(
grads_and_vars)
self.update_local_variables = tf.group(
* [tf.assign(v1, v2) for v1, v2 in zip(local_vars, global_vars)])
def run(self, step_count, total_steps):
with self.graph._get_tf("Graph").as_default():
while step_count[0] < total_steps:
self.a3c._session.run(self.update_local_variables)
initial_rnn_states = self.rnn_states
states, actions, rewards, values = self.create_rollout()
self.process_rollout(states, actions, rewards, values,
initial_rnn_states)
if self.a3c.use_hindsight:
self.process_rollout_with_hindsight(states, actions,
initial_rnn_states)
step_count[0] += len(actions)
def create_rollout(self):
"""Generate a rollout."""
n_actions = self.env.n_actions
session = self.a3c._session
states = []
actions = []
rewards = []
values = []
# Generate the rollout.
for i in range(self.a3c.max_rollout_length):
if self.env.terminated:
break
state = self.env.state
states.append(state)
feed_dict = self.create_feed_dict(state)
results = session.run(
[self.action_prob.out_tensor, self.value.out_tensor] +
self.graph.rnn_final_states,
feed_dict=feed_dict)
probabilities, value = results[:2]
self.rnn_states = results[2:]
action = np.random.choice(np.arange(n_actions), p=probabilities[0])
actions.append(action)
values.append(float(value))
rewards.append(self.env.step(action))
# Compute an estimate of the reward for the rest of the episode.
if not self.env.terminated:
feed_dict = self.create_feed_dict(self.env.state)
final_value = self.a3c.discount_factor * float(
session.run(self.value.out_tensor, feed_dict))
else:
final_value = 0.0
values.append(final_value)
if self.env.terminated:
self.env.reset()
self.rnn_states = self.graph.rnn_zero_states
return states, actions, np.array(rewards), np.array(values)
def process_rollout(self, states, actions, rewards, values,
initial_rnn_states):
"""Train the network based on a rollout."""
# Compute the discounted rewards and advantages.
discounted_rewards = rewards.copy()
discounted_rewards[-1] += values[-1]
advantages = rewards - values[:-1] + self.a3c.discount_factor * np.array(
values[1:])
for j in range(len(rewards) - 1, 0, -1):
      discounted_rewards[j - 1] += (
          self.a3c.discount_factor * discounted_rewards[j])
      advantages[j - 1] += (self.a3c.discount_factor *
                            self.a3c.advantage_lambda * advantages[j])
# Convert the actions to one-hot.
n_actions = self.env.n_actions
actions_matrix = []
for action in actions:
a = np.zeros(n_actions)
a[action] = 1.0
actions_matrix.append(a)
# Rearrange the states into the proper set of arrays.
if self.a3c._state_is_list:
state_arrays = [[] for i in range(len(self.features))]
for state in states:
for j in range(len(state)):
state_arrays[j].append(state[j])
else:
state_arrays = [states]
# Build the feed dict and apply gradients.
feed_dict = {}
for placeholder, value in zip(self.graph.rnn_initial_states,
initial_rnn_states):
feed_dict[placeholder] = value
for f, s in zip(self.features, state_arrays):
feed_dict[f.out_tensor] = s
feed_dict[self.rewards.out_tensor] = discounted_rewards
feed_dict[self.actions.out_tensor] = actions_matrix
feed_dict[self.advantages.out_tensor] = advantages
self.a3c._session.run(self.train_op, feed_dict=feed_dict)
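  # Worked illustration of the two recurrences above (made-up numbers, with
  # discount_factor=0.9, advantage_lambda=1.0 and a bootstrap value of 0):
  # rewards [1, 0, 1] and values [0.5, 0.4, 0.2, 0] give discounted rewards
  # [1 + 0.9*0.9, 0 + 0.9*1, 1] = [1.81, 0.9, 1]; the one-step deltas
  # r_t + gamma*V(t+1) - V(t) = [0.86, -0.22, 0.8] accumulate backwards into
  # advantages [0.86 + 0.9*0.5, -0.22 + 0.9*0.8, 0.8] = [1.31, 0.5, 0.8].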
def process_rollout_with_hindsight(self, states, actions, initial_rnn_states):
"""Create a new rollout by applying hindsight to an existing one, then train the network."""
hindsight_states, rewards = self.env.apply_hindsight(
states, actions, states[-1])
if self.a3c._state_is_list:
state_arrays = [[] for i in range(len(self.features))]
for state in hindsight_states:
for j in range(len(state)):
state_arrays[j].append(state[j])
else:
state_arrays = [hindsight_states]
feed_dict = {}
for placeholder, value in zip(self.graph.rnn_initial_states,
initial_rnn_states):
feed_dict[placeholder] = value
for f, s in zip(self.features, state_arrays):
feed_dict[f.out_tensor] = s
values = self.a3c._session.run(self.value.out_tensor, feed_dict=feed_dict)
values = np.append(values.flatten(), 0.0)
self.process_rollout(hindsight_states, actions,
np.array(rewards), np.array(values),
initial_rnn_states)
def create_feed_dict(self, state):
"""Create a feed dict for use during a rollout."""
if not self.a3c._state_is_list:
state = [state]
feed_dict = dict((f.out_tensor, np.expand_dims(s, axis=0))
for f, s in zip(self.features, state))
for (placeholder, value) in zip(self.graph.rnn_initial_states,
self.rnn_states):
feed_dict[placeholder] = value
return feed_dict
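# Illustrative sketch (an assumption, not part of this module) of the
# apply_hindsight() contract described in the A3C class docstring. A real
# environment would subclass the framework's Environment class; here the state
# is assumed to be an (observation, goal) pair, and a reward is granted only on
# steps whose observation matches the substituted goal.
class _ExampleHindsightEnv(object):

  def apply_hindsight(self, states, actions, goal):
    goal_obs = goal[0]  # reuse the observation part of the new goal state
    new_states = [(obs, goal_obs) for obs, _ in states]
    rewards = [1.0 if obs == goal_obs else 0.0 for obs, _ in states]
    return new_states, rewards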
|
|
import logging
import os.path
import shutil
import socket
import salt.config
import salt.loader
import salt.modules.network as network
import salt.utils.path
from salt._compat import ipaddress
from salt.exceptions import CommandExecutionError
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, mock_open, patch
from tests.support.unit import TestCase, skipIf
log = logging.getLogger(__name__)
class NetworkTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.network
"""
def setup_loader_modules(self):
opts = salt.config.DEFAULT_MINION_OPTS.copy()
utils = salt.loader.utils(
opts, whitelist=["network", "path", "platform", "stringutils"]
)
return {
network: {"__utils__": utils},
}
@patch("salt.utils.platform.is_windows")
def test___virtual__is_windows_true(self, mock_is_windows):
mock_is_windows.return_value = True
result = network.__virtual__()
expected = (
False,
"The network execution module cannot be loaded on Windows: use win_network"
" instead.",
)
self.assertEqual(result, expected)
@patch("salt.utils.platform.is_windows")
def test___virtual__is_windows_false(self, mock_is_windows):
mock_is_windows.return_value = False
result = network.__virtual__()
self.assertEqual(result, True)
def test_wol_bad_mac(self):
"""
tests network.wol with bad mac
"""
bad_mac = "31337"
self.assertRaises(ValueError, network.wol, bad_mac)
def test_wol_success(self):
"""
tests network.wol success
"""
mac = "080027136977"
bcast = "255.255.255.255 7"
class MockSocket:
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
pass
def setsockopt(self, *args, **kwargs):
pass
def sendto(self, *args, **kwargs):
pass
with patch("socket.socket", MockSocket):
self.assertTrue(network.wol(mac, bcast))
def test_ping(self):
"""
        Test performing a ping to a host
"""
with patch.dict(
network.__utils__, {"network.sanitize_host": MagicMock(return_value="A")}
):
mock_all = MagicMock(side_effect=[{"retcode": 1}, {"retcode": 0}])
with patch.dict(network.__salt__, {"cmd.run_all": mock_all}):
self.assertFalse(network.ping("host", return_boolean=True))
self.assertTrue(network.ping("host", return_boolean=True))
with patch.dict(network.__salt__, {"cmd.run": MagicMock(return_value="A")}):
self.assertEqual(network.ping("host"), "A")
def test_netstat(self):
"""
        Test returning information on open ports and states
"""
with patch.dict(network.__grains__, {"kernel": "Linux"}):
with patch.object(network, "_netstat_linux", return_value="A"):
with patch.object(network, "_ss_linux", return_value="A"):
self.assertEqual(network.netstat(), "A")
with patch.dict(network.__grains__, {"kernel": "OpenBSD"}):
with patch.object(network, "_netstat_bsd", return_value="A"):
self.assertEqual(network.netstat(), "A")
with patch.dict(network.__grains__, {"kernel": "A"}):
self.assertRaises(CommandExecutionError, network.netstat)
def test_active_tcp(self):
"""
        Test returning a dict containing information on all
of the running TCP connections
"""
with patch.dict(
network.__utils__, {"network.active_tcp": MagicMock(return_value="A")}
):
with patch.dict(network.__grains__, {"kernel": "Linux"}):
self.assertEqual(network.active_tcp(), "A")
def test_traceroute(self):
"""
        Test performing a traceroute to a 3rd-party host
"""
def patched_which(binary):
binary_path = shutil.which(binary)
if binary_path:
# The path exists, just return it
return binary_path
if binary == "traceroute":
# The path doesn't exist but we mock it on the test.
# Return the binary name
return binary
# The binary does not exist
return binary_path
with patch("salt.utils.path.which", patched_which):
with patch.dict(network.__salt__, {"cmd.run": MagicMock(return_value="")}):
self.assertListEqual(network.traceroute("gentoo.org"), [])
with patch.dict(
network.__utils__,
{"network.sanitize_host": MagicMock(return_value="gentoo.org")},
):
with patch.dict(
network.__salt__, {"cmd.run": MagicMock(return_value="")}
):
self.assertListEqual(network.traceroute("gentoo.org"), [])
def test_dig(self):
"""
        Test performing a DNS lookup with dig
"""
with patch("salt.utils.path.which", MagicMock(return_value="dig")), patch.dict(
network.__utils__, {"network.sanitize_host": MagicMock(return_value="A")}
), patch.dict(network.__salt__, {"cmd.run": MagicMock(return_value="A")}):
self.assertEqual(network.dig("host"), "A")
def test_arp(self):
"""
        Test returning the arp table from the minion
"""
with patch.dict(
network.__salt__, {"cmd.run": MagicMock(return_value="A,B,C,D\nE,F,G,H\n")}
), patch("salt.utils.path.which", MagicMock(return_value="")):
self.assertDictEqual(network.arp(), {})
def test_interfaces(self):
"""
        Test returning a dictionary of information about
all the interfaces on the minion
"""
with patch.dict(
network.__utils__, {"network.interfaces": MagicMock(return_value={})}
):
self.assertDictEqual(network.interfaces(), {})
def test_hw_addr(self):
"""
        Test returning the hardware address (a.k.a. MAC address)
for a given interface
"""
with patch.dict(
network.__utils__, {"network.hw_addr": MagicMock(return_value={})}
):
self.assertDictEqual(network.hw_addr("iface"), {})
def test_interface(self):
"""
        Test returning the inet address for a given interface
"""
with patch.dict(
network.__utils__, {"network.interface": MagicMock(return_value={})}
):
self.assertDictEqual(network.interface("iface"), {})
def test_interface_ip(self):
"""
        Test for returning the inet address for a given interface
"""
with patch.dict(
network.__utils__, {"network.interface_ip": MagicMock(return_value={})}
):
self.assertDictEqual(network.interface_ip("iface"), {})
def test_subnets(self):
"""
        Test for returning a list of subnets to which the host belongs
"""
with patch.dict(
network.__utils__, {"network.subnets": MagicMock(return_value={})}
):
self.assertDictEqual(network.subnets(), {})
def test_in_subnet(self):
"""
        Test for returning True if the host is within the specified
subnet, otherwise False.
"""
with patch.dict(
network.__utils__, {"network.in_subnet": MagicMock(return_value={})}
):
self.assertDictEqual(network.in_subnet("iface"), {})
def test_ip_addrs(self):
"""
        Test for returning a list of IPv4 addresses assigned to the host.
"""
with patch.dict(
network.__utils__,
{
"network.ip_addrs": MagicMock(return_value=["0.0.0.0"]),
"network.in_subnet": MagicMock(return_value=True),
},
):
self.assertListEqual(
network.ip_addrs("interface", "include_loopback", "cidr"), ["0.0.0.0"]
)
self.assertListEqual(
network.ip_addrs("interface", "include_loopback"), ["0.0.0.0"]
)
def test_ip_addrs6(self):
"""
        Test for returning a list of IPv6 addresses assigned to the host.
"""
with patch.dict(
network.__utils__, {"network.ip_addrs6": MagicMock(return_value=["A"])}
):
self.assertListEqual(network.ip_addrs6("int", "include"), ["A"])
def test_get_hostname(self):
"""
        Test for getting the hostname
"""
with patch.object(socket, "gethostname", return_value="A"):
self.assertEqual(network.get_hostname(), "A")
def test_mod_hostname(self):
"""
        Test for modifying the hostname
"""
self.assertFalse(network.mod_hostname(None))
file_d = "\n".join(["#", "A B C D,E,F G H"])
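        # Minimal mocked file contents handed back by every files.fopen() call below.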
with patch.dict(
network.__utils__,
{
"path.which": MagicMock(return_value="hostname"),
"files.fopen": mock_open(read_data=file_d),
},
), patch.dict(
network.__salt__, {"cmd.run": MagicMock(return_value=None)}
), patch.dict(
network.__grains__, {"os_family": "A"}
):
self.assertTrue(network.mod_hostname("hostname"))
def test_mod_hostname_quoted(self):
"""
        Test for a correctly quoted hostname on an RH-style distro
"""
fopen_mock = mock_open(
read_data={
"/etc/hosts": "\n".join(
["127.0.0.1 localhost.localdomain", "127.0.0.2 undef"]
),
"/etc/sysconfig/network": "\n".join(
["NETWORKING=yes", 'HOSTNAME="undef"']
),
}
)
with patch.dict(network.__grains__, {"os_family": "RedHat"}), patch.dict(
network.__salt__, {"cmd.run": MagicMock(return_value=None)}
), patch("socket.getfqdn", MagicMock(return_value="undef")), patch.dict(
network.__utils__,
{
"path.which": MagicMock(return_value="hostname"),
"files.fopen": fopen_mock,
},
):
self.assertTrue(network.mod_hostname("hostname"))
assert (
fopen_mock.filehandles["/etc/sysconfig/network"][1].write_calls[1]
== 'HOSTNAME="hostname"\n'
)
def test_mod_hostname_unquoted(self):
"""
        Test for a correctly unquoted hostname on an RH-style distro
"""
fopen_mock = mock_open(
read_data={
"/etc/hosts": "\n".join(
["127.0.0.1 localhost.localdomain", "127.0.0.2 undef"]
),
"/etc/sysconfig/network": "\n".join(
["NETWORKING=yes", "HOSTNAME=undef"]
),
}
)
with patch.dict(network.__grains__, {"os_family": "RedHat"}), patch.dict(
network.__salt__, {"cmd.run": MagicMock(return_value=None)}
), patch("socket.getfqdn", MagicMock(return_value="undef")), patch.dict(
network.__utils__,
{
"path.which": MagicMock(return_value="hostname"),
"files.fopen": fopen_mock,
},
):
self.assertTrue(network.mod_hostname("hostname"))
assert (
fopen_mock.filehandles["/etc/sysconfig/network"][1].write_calls[1]
== "HOSTNAME=hostname\n"
)
def test_connect(self):
"""
        Test for checking connectivity to a host using a particular
port from the minion.
"""
with patch("socket.socket") as mock_socket:
self.assertDictEqual(
network.connect(False, "port"),
{"comment": "Required argument, host, is missing.", "result": False},
)
self.assertDictEqual(
network.connect("host", False),
{"comment": "Required argument, port, is missing.", "result": False},
)
ret = "Unable to connect to host (0) on tcp port port"
mock_socket.side_effect = Exception("foo")
with patch.dict(
network.__utils__,
{"network.sanitize_host": MagicMock(return_value="A")},
):
with patch.object(
socket,
"getaddrinfo",
return_value=[["ipv4", "A", 6, "B", "0.0.0.0"]],
):
self.assertDictEqual(
network.connect("host", "port"),
{"comment": ret, "result": False},
)
ret = "Successfully connected to host (0) on tcp port port"
mock_socket.side_effect = MagicMock()
mock_socket.settimeout().return_value = None
mock_socket.connect().return_value = None
mock_socket.shutdown().return_value = None
with patch.dict(
network.__utils__,
{"network.sanitize_host": MagicMock(return_value="A")},
):
with patch.object(
socket,
"getaddrinfo",
return_value=[["ipv4", "A", 6, "B", "0.0.0.0"]],
):
self.assertDictEqual(
network.connect("host", "port"),
{"comment": ret, "result": True},
)
@skipIf(not bool(ipaddress), "unable to import 'ipaddress'")
def test_is_private(self):
"""
        Test for checking whether the given IP address is a private address
"""
with patch.object(ipaddress.IPv4Address, "is_private", return_value=True):
self.assertTrue(network.is_private("0.0.0.0"))
with patch.object(ipaddress.IPv6Address, "is_private", return_value=True):
self.assertTrue(network.is_private("::1"))
@skipIf(not bool(ipaddress), "unable to import 'ipaddress'")
def test_is_loopback(self):
"""
        Test for checking whether the given IP address is a loopback address
"""
with patch.object(ipaddress.IPv4Address, "is_loopback", return_value=True):
self.assertTrue(network.is_loopback("127.0.0.1"))
with patch.object(ipaddress.IPv6Address, "is_loopback", return_value=True):
self.assertTrue(network.is_loopback("::1"))
def test_get_bufsize(self):
"""
        Test for returning network buffer sizes as a dict
"""
with patch.dict(network.__grains__, {"kernel": "Linux"}):
with patch.object(os.path, "exists", return_value=True):
with patch.object(
network, "_get_bufsize_linux", return_value={"size": 1}
):
self.assertDictEqual(network.get_bufsize("iface"), {"size": 1})
with patch.dict(network.__grains__, {"kernel": "A"}):
self.assertDictEqual(network.get_bufsize("iface"), {})
def test_mod_bufsize(self):
"""
        Test for modifying network interface buffers (currently Linux only)
"""
with patch.dict(network.__grains__, {"kernel": "Linux"}):
with patch.object(os.path, "exists", return_value=True):
with patch.object(
network, "_mod_bufsize_linux", return_value={"size": 1}
):
self.assertDictEqual(network.mod_bufsize("iface"), {"size": 1})
with patch.dict(network.__grains__, {"kernel": "A"}):
self.assertFalse(network.mod_bufsize("iface"))
def test_routes(self):
"""
        Test for returning currently configured routes from the routing table
"""
self.assertRaises(CommandExecutionError, network.routes, "family")
with patch.dict(network.__grains__, {"kernel": "A", "os": "B"}):
self.assertRaises(CommandExecutionError, network.routes, "inet")
with patch.dict(network.__grains__, {"kernel": "Linux"}):
with patch.object(
network,
"_netstat_route_linux",
side_effect=["A", [{"addr_family": "inet"}]],
):
with patch.object(
network,
"_ip_route_linux",
side_effect=["A", [{"addr_family": "inet"}]],
):
self.assertEqual(network.routes(None), "A")
self.assertListEqual(
network.routes("inet"), [{"addr_family": "inet"}]
)
def test_default_route(self):
"""
        Test for returning default route(s) from the routing table
"""
self.assertRaises(CommandExecutionError, network.default_route, "family")
with patch.object(
network,
"routes",
side_effect=[[{"addr_family": "inet"}, {"destination": "A"}], []],
):
with patch.dict(network.__grains__, {"kernel": "A", "os": "B"}):
self.assertRaises(CommandExecutionError, network.default_route, "inet")
with patch.dict(network.__grains__, {"kernel": "Linux"}):
self.assertListEqual(network.default_route("inet"), [])
def test_default_route_ipv6(self):
"""
        Test for returning default route(s) from the routing table for IPv6;
        additionally tests that multicast, anycast, etc. do not throw errors
"""
mock_iproute_ipv4 = """default via 192.168.0.1 dev enx3c18a040229d proto dhcp metric 100
default via 192.168.0.1 dev wlp59s0 proto dhcp metric 600
3.15.90.221 via 10.16.119.224 dev gpd0
3.18.18.213 via 10.16.119.224 dev gpd0
10.0.0.0/8 via 10.16.119.224 dev gpd0
10.1.0.0/16 via 10.12.240.1 dev tun0
10.2.0.0/16 via 10.12.240.1 dev tun0
10.12.0.0/16 via 10.12.240.1 dev tun0
10.12.240.0/20 dev tun0 proto kernel scope link src 10.12.240.2
10.14.0.0/16 via 10.12.240.1 dev tun0
10.16.0.0/16 via 10.12.240.1 dev tun0
10.16.188.201 via 10.16.119.224 dev gpd0
10.16.188.202 via 10.16.119.224 dev gpd0
10.27.0.0/16 via 10.12.240.1 dev tun0
52.14.149.204 via 10.16.119.224 dev gpd0
52.14.159.171 via 10.16.119.224 dev gpd0
52.14.249.61 via 10.16.119.224 dev gpd0
52.15.65.251 via 10.16.119.224 dev gpd0
54.70.229.135 via 10.16.119.224 dev gpd0
54.71.37.253 via 10.12.240.1 dev tun0
54.189.240.227 via 10.16.119.224 dev gpd0
66.170.96.2 via 192.168.0.1 dev enx3c18a040229d
80.169.184.191 via 10.16.119.224 dev gpd0
107.154.251.105 via 10.16.119.224 dev gpd0
168.61.48.213 via 10.16.119.224 dev gpd0
169.254.0.0/16 dev enx3c18a040229d scope link metric 1000
172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown
172.30.0.0/16 via 10.12.240.1 dev tun0
184.169.136.236 via 10.16.119.224 dev gpd0
191.237.22.167 via 10.16.119.224 dev gpd0
192.30.68.16 via 10.16.119.224 dev gpd0
192.30.71.16 via 10.16.119.224 dev gpd0
192.30.71.71 via 10.16.119.224 dev gpd0
192.168.0.0/24 dev enx3c18a040229d proto kernel scope link src 192.168.0.99 metric 100
192.168.0.0/24 dev wlp59s0 proto kernel scope link src 192.168.0.99 metric 600
192.240.157.233 via 10.16.119.224 dev gpd0
206.80.50.33 via 10.16.119.224 dev gpd0
209.34.94.97 via 10.16.119.224 dev gpd0
unreachable should ignore this
"""
mock_iproute_ipv6 = """::1 dev lo proto kernel metric 256 pref medium
2060:123:4069::10 dev enp5s0 proto kernel metric 100 pref medium
2060:123:4069::68 dev wlp3s0 proto kernel metric 600 pref medium
2060:123:4069::15:0/112 dev virbr0 proto kernel metric 256 pref medium
2060:123:4069::/64 dev enp5s0 proto ra metric 100 pref medium
2060:123:4069::/64 dev wlp3s0 proto ra metric 600 pref medium
2602:ae13:dc4:1b00::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2602:ae14:66:8300::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2602:ae14:a0:4d00::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2602:ae14:508:3900::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2602:ae14:513:a200::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2602:ae14:769:2b00::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2602:ae14:924:9700::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2602:ae14:9e1:6000::10:1 via fe80::222:15ff:fe3f:23fe dev enp5s0 proto static metric 100 pref medium
2602:ae14:9e1:6080::10:1 dev tun0 proto kernel metric 50 pref medium
2602:ae14:9e1:6080::10:1 dev tun0 proto kernel metric 256 pref medium
2602:ae14:9e1:6080::10:1001 dev tun0 proto kernel metric 50 pref medium
2602:ae14:9e1:6000::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2602:ae14:cc1:fa00::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2602:ae14:cd0:5b00::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2602:ae14:d5f:b400::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2a34:d014:1d3:5d00::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
2a34:d014:919:bb00::/56 via 2602:ae14:9e1:6080::10:1 dev tun0 proto static metric 50 pref medium
fd0d:3ed3:cb42:1::/64 dev enp5s0 proto ra metric 100 pref medium
fd0d:3ed3:cb42:1::/64 dev wlp3s0 proto ra metric 600 pref medium
fe80::222:15ff:fe3f:23fe dev enp5s0 proto static metric 100 pref medium
fe80::/64 dev enp5s0 proto kernel metric 100 pref medium
fe80::/64 dev virbr0 proto kernel metric 256 pref medium
fe80::/64 dev vnet2 proto kernel metric 256 pref medium
fe80::/64 dev docker0 proto kernel metric 256 linkdown pref medium
fe80::/64 dev vpn0 proto kernel metric 256 pref medium
fe80::/64 dev wlp3s0 proto kernel metric 600 pref medium
default via fe80::222:15ff:fe3f:23fe dev enp5s0 proto ra metric 100 pref medium
default via fe80::222:15ff:fe3f:23fe dev wlp3s0 proto ra metric 600 pref medium
local ::1 dev lo table local proto kernel metric 0 pref medium
anycast 2060:123:4069:: dev wlp3s0 table local proto kernel metric 0 pref medium
local 2060:123:4069::10 dev enp5s0 table local proto kernel metric 0 pref medium
local 2060:123:4069::68 dev wlp3s0 table local proto kernel metric 0 pref medium
anycast 2060:123:4069::15:0 dev virbr0 table local proto kernel metric 0 pref medium
local 2060:123:4069::15:1 dev virbr0 table local proto kernel metric 0 pref medium
local 2060:123:4069:0:f4d:7d09:358c:ce5 dev wlp3s0 table local proto kernel metric 0 pref medium
local 2060:123:4069:0:a089:c284:32a8:9536 dev enp5s0 table local proto kernel metric 0 pref medium
anycast 2602:ae14:9e1:6080::10:0 dev tun0 table local proto kernel metric 0 pref medium
local 2602:ae14:9e1:6080::10:1001 dev tun0 table local proto kernel metric 0 pref medium
anycast fd0d:3ed3:cb42:1:: dev wlp3s0 table local proto kernel metric 0 pref medium
local fd0d:3ed3:cb42:1:cffd:9b03:c50:6d2a dev wlp3s0 table local proto kernel metric 0 pref medium
local fd0d:3ed3:cb42:1:f00b:50ef:2143:36cf dev enp5s0 table local proto kernel metric 0 pref medium
anycast fe80:: dev virbr0 table local proto kernel metric 0 pref medium
anycast fe80:: dev vnet2 table local proto kernel metric 0 pref medium
anycast fe80:: dev docker0 table local proto kernel metric 0 pref medium
anycast fe80:: dev wlp3s0 table local proto kernel metric 0 pref medium
anycast fe80:: dev vpn0 table local proto kernel metric 0 pref medium
local fe80::42:bfff:fec9:f590 dev docker0 table local proto kernel metric 0 pref medium
local fe80::18b1:cf8e:49cc:a783 dev wlp3s0 table local proto kernel metric 0 pref medium
local fe80::5054:ff:fe55:9457 dev virbr0 table local proto kernel metric 0 pref medium
local fe80::d251:c2a7:f5c8:2778 dev enp5s0 table local proto kernel metric 0 pref medium
local fe80::df35:e22c:f7db:a892 dev vpn0 table local proto kernel metric 0 pref medium
local fe80::fc54:ff:fee6:9fef dev vnet2 table local proto kernel metric 0 pref medium
multicast ff00::/8 dev enp5s0 table local proto kernel metric 256 pref medium
multicast ff00::/8 dev virbr0 table local proto kernel metric 256 pref medium
multicast ff00::/8 dev vnet2 table local proto kernel metric 256 pref medium
multicast ff00::/8 dev docker0 table local proto kernel metric 256 linkdown pref medium
multicast ff00::/8 dev wlp3s0 table local proto kernel metric 256 pref medium
multicast ff00::/8 dev vpn0 table local proto kernel metric 256 pref medium
multicast ff00::/8 dev tun0 table local proto kernel metric 256 pref medium
unicast should ignore this
broadcast cast should ignore this
throw should ignore this
unreachable should ignore this
prohibit should ignore this
blackhole should ignore this
nat should ignore this
"""
self.assertRaises(CommandExecutionError, network.default_route, "family")
with patch.object(
network,
"routes",
side_effect=[[{"family": "inet6"}, {"destination": "A"}], []],
):
with patch.dict(network.__grains__, {"kernel": "A", "os": "B"}):
self.assertRaises(CommandExecutionError, network.default_route, "inet6")
cmd_mock = MagicMock(side_effect=[mock_iproute_ipv4, mock_iproute_ipv6])
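        # The mocked cmd.run returns the IPv4 routing table on the first call and the
        # IPv6 table on the second.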
with patch.dict(network.__grains__, {"kernel": "Linux"}):
with patch.dict(
network.__utils__, {"path.which": MagicMock(return_value=False)}
):
with patch.dict(network.__salt__, {"cmd.run": cmd_mock}):
self.assertListEqual(
network.default_route("inet6"),
[
{
"addr_family": "inet6",
"destination": "::/0",
"gateway": "fe80::222:15ff:fe3f:23fe",
"netmask": "",
"flags": "UG",
"interface": "enp5s0",
},
{
"addr_family": "inet6",
"destination": "::/0",
"gateway": "fe80::222:15ff:fe3f:23fe",
"netmask": "",
"flags": "UG",
"interface": "wlp3s0",
},
],
)
def test_get_route(self):
"""
        Test for returning output from get_route
"""
mock_iproute = MagicMock(
return_value="8.8.8.8 via 10.10.10.1 dev eth0 src 10.10.10.10 uid 0\ncache"
)
with patch.dict(network.__grains__, {"kernel": "Linux"}):
with patch.dict(network.__salt__, {"cmd.run": mock_iproute}):
expected = {
"interface": "eth0",
"source": "10.10.10.10",
"destination": "8.8.8.8",
"gateway": "10.10.10.1",
}
ret = network.get_route("8.8.8.8")
self.assertEqual(ret, expected)
mock_iproute = MagicMock(
return_value=(
"8.8.8.8 via 10.10.10.1 dev eth0.1 src 10.10.10.10 uid 0\ncache"
)
)
with patch.dict(network.__grains__, {"kernel": "Linux"}):
with patch.dict(network.__salt__, {"cmd.run": mock_iproute}):
expected = {
"interface": "eth0.1",
"source": "10.10.10.10",
"destination": "8.8.8.8",
"gateway": "10.10.10.1",
}
ret = network.get_route("8.8.8.8")
self.assertEqual(ret, expected)
mock_iproute = MagicMock(
return_value=(
"8.8.8.8 via 10.10.10.1 dev eth0:1 src 10.10.10.10 uid 0\ncache"
)
)
with patch.dict(network.__grains__, {"kernel": "Linux"}):
with patch.dict(network.__salt__, {"cmd.run": mock_iproute}):
expected = {
"interface": "eth0:1",
"source": "10.10.10.10",
"destination": "8.8.8.8",
"gateway": "10.10.10.1",
}
ret = network.get_route("8.8.8.8")
self.assertEqual(ret, expected)
mock_iproute = MagicMock(
return_value=(
"8.8.8.8 via 10.10.10.1 dev lan-br0 src 10.10.10.10 uid 0\ncache"
)
)
with patch.dict(network.__grains__, {"kernel": "Linux"}):
with patch.dict(network.__salt__, {"cmd.run": mock_iproute}):
expected = {
"interface": "lan-br0",
"source": "10.10.10.10",
"destination": "8.8.8.8",
"gateway": "10.10.10.1",
}
ret = network.get_route("8.8.8.8")
self.assertEqual(ret, expected)
|
|
from __future__ import unicode_literals
import datetime
import uuid
from copy import deepcopy
from django.core.exceptions import FieldError
from django.db import DatabaseError, connection, models, transaction
from django.db.models import TimeField, UUIDField
from django.db.models.aggregates import (
Avg, Count, Max, Min, StdDev, Sum, Variance,
)
from django.db.models.expressions import (
Case, Col, Date, DateTime, ExpressionWrapper, F, Func, OrderBy, Random,
RawSQL, Ref, Value, When,
)
from django.db.models.functions import (
Coalesce, Concat, Length, Lower, Substr, Upper,
)
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from django.utils.timezone import utc
from .models import UUID, Company, Employee, Experiment, Number, Time
class BasicExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10)
)
Company.objects.create(
name="Foobar Ltd.", num_employees=3, num_chairs=4,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20)
)
Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Max", lastname="Mustermann", salary=30)
)
def setUp(self):
self.company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by(
"name", "num_employees", "num_chairs"
)
def test_annotate_values_aggregate(self):
companies = Company.objects.annotate(
salaries=F('ceo__salary'),
).values('num_employees', 'salaries').aggregate(
result=Sum(
F('salaries') + F('num_employees'),
output_field=models.IntegerField()
),
)
self.assertEqual(companies['result'], 2395)
def test_annotate_values_filter(self):
companies = Company.objects.annotate(
foo=RawSQL('%s', ['value']),
).filter(foo='value').order_by('name')
self.assertQuerysetEqual(
companies, [
'<Company: Example Inc.>',
'<Company: Foobar Ltd.>',
'<Company: Test GmbH>',
],
)
def test_filter_inter_attribute(self):
# We can filter on attribute relationships on same model obj, e.g.
# find companies where the number of employees is greater
# than the number of chairs.
self.assertQuerysetEqual(
self.company_query.filter(num_employees__gt=F("num_chairs")), [
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{
"num_chairs": 1,
"name": "Test GmbH",
"num_employees": 32
},
],
lambda o: o
)
def test_update(self):
# We can set one field to have the value of another field
# Make sure we have enough chairs
self.company_query.update(num_chairs=F("num_employees"))
self.assertQuerysetEqual(
self.company_query, [
{
"num_chairs": 2300,
"name": "Example Inc.",
"num_employees": 2300
},
{
"num_chairs": 3,
"name": "Foobar Ltd.",
"num_employees": 3
},
{
"num_chairs": 32,
"name": "Test GmbH",
"num_employees": 32
}
],
lambda o: o
)
def test_arithmetic(self):
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
self.company_query.update(num_chairs=F("num_employees") + 2)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 2302,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 5,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 34,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_order_of_operations(self):
        # The usual order of operations is followed
        self.company_query.update(
num_chairs=F('num_employees') + 2 * F('num_employees')
)
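        # num_employees + 2 * num_employees == 3 * num_employees, e.g. 2300 + 2 * 2300 == 6900.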
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 6900,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 9,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 96,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_parenthesis_priority(self):
        # The usual order of operations can be overridden with parentheses
self.company_query.update(
num_chairs=((F('num_employees') + 2) * F('num_employees'))
)
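        # (num_employees + 2) * num_employees, e.g. (2300 + 2) * 2300 == 5294600.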
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 5294600,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 15,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 1088,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_update_with_fk(self):
        # A ForeignKey can be updated with the value of another ForeignKey.
self.assertEqual(
Company.objects.update(point_of_contact=F('ceo')),
3
)
self.assertQuerysetEqual(
Company.objects.all(), [
"Joe Smith",
"Frank Meyer",
"Max Mustermann",
],
lambda c: six.text_type(c.point_of_contact),
ordered=False
)
def test_update_with_none(self):
Number.objects.create(integer=1, float=1.0)
Number.objects.create(integer=2)
Number.objects.filter(float__isnull=False).update(float=Value(None))
self.assertQuerysetEqual(
Number.objects.all(), [
None,
None,
],
lambda n: n.float,
ordered=False
)
def test_filter_with_join(self):
# F Expressions can also span joins
Company.objects.update(point_of_contact=F('ceo'))
c = Company.objects.all()[0]
c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
c.save()
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), [
"Foobar Ltd.",
"Test GmbH",
],
lambda c: c.name,
ordered=False
)
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name="foo")
self.assertEqual(
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).get().name,
"foo",
)
with transaction.atomic():
with self.assertRaises(FieldError):
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).update(name=F('point_of_contact__lastname'))
def test_object_update(self):
# F expressions can be used to update attributes on single objects
test_gmbh = Company.objects.get(name="Test GmbH")
self.assertEqual(test_gmbh.num_employees, 32)
test_gmbh.num_employees = F("num_employees") + 4
test_gmbh.save()
test_gmbh = Company.objects.get(pk=test_gmbh.pk)
self.assertEqual(test_gmbh.num_employees, 36)
def test_new_object_save(self):
# We should be able to use Funcs when inserting new data
test_co = Company(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
)
test_co.save()
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_new_object_create(self):
test_co = Company.objects.create(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
)
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_object_create_with_aggregate(self):
# Aggregates are not allowed when inserting new data
with self.assertRaisesMessage(FieldError, 'Aggregate functions are not allowed in this query'):
Company.objects.create(
name='Company', num_employees=Max(Value(1)), num_chairs=1,
ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
)
def test_object_update_fk(self):
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh = Company.objects.get(name="Test GmbH")
def test():
test_gmbh.point_of_contact = F("ceo")
with self.assertRaises(ValueError):
test()
test_gmbh.point_of_contact = test_gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__last_name")
with self.assertRaises(FieldError):
test_gmbh.save()
def test_object_update_unsaved_objects(self):
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
test_gmbh = Company.objects.get(name="Test GmbH")
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5,
ceo=test_gmbh.ceo
)
acme.num_employees = F("num_employees") + 16
msg = (
'Failed to insert expression "Col(expressions_company, '
'expressions.Company.num_employees) + Value(16)" on '
'expressions.Company.num_employees. F() expressions can only be '
'used to update, not to insert.'
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
acme.num_employees = 12
acme.name = Lower(F('name'))
msg = (
'Failed to insert expression "Lower(Col(expressions_company, '
'expressions.Company.name))" on expressions.Company.name. F() '
'expressions can only be used to update, not to insert.'
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F('lastname'))
self.assertQuerysetEqual(queryset, ["<Employee: Test test>"])
@skipIfDBFeature('has_case_insensitive_like')
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertQuerysetEqual(
Employee.objects.filter(lastname__startswith=F('firstname')),
[e2], lambda x: x)
self.assertQuerysetEqual(
Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk'),
[e2, e3], lambda x: x)
def test_ticket_18375_join_reuse(self):
# Test that reverse multijoin F() references and the lookup target
# the same join. Pre #18375 the F() join was generated first, and the
# lookup couldn't reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering(self):
        # The next query was dict-randomization dependent: if the "gte=1"
        # lookup was seen first, the F() reused the join generated by the
        # gte lookup; if the F() was seen first, it generated a join the
        # other lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
company_ceo_set__num_chairs__gte=1)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering_2(self):
        # Another case similar to the one above. Now we have the same join
        # in two filter kwargs, one in the lhs lookup, one in F. Here, pre
        # #18375, the number of joins generated was random if dict
        # randomization was enabled, that is, the generated query depended
        # on which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk'),
pk=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_chained_filters(self):
        # Test that F() expressions do not reuse joins from a previous filter.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk')
).filter(
company_ceo_set__num_employees=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
class ExpressionsTests(TestCase):
def test_F_object_deepcopy(self):
"""
Make sure F objects can be deepcopied (#23492)
"""
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_f_reuse(self):
f = F('id')
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
"""
        Test that special characters (e.g. %, _ and \) stored in the database are
properly escaped when using a pattern lookup with an expression
refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%Joh\\n"),
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="John"),
Employee(firstname="Johnny", lastname="_ohn"),
])
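        # % and _ must be escaped so they match literally; e.g. lastname '_ohn' must not
        # act as a wildcard and match firstname 'Johnny'.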
self.assertQuerysetEqual(
Employee.objects.filter(firstname__contains=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__startswith=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__endswith=F('lastname')),
["<Employee: Jean-Claude Claude>"],
ordered=False)
def test_insensitive_patterns_escape(self):
"""
        Test that special characters (e.g. %, _ and \) stored in the database are
properly escaped when using a case insensitive pattern lookup with an
expression -- refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%joh\\n"),
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="john"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__icontains=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__istartswith=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__iendswith=F('lastname')),
["<Employee: Jean-Claude claude>"],
ordered=False)
class ExpressionsNumericTests(TestCase):
def setUp(self):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
self.assertEqual(Number.objects.update(float=F('integer')), 3)
def test_fill_with_value_from_same_object(self):
"""
        We can fill a value in all objects with another value of the
same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 42, 42.000>',
'<Number: 1337, 1337.000>'
],
ordered=False
)
def test_increment_value(self):
"""
We can increment a value of all objects in a query set.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_filter_not_equals_other_field(self):
"""
        We can filter for objects where a value does not equal the value
        of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.exclude(float=F('integer')),
[
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(Number.objects.filter(pk=n.pk).update(
float=F('integer') + F('float') * 2), 1)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
def test_incorrect_field_expression(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword u?'nope' into field.*"):
list(Employee.objects.filter(firstname=F('nope')))
class ExpressionOperatorTests(TestCase):
def setUp(self):
self.n = Number.objects.create(integer=42, float=15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F('integer') + 15,
float=F('float') + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15, float=F('float') - 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15, float=F('float') * 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2, float=F('float') / 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
@skipUnlessDBFeature('supports_bitwise_or')
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_power(self):
        # LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2, float=F('float') ** 1.5)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'), float=42.7 + F('float'))
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'), float=42.7 - F('float'))
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'), float=42.7 * F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'), float=42.7 / F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_righthand_power(self):
        # RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'), float=1.5 ** F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3))
class FTimeDeltaTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.sday = sday = datetime.date(2010, 6, 25)
cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
# Test data is set so that deltas and delays will be
# strictly increasing.
cls.deltas = []
cls.delays = []
cls.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
e0 = Experiment.objects.create(
name='e0', assigned=sday, start=stime, end=end,
completed=end.date(), estimated_time=delta0,
)
cls.deltas.append(delta0)
cls.delays.append(e0.start - datetime.datetime.combine(e0.assigned, midnight))
cls.days_long.append(e0.completed - e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite. This Experiment is only
# included in the test data when the DB supports microsecond
# precision.
if connection.features.supports_microsecond_precision:
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(
name='e1', assigned=sday, start=stime + delay, end=end,
completed=end.date(), estimated_time=delta1,
)
cls.deltas.append(delta1)
cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight))
cls.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(
name='e2', assigned=sday - datetime.timedelta(3), start=stime,
end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1),
)
cls.deltas.append(delta2)
cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight))
cls.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(
name='e3', assigned=sday, start=stime + delay, end=end,
completed=end.date(), estimated_time=delta3,
)
cls.deltas.append(delta3)
cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight))
cls.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(
name='e4', assigned=sday - datetime.timedelta(10), start=stime,
end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1),
)
cls.deltas.append(delta4)
cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight))
cls.days_long.append(e4.completed - e4.assigned)
cls.expnames = [e.name for e in Experiment.objects.all()]
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643 - Crash when compiling query more than once
qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
# Intentionally no assert
def test_delta_add(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in Experiment.objects.filter(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(end__lt=delta + F('start'))]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_subtract(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in Experiment.objects.filter(start__gt=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(start__gte=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_exclude(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in Experiment.objects.exclude(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i:])
test_set = [e.name for e in Experiment.objects.exclude(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i + 1:])
def test_date_comparison(self):
for i in range(len(self.days_long)):
days = self.days_long[i]
test_set = [e.name for e in Experiment.objects.filter(completed__lt=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(completed__lte=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i + 1])
@skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
def test_mixed_comparisons1(self):
for i in range(len(self.delays)):
delay = self.delays[i]
if not connection.features.supports_microsecond_precision:
delay = datetime.timedelta(delay.days, delay.seconds)
test_set = [e.name for e in Experiment.objects.filter(assigned__gt=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(assigned__gte=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_mixed_comparisons2(self):
delays = [datetime.timedelta(delay.days) for delay in self.delays]
for i in range(len(delays)):
delay = delays[i]
test_set = [e.name for e in Experiment.objects.filter(start__lt=F('assigned') + delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(start__lte=F('assigned') + delay + datetime.timedelta(1))
]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_update(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F('start') + delta, end=F('end') + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_invalid_operator(self):
with self.assertRaises(DatabaseError):
list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0)))
def test_durationfield_add(self):
zeros = [e.name for e in Experiment.objects.filter(start=F('start') + F('estimated_time'))]
self.assertEqual(zeros, ['e0'])
end_less = [e.name for e in Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))]
self.assertEqual(end_less, ['e2'])
delta_math = [
e.name for e in
Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + datetime.timedelta(hours=1))
]
self.assertEqual(delta_math, ['e4'])
@skipUnlessDBFeature('supports_temporal_subtraction')
def test_date_subtraction(self):
queryset = Experiment.objects.annotate(
completion_duration=ExpressionWrapper(
F('completed') - F('assigned'), output_field=models.DurationField()
)
)
at_least_5_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=5))}
self.assertEqual(at_least_5_days, {'e3', 'e4'})
less_than_5_days = {e.name for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))}
expected = {'e0', 'e2'}
if connection.features.supports_microsecond_precision:
expected.add('e1')
self.assertEqual(less_than_5_days, expected)
@skipUnlessDBFeature('supports_temporal_subtraction')
def test_time_subtraction(self):
if connection.features.supports_microsecond_precision:
time = datetime.time(12, 30, 15, 2345)
timedelta = datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345)
else:
time = datetime.time(12, 30, 15)
timedelta = datetime.timedelta(hours=1, minutes=15, seconds=15)
Time.objects.create(time=time)
queryset = Time.objects.annotate(
difference=ExpressionWrapper(
F('time') - Value(datetime.time(11, 15, 0), output_field=models.TimeField()),
output_field=models.DurationField(),
)
)
self.assertEqual(queryset.get().difference, timedelta)
@skipUnlessDBFeature('supports_temporal_subtraction')
def test_datetime_subtraction(self):
under_estimate = [
e.name for e in Experiment.objects.filter(estimated_time__gt=F('end') - F('start'))
]
self.assertEqual(under_estimate, ['e2'])
over_estimate = [
e.name for e in Experiment.objects.filter(estimated_time__lt=F('end') - F('start'))
]
self.assertEqual(over_estimate, ['e4'])
def test_duration_with_datetime(self):
# Exclude e1 which has very high precision so we can test this on all
# backends regardless of whether or not it supports
# microsecond_precision.
over_estimate = Experiment.objects.exclude(name='e1').filter(
completed__gt=self.stime + F('estimated_time'),
).order_by('name')
self.assertQuerysetEqual(over_estimate, ['e3', 'e4'], lambda e: e.name)
class ValueTests(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField()))
self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012'))
class ReprTests(TestCase):
def test_expressions(self):
self.assertEqual(
repr(Case(When(a=1))),
"<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>"
)
self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)")
self.assertEqual(repr(Date('published', 'exact')), "Date(published, exact)")
self.assertEqual(repr(DateTime('published', 'exact', utc)), "DateTime(published, exact, %s)" % utc)
self.assertEqual(repr(F('published')), "F(published)")
self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>")
self.assertEqual(
repr(ExpressionWrapper(F('cost') + F('tax'), models.IntegerField())),
"ExpressionWrapper(F(cost) + F(tax))"
)
self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)")
self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)')
self.assertEqual(repr(Random()), "Random()")
self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])")
self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))")
self.assertEqual(repr(Value(1)), "Value(1)")
def test_functions(self):
self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))")
self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))")
self.assertEqual(repr(Length('a')), "Length(F(a))")
self.assertEqual(repr(Lower('a')), "Lower(F(a))")
self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))")
self.assertEqual(repr(Upper('a')), "Upper(F(a))")
def test_aggregates(self):
self.assertEqual(repr(Avg('a')), "Avg(F(a))")
self.assertEqual(repr(Count('a')), "Count(F(a), distinct=False)")
self.assertEqual(repr(Count('*')), "Count('*', distinct=False)")
self.assertEqual(repr(Max('a')), "Max(F(a))")
self.assertEqual(repr(Min('a')), "Min(F(a))")
self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)")
self.assertEqual(repr(Sum('a')), "Sum(F(a))")
self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)")
|
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 5 18:11:33 2016
@author: johnlewisiii
"""
import math
import os
import statistics
import sys
from importlib import reload
import emcee
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astropy import constants as constants
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.table import Table
from astropy.wcs import WCS
from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size
from scipy import integrate, interpolate, ndimage, signal, special, stats
from weighted import quantile
nd = ndimage
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def nice_pandas(format="{:3.3g}"):
pd.set_option("display.float_format", lambda x: format.format(x))
#############################
#############################
#### Plotting commands ####
#############################
#############################
# Set uniform plot options
# some constants
fwhm = 2 * np.sqrt(2 * np.log(2))
def set_plot_opts(serif_fonts=True):
if serif_fonts:
mpl.rcParams["mathtext.fontset"] = "stix"
mpl.rcParams["font.family"] = "serif"
mpl.rcParams["font.size"] = 14
return None
def check_iterable(arr):
return hasattr(arr, "__iter__")
def color_array(arr, alpha=1):
""" take an array of colors and convert to
an RGBA image that can be displayed
with imshow
"""
img = np.zeros(arr.shape + (4,))
for row in range(arr.shape[0]):
for col in range(arr.shape[1]):
c = mpl.colors.to_rgb(arr[row, col])
img[row, col, 0:3] = c
img[row, col, 3] = alpha
return img
def arr_to_rgb(arr, rgb=(0, 0, 0), alpha=1, invert=False, ax=None):
"""
    arr: array to be made into a mask
    rgb: assumed to be floats (0..1, 0..1, 0..1) or a color string
"""
# arr should be scaled to 1
img = np.asarray(arr, dtype=np.float64)
img = img - np.nanmin(img)
img = img / np.nanmax(img)
im2 = np.zeros(img.shape + (4,))
if isinstance(rgb, str):
rgb = mpl.colors.to_rgb(rgb)
if invert:
img = 1 - img
im2[:, :, 3] = img * alpha
r, g, b = rgb
im2[:, :, 0] = r
im2[:, :, 1] = g
im2[:, :, 2] = b
# if ax is None:
# ax = plt.gca()
# plt.sca(ax)
# plt.imshow(im2)
return im2
def invert_color(ml, *args, **kwargs):
rgb = mpl.colors.to_rgb(ml)
hsv = mpl.colors.rgb_to_hsv(rgb)
h, s, v = hsv
h = 1 - h
s = 1 - s
v = 1 - v
return mpl.colors.to_hex(mpl.colors.hsv_to_rgb((h, s, v)))
def icol(*args, **kwargs):
return invert_color(*args, **kwargs)
def get_xylim(ax=None):
if ax is None:
ax = plt.gca()
xlim, ylim = ax.get_xlim(), ax.get_ylim()
return xlim, ylim
def set_xylim(xlim=None, ylim=None, ax=None, origin=None):
"""set xylims with tuples
xlim: tuple of x axis limits
ylim: tuple of y axis limits
origin: sometimes you just want to change the origin
so you can keep the axis limits the same
but just change origin
"""
if ax is None:
ax = plt.gca()
if xlim is None:
xlim = ax.get_xlim()
if ylim is None:
ylim = ax.get_ylim()
if isinstance(xlim, tuple):
xlim = list(xlim)
if isinstance(ylim, tuple):
ylim = list(ylim)
if origin is not None:
if origin is True:
if ax.get_xaxis().get_scale()[:3] != "log":
xlim[0] = 0
if ax.get_yaxis().get_scale()[:3] != "log":
ylim[0] = 0
else:
xlim[0] = origin[0]
ylim[0] = origin[1]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return tuple(xlim), tuple(ylim)
def get_cax(ax=None, position="right", frac=0.03, pad=0.05):
"""get a colorbar axes of the same height as current axes
position: "left" "right" ( vertical | )
"top" "bottom" (horizontal --- )
"""
if ax is None:
ax = plt.gca()
size = f"{frac*100}%"
divider = make_axes_locatable(ax)
cax = divider.append_axes(position, size=size, pad=pad)
plt.sca(ax)
return cax
def colorbar(mappable=None, cax=None, ax=None, size=0.03, pad=0.05, **kw):
"""wrapper for pyplot.colorbar.
"""
if ax is None:
ax = plt.gca()
if cax is None:
cax = get_cax(ax=ax, frac=size, pad=pad)
ret = plt.colorbar(mappable, cax=cax, ax=ax, **kw)
return ret
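# Usage sketch (not part of the original module; the image data is hypothetical):
#   im = plt.imshow(np.random.rand(32, 32))
#   colorbar(im, size=0.05, pad=0.02)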
# Plot the KDE for a set of x,y values. No weighting code modified from
# http://stackoverflow.com/questions/30145957/plotting-2d-kernel-density-estimation-with-python
def kdeplot(xp, yp, filled=False, ax=None, grid=None, bw=None, *args, **kwargs):
if ax is None:
ax = plt.gca()
rvs = np.append(xp.reshape((xp.shape[0], 1)), yp.reshape((yp.shape[0], 1)), axis=1)
    kde = stats.gaussian_kde(rvs.T)
# kde.covariance_factor = lambda: 0.3
# kde._compute_covariance()
kde.set_bandwidth(bw)
# Regular grid to evaluate kde upon
if grid is None:
x_flat = np.r_[rvs[:, 0].min() : rvs[:, 0].max() : 256j]
y_flat = np.r_[rvs[:, 1].min() : rvs[:, 1].max() : 256j]
else:
x_flat = np.r_[0 : grid[0] : complex(0, grid[0])]
y_flat = np.r_[0 : grid[1] : complex(0, grid[1])]
x, y = np.meshgrid(x_flat, y_flat)
grid_coords = np.append(x.reshape(-1, 1), y.reshape(-1, 1), axis=1)
z = kde(grid_coords.T)
z = z.reshape(x.shape[0], x.shape[1])
if filled:
cont = ax.contourf
else:
cont = ax.contour
cs = cont(x_flat, y_flat, z, *args, **kwargs)
return cs
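# Usage sketch (hypothetical data; "scott" is scipy's default bandwidth rule):
#   xp = np.random.randn(500)
#   yp = 0.5 * xp + np.random.randn(500)
#   cs = kdeplot(xp, yp, filled=True, bw="scott", levels=10)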
def wcsaxis(header, N=6, ax=None, fmt="%0.2f", use_axes=False, label=True):
oldax = plt.gca()
if ax is None:
ax = plt.gca()
plt.sca(ax)
xlim = ax.axes.get_xlim()
ylim = ax.axes.get_ylim()
wcs = WCS(header)
naxis = header["NAXIS"] # naxis
naxis1 = header["NAXIS1"] # naxis1
naxis2 = header["NAXIS2"] # naxis2
# crpix1 = hdr['CRPIX1']
# crpix2 = hdr['CRPIX2']
# crval1 = hdr['CRVAL1']
# crval2 = hdr['CRVAL2']
# try:
# cdelt1 = wcs['CDELT1']
# cdelt2 = wcs['CDELT2']
# except BaseException:
# cdelt1 = wcs['CD1_1']
# cdelt2 = wcs['CD2_2']
if not use_axes:
xoffset = ((xlim[1] - xlim[0]) / N) / 5
x = np.linspace(xlim[0] + xoffset, xlim[1] - xoffset, N)
if naxis >= 2:
yoffset = ((ylim[1] - ylim[0]) / N) / 5
y = np.linspace(ylim[0] + yoffset, ylim[1] - yoffset, N)
else:
x = ax.get_xticks()
if naxis >= 2:
y = ax.get_yticks()
if naxis == 1:
x_tick = wcs.all_pix2world(x, 0)
elif naxis == 2:
coord = list(zip(x, y))
x_tick, y_tick = wcs.all_pix2world(coord, 0).T
elif naxis > 2:
c = [x, y]
for i in range(naxis - 2):
c.append([0] * N)
coord = list(zip(*c))
ticks = wcs.all_pix2world(coord, 0)
x_tick, y_tick = np.asarray(ticks)[:, :2].T
plt.xticks(x, [fmt % i for i in x_tick])
plt.yticks(y, [fmt % i for i in y_tick])
if label:
if header["CTYPE1"][0].lower() == "g":
plt.xlabel("Galactic Longitude (l)")
plt.ylabel("Galactic Latitude (b)")
else:
plt.xlabel("Right Ascension (J2000)")
plt.ylabel("Declination (J2000)")
ax.axes.set_xlim(xlim[0], xlim[1])
ax.axes.set_ylim(ylim[0], ylim[1])
plt.sca(oldax)
return ax
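# Usage sketch (hypothetical FITS file; assumes a 2D image in the primary HDU):
#   hdu = fits.open("image.fits")[0]
#   plt.imshow(hdu.data, origin="lower")
#   wcsaxis(hdu.header, N=5, fmt="%0.1f")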
def rectangle(c, w, h, angle=0, center=True):
"""
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
    Rectangle is created and rotated with center
at zero, and then translated to center position
accepts centers
Default : center
options for center: tl, tr, bl, br
"""
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2.0, +w / 2.0, +w / 2.0, -w / 2.0
y = +h / 2.0, +h / 2.0, -h / 2.0, -h / 2.0
# correct the center if starting from corner
if center is not True:
if center[0] == "b":
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.0
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.0
if center[1] == "l":
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.0
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.0
R = rot_matrix(angle * np.pi / 180.0)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
        # coord switch to match ordering of FITS dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
def rectangle2(c, w, h, angle=0, center=True):
"""
    Like rectangle(), but returns the closed outline as an array for plotting.
    Create a rotated rectangle for input into
    PIL ImageDraw.polygon to make a rectangle polygon mask.
    The rectangle is created and rotated with its center
    at zero, and then translated to the center position.
    center : True (default) to treat c as the rectangle center,
             or a two-letter corner code: tl, tr, bl, br
"""
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2.0, +w / 2.0, +w / 2.0, -w / 2.0
y = +h / 2.0, +h / 2.0, -h / 2.0, -h / 2.0
# correct center if starting from corner
if center is not True:
if center[0] == "b":
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.0
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.0
if center[1] == "l":
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.0
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.0
R = rot_matrix(angle * np.pi / 180.0)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
        # coord switch to match ordering of FITS dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return np.array([c[0], c[1], c[2], c[3], c[0]]).T
def plot_rectangle(c, w, h, angle=0, center=True, ax=None, n=10, m="-", **plot_kwargs):
if False: # center is True:
print("Hey, did you know this is built into matplotlib")
print(
"Yeah, just do ax.add_patch(plt.Rectangle(xy=(cx,cy),height=h, width=w, angle=deg))"
)
print(
"of course this one will work even if grid is not rectilinear and can use points"
)
print("defined w.r.t. a corner")
if ax is None:
ax = plt.gca()
x, y = rectangle2(c, w, h, angle=angle, center=center)
ax.plot(x, y, **plot_kwargs)
n = n * 1j
# interpolate each linear segment
leg1 = np.r_[x[0] : x[1] : n], np.r_[y[0] : y[1] : n]
leg2 = np.r_[x[1] : x[2] : n], np.r_[y[1] : y[2] : n]
leg3 = np.r_[x[2] : x[3] : n], np.r_[y[2] : y[3] : n]
leg4 = np.r_[x[3] : x[4] : n], np.r_[y[3] : y[4] : n]
ax.plot(*leg1, m, *leg2, m, *leg3, m, *leg4, m, **plot_kwargs)
return ax
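# --- Usage sketch (added for illustration, not part of the original source) ---
# Draws a rotated 20x10 box centred at (50, 50). plot_rectangle() relies on the
# module-level rot_matrix() helper used by rectangle2().
def _example_plot_rectangle():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    plot_rectangle((50, 50), 20, 10, angle=30, ax=ax, color="C0")
    ax.set_xlim(0, 100)
    ax.set_ylim(0, 100)
    ax.set_aspect("equal")
    plt.show()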
def color_hue_shift(c, shift=1):
c = mpl.colors.to_rgb(c)
h, s, v = mpl.colors.rgb_to_hsv(c)
    h = (h + shift) % 1  # wrap the shifted hue back into [0, 1)
return mpl.colors.to_hex(mpl.colors.hsv_to_rgb((h, s, v)))
def plot_covariances(p, cov, names=None, figsize=(12, 12), nsamps=5000, smooth=1):
p = np.random.multivariate_normal(p, cov, nsamps)
fig, axs = corner(p, smooth=smooth, names=names, figsize=figsize)
return fig, axs
def plot_astropy_fit_covariances(fit, fitter):
p = fit.parameters
cov = fitter.fit_info["param_cov"]
ax = plot_covariances(p, cov, names=fit.param_names)
return ax
def plot_walkers(sampler, limits=None, bad=None):
"""
sampler : emcee Sampler class
"""
if hasattr(sampler, "__getitem__"):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
for w, walk in enumerate(chain[:, limits:, :]):
if bad is None:
color = "k"
elif bad[w]:
color = "r"
else:
color = "k"
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=0.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
# TODO
# Make it scale properly
# How does matplotlib
# scaling work
def combine_cmap(cmaps, lower, upper, name="custom", N=None, register=True):
n = len(cmaps)
for ic, c in enumerate(cmaps):
if isinstance(c, str):
cmaps[ic] = mpl.cm.get_cmap(c)
if N is None:
N = [256] * n
values = np.array([])
colors = np.empty((0, 4))
for i in range(n):
step = (upper[i] - lower[i]) / N[i]
xcols = np.arange(lower[i], upper[i], step)
values = np.append(values, xcols)
xcols -= xcols.min()
xcols /= xcols.max()
cols = cmaps[i](xcols)
colors = np.vstack([colors, cols])
values -= values.min()
values /= values.max()
arr = list(zip(values, colors))
cmap = mpl.colors.LinearSegmentedColormap.from_list(name, arr)
if (name != "custom") & register:
mpl.cm.register_cmap(name=name, cmap=cmap)
return cmap
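# --- Usage sketch (added for illustration, not part of the original source) ---
# Stitches two named colormaps into one: Blues covers the lower half of the
# normalized range and Reds the upper half.
def _example_combine_cmap():
    import numpy as np
    import matplotlib.pyplot as plt
    cmap = combine_cmap(["Blues", "Reds"], lower=[0.0, 0.5], upper=[0.5, 1.0])
    plt.imshow(np.random.rand(50, 50), cmap=cmap)
    plt.colorbar()
    plt.show()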
def custom_cmap(colormaps, lower, upper, log=(0, 0)):
"""
colormaps : a list of N matplotlib colormap classes
lower : the lower limits for each colormap: array or tuple
upper : the upper limits for each colormap: array or tuple
log : Do you want to plot logscale. This will create
a color map that is usable with LogNorm()
"""
    if isinstance(log, tuple):
        for lg in log:
            if lg:
                upper = [np.log10(i / lower[0]) for i in upper]
                lower = [np.log10(i / lower[0]) for i in lower]
            norm = upper[-1]
    elif log:
        upper = [np.log10(i / lower[0]) for i in upper]
        lower = [np.log10(i / lower[0]) for i in lower]
        norm = upper[-1]
    else:
        norm = upper[-1]
for ic, c in enumerate(colormaps):
if isinstance(c, str):
colormaps[ic] = mpl.cm.get_cmap(c)
cdict = {"red": [], "green": [], "blue": []}
for color in ["red", "green", "blue"]:
for j, col in enumerate(colormaps):
# print j,col.name,color
x = [i[0] for i in col._segmentdata[color]]
y1 = [i[1] for i in col._segmentdata[color]]
y0 = [i[2] for i in col._segmentdata[color]]
x = [(i - min(x)) / (max(x) - min(x)) for i in x]
x = [((i * (upper[j] - lower[j])) + lower[j]) / norm for i in x]
if (j == 0) & (x[0] != 0):
x[:0], y1[:0], y0[:0] = [0], [y1[0]], [y0[0]]
for i in range(len(x)): # first x needs to be zero
cdict[color].append((x[i], y1[i], y0[i]))
return colors.LinearSegmentedColormap("my_cmap", cdict)
def cmap_split(*args, **kwargs):
"""alias for split_cmap"""
return split_cmap(*args, **kwargs)
def split_cmap(cmapn='viridis', split=0.5, vmin=0, vmaxs=(.5, 1), vstep=None,
               vsplit=None, log=False):
    """
    Split a colormap at a certain location.
    split - where along the colormap the split point goes;
            by default it is put in the middle of the values
    vmin - value the colorbar starts at; should match vmin in the
           plotting command
    vmaxs - (splitvalue, vmax): where the second segment of the
            colormap starts. cmap(split) will be located at
            value=splitvalue
    vsplit - instead of giving vmin and vmaxs you can give a single
             split value between 0 and 1
    log - does not do what anyone would think; not recommended
    (see the usage sketch after this function)
"""
if vsplit is not None:
vmin=0
vmaxs=(vsplit,1)
vmin1 = vmin
vmax1 = vmaxs[0]
vmin2 = vmax1
vmax2 = vmaxs[1]
if vstep is None:
vstep= (vmax2 - vmin1)/1024
levels1 = np.arange(vmin1, vmax1+vstep, vstep)
levels2 = np.arange(vmin2, vmax2+vstep, vstep)
ncols1 = len(levels1)-1
#ncols1 = int((vmax1-vmin1)//vstep)
ncols2 = len(levels2)-1
# ncols1 = int((vmax1-vmin1)//vstep)+1
# ncols2 = int((vmax2-vmin2)//vstep)+1
# ncols = ncols1 + ncols2
split = split
# Sample the right number of colours
# from the right bits (between 0 & 1) of the colormaps we want.
cmap2 = mpl.cm.get_cmap(cmapn)
if log:
cmap1 = mpl.cm.get_cmap(cmapn+'_r')
cols1 = cmap1(np.logspace(np.log10(1-split),0, ncols1))[::-1]
cols2 = cmap2(np.logspace(np.log10(split), 0, ncols2))
else:
cols1 = cmap2(np.linspace(0.0, split, ncols1))
cols2 = cmap2(np.linspace(split, 1, ncols2))
#cols2 = cmap2(np.logspace(np.log10(split), 0, ncols2))
# Combine them and build a new colormap:
allcols2 = np.vstack( (cols1,cols2) )
return mpl.colors.LinearSegmentedColormap.from_list('piecewise2', allcols2)
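# --- Usage sketch (added for illustration, not part of the original source) ---
# Puts the middle of 'viridis' at 25% of the data range, so the lower quarter of
# the values uses the first half of the colormap and the rest uses the second half.
def _example_split_cmap():
    import numpy as np
    import matplotlib.pyplot as plt
    cmap = split_cmap("viridis", split=0.5, vsplit=0.25)
    plt.imshow(np.random.rand(50, 50), cmap=cmap, vmin=0, vmax=1)
    plt.colorbar()
    plt.show()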
def plot_2dhist(
X,
Y,
xlog=True,
ylog=True,
cmap=None,
norm=mpl.colors.LogNorm(),
vmin=None,
vmax=None,
bins=50,
statistic=np.nanmean,
statstd=np.nanstd,
histbins=None,
histrange=None,
cmin=1,
binbins=None,
weighted_fit=True,
ax=None,
plot_bins=True,
plot_fit=True,
):
"""[plot the 2d hist and x-binned version]
Arguments:
X {array} -- array of x-values
Y {array} -- array of y-values
Keyword Arguments:
xlog {bool} -- use log of X (default: {True})
ylog {bool} -- use log of Y (default: {True})
cmap {[type]} -- cmap for histogram (default: {None})
norm {[type]} -- normalization for histogram cmap (default: {mpl.colors.LogNorm()})
vmin {number} -- min val for cmap (default: {None})
vmax {number} -- max val for cmap (default: {None})
bins {int} -- number of bins for hist2d (default: {50})
statistic {function} -- statistic function (default: {np.nanmean})
statstd {function} -- error stat function (default: {np.nanstd})
        histbins {[type]} -- bins for the histogram (default: {None})
histrange {(xmin,xmax),(ymin,ymax)} -- range for histogram (default: {None})
cmin {int} -- [description] (default: {1})
binbins {[type]} -- [description] (default: {None})
weighted_fit {bool} -- [description] (default: {True})
ax {[type]} -- [description] (default: {None})
plot_bins {bool} -- [description] (default: {True})
plot_fit {bool} -- [description] (default: {True})
Returns:
[tuple] -- [x, y, p, ax]
    Notes:
        this uses mavg from this file; if it is not available, substitute your
        own bin-midpoint helper (see the usage sketch after this function)
"""
if ax is None:
ax = plt.gca()
if xlog:
x = np.log10(X)
else:
x = np.asarray(X)
if ylog:
y = np.log10(Y)
else:
y = np.asarray(Y)
_ = ax.hist2d(
x,
y,
range=histrange,
bins=histbins,
cmap=cmap,
cmin=cmin,
norm=norm,
vmin=vmin,
vmax=vmax,
zorder=1,
)
# bin the data
if binbins is None:
binbins = np.linspace(np.nanmin(x), np.nanmax(x), 10)
st, be, _ = stats.binned_statistic(x, y, statistic=statistic, bins=binbins)
est, be, _ = stats.binned_statistic(x, y, statistic=statstd, bins=binbins)
cl = np.isfinite(st) & np.isfinite(est)
if plot_bins:
ax.errorbar(
mavg(be)[cl],
st[cl],
yerr=est[cl],
fmt="s",
color="r",
label="binned data",
lw=1.5,
zorder=2,
)
if weighted_fit:
p = np.polyfit(mavg(be)[cl][1:], st[cl][1:], 1, w=1 / est[cl][1:] ** 2)
else:
p = np.polyfit(mavg(be)[cl][1:], st[cl][1:], 1)
funcname = "Best fit: {m:0.5G}*x + {b:0.5G}".format(m=p[0], b=p[1])
if plot_fit:
ax.plot([0, 64], np.polyval(p, [0, 64]), "dodgerblue", lw=1.5, label=funcname)
ax.legend()
return x, y, p, ax
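# --- Usage sketch (added for illustration, not part of the original source) ---
# Fits a power law to noisy synthetic data in log-log space. histbins must be
# given explicitly (it is forwarded to ax.hist2d), and plot_2dhist() relies on
# the module-level mavg() helper mentioned in the docstring.
def _example_plot_2dhist():
    import numpy as np
    import matplotlib.pyplot as plt
    rng = np.random.default_rng(1)
    X = 10 ** rng.uniform(0, 2, 5000)
    Y = X ** 1.5 * 10 ** rng.normal(0, 0.1, 5000)
    fig, ax = plt.subplots()
    x, y, p, ax = plot_2dhist(X, Y, xlog=True, ylog=True, histbins=50, cmap="magma", ax=ax)
    print("best-fit slope, intercept:", p)
    plt.show()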
def hist2d(
x,
y,
range=None,
bins=20,
smooth=False,
clip=False,
pad=True,
normed=True,
weights=None,
):
g = np.isfinite(x + y)
x = np.array(x)[g]
y = np.array(y)[g]
if bins is not None:
if range is None:
if isinstance(bins, int) or (bins == "auto"):
xedges = np.histogram_bin_edges(x, bins=bins)
yedges = np.histogram_bin_edges(y, bins=bins)
elif check_iterable(bins) & (len(bins) == 2):
xedges = np.histogram_bin_edges(x, bins=bins[0])
yedges = np.histogram_bin_edges(y, bins=bins[1])
bins = [xedges, yedges]
else:
if (len(range)==2) & (len(range[0])==2):
xedges = np.histogram_bin_edges(x, bins=bins, range=range[0])
yedges = np.histogram_bin_edges(y, bins=bins, range=range[1])
else:
xedges = np.histogram_bin_edges(x, bins=bins, range=range)
yedges = np.histogram_bin_edges(y, bins=bins, range=range)
bins = [xedges, yedges]
elif range is None:
xedges = np.histogram_bin_edges(x, bins=bins)
yedges = np.histogram_bin_edges(y, bins=bins)
bins = [xedges, yedges]
range = None
else:
range = list(map(np.sort, range))
H, X, Y = np.histogram2d(x, y, bins=bins, range=range, weights=weights)
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
if pad:
padn = np.max([2, int(smooth * 2 // 1)])
H, X1, Y1 = extend_hist(H, X1, Y1, fill=0, padn=padn)
    if smooth:
        if clip:
            oldH = H == 0  # note: this mask is computed but never re-applied after smoothing
        H = nd.gaussian_filter(H, smooth)
if normed:
sm = data2norm(H)
else:
sm = H
return sm.T, X1, Y1
def clean_color(color, reverse=False):
if isinstance(color, str):
if color[-2:] == "_r":
return color[:-2], True
elif reverse is True:
return color, True
else:
return color, False
else:
return color, reverse
def color_cmap(c, alpha=1, to_white=True, reverse=False):
if to_white:
end = (1, 1, 1, alpha)
else:
end = (0, 0, 0, alpha)
color, reverse = clean_color(c, reverse=reverse)
cmap = mpl.colors.LinearSegmentedColormap.from_list("density_cmap", [color, end])
if reverse:
return cmap.reversed()
else:
return cmap
def contour_level_colors(cmap, levels, vmin=None, vmax=None, center=True):
"""get colors corresponding to those produced by contourf
Arguments:
cmap {string or cmap} -- colormap
levels {list or array} -- desired levels
Keyword Arguments:
vmin {number} -- min value (default: {0})
vmax {number} -- max value (default: {max(levels)})
center {True} -- contourf uses center=True values.
False will produce a border effect (default: {True})
Returns:
[ndarray] -- [list of colors]
"""
vmin = vmin or 0
vmax = vmax or max(levels)
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
# offset = np.diff(levels)[0] * .5
# colors = mpl.cm.get_cmap(cmap)(norm(levels-offset))
levels = np.r_[0, levels]
center_levels = 0.5 * (levels[1:] + levels[:-1])
return mpl.cm.get_cmap(cmap)(norm(center_levels))
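# --- Usage sketch (added for illustration, not part of the original source) ---
# Colors contour lines the same way contourf would color the corresponding bands.
def _example_contour_level_colors():
    import numpy as np
    import matplotlib.pyplot as plt
    xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
    zz = np.exp(-(xx ** 2 + yy ** 2))
    levels = [0.2, 0.4, 0.6, 0.8]
    cols = contour_level_colors("viridis", levels)
    plt.contour(xx, yy, zz, levels=levels, colors=list(cols))
    plt.show()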
def stat_plot1d(x, ax=None, bins="auto", histtype="step", lw=2, **plot_kwargs):
"""
    Really just a fallback for stat_plot2d
    if one of the parameters has no variance.
    Arguments:
        x {array-like} -- array of values
"""
if ax is None:
ax = plt.gca()
    ax.hist(x[np.isfinite(x)], bins=bins, histtype=histtype, lw=lw, **plot_kwargs)
return ax
def stat_plot2d(
x,
y,
marker="k.",
bins=20,
range=None,
smooth=0,
xscale=None,
yscale=None,
plot_data=False,
plot_contourf=False,
plot_contour=False,
plot_imshow=False,
plot_binned=True,
color=None,
cmap=None,
levels=None,
mfc=None,
mec=None,
mew=None,
ms=None,
vmin=None,
vmax=None,
alpha=1,
rasterized=True,
linewidths=None,
data_kwargs=None,
contourf_kwargs=None,
contour_kwargs=None,
data_color=None,
contour_color=None,
default_color=None,
binned_color=None,
contourf_levels=None,
contour_levels=None,
lw=None,
debug=False,
zorder=0,
ax=None,
plot_datapoints=None,
):
"""
    Based on hist2d from dfm's corner.py,
    but so much prettier and with so many more options;
    will eventually be part of my own corner.py
    (of course most of the corner part will be lifted
    directly from corner.py, with attribution of course :D).
    ## Look Ma! Crappy Documentation!!! ##
    Just know the kwargs give the most direct control;
    they take precedence over the other keywords.
    color precedence:
        color
        marker color (for data only)
        data_color (for data only, overrides marker)
        contour_color (contour only, overrides color)
        match (contour only, overrides both)
    (see the usage sketch after this function)
"""
if ax is None:
ax = plt.gca()
if xscale == "log":
x = np.log10(x)
if yscale == "log":
y = np.log10(y)
if plot_datapoints is None:
plot_datapoints = plot_data
if not (plot_data or plot_contour or plot_contourf):
# give the user a decent default plot
plot_data = True
plot_contour = True
smooth = 2
if smooth is None:
smooth = 0
g = np.isfinite(x + y)
x, y = np.asarray(x)[g], np.asarray(y)[g]
if (x.var() == 0) & (y.var() == 0):
        print(
            "Both variables have variance=0, so no density plot can be generated. "
            "Here is a scatter plot of the data instead."
        )
print("First 10 (or less) elements of x", x[:10])
print("First 10 (or less) elements of y", y[:10])
ax.scatter(x, y)
return 0
elif x.var() == 0:
print(
"Variable X has variance=0. Instead of making an ugly plot, here is a histogram of the remaining variable"
)
stat_plot1d(y)
return 0
elif y.var() == 0:
print(
"Variable Y has variance=0. Instead of making an ugly plot, here is a histogram of the remaining variable"
)
stat_plot1d(x)
return 0
if range is None:
range = [[x.min(), x.max()], [y.min(), y.max()]]
sm_unflat, X1, Y1 = hist2d(x, y, bins=bins, range=range, smooth=smooth)
if xscale == "log":
x = np.power(10, x)
X1 = np.power(10, X1)
ax.set_xscale("log")
if yscale == "log":
y = np.power(10, y)
Y1 = np.power(10, Y1)
ax.set_yscale("log")
# Choose the default "sigma" contour levels.
if levels is None:
levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
# ALL the plotting stuff
if data_kwargs is None:
data_kwargs = dict()
if contour_kwargs is None:
contour_kwargs = dict()
if contourf_kwargs is None:
contourf_kwargs = dict()
if isinstance(cmap, str):
cmap = mpl.cm.get_cmap(cmap)
if default_color is None:
default_color = ax.plot([], [])[0].get_color()
color_match = color == "match"
data_match = data_color == "match"
colors_not_set = (color is None) & (cmap is None)
color_is_set = (color is not None) & (not color_match)
cmap_is_set = cmap is not None
reverse = False
if isinstance(color, str):
if color[-2:] == "_r":
color, reverse = color[:-2], True
else:
color, reverse = color, False
# MAKE SENSIBLE CHOICES WITH THE COLORS
if debug:
print("(1)", color, cmap)
# we only need color to be set
if colors_not_set: # color not set and cmap not set
color = default_color
cmap = "viridis"
cmap_is_set = True
color_is_set = True
if debug:
print("(1a)", color, cmap, color_is_set, cmap_is_set)
elif color_match & (not cmap_is_set): # color is match and cmap not set
color = default_color
cmap = "viridis"
color_is_set = True
cmap_is_set = True
if debug:
print("(1b)", color, cmap, color_is_set, cmap_is_set)
elif color_match & cmap_is_set:
color = mpl.cm.get_cmap(cmap)(0.5)
color_is_set = True
if debug:
print("(1c)", color, cmap, color_is_set, cmap_is_set)
elif (not color_is_set) & cmap_is_set:
color = default_color
color_is_set = True
if debug:
print("(1d)", color, cmap, color_is_set, cmap_is_set)
if debug:
print("(2)", color, cmap, color_is_set, cmap_is_set)
if data_match & colors_not_set:
# warnings.warn("Used data_color='match' w/o setting color or cmap"+
# "Setting data_color to default color")
data_match = False
data_color = color
if debug:
print("2(a)", data_color)
elif data_match & cmap_is_set:
data_color = mpl.cm.get_cmap(cmap)(0.5)
if debug:
print("2(b)", data_color)
elif data_match & color_is_set:
data_color = color
if debug:
print("2(c)", data_color)
elif data_color is None:
data_color = color
if debug:
print("2(d)", data_color)
if debug:
print("2(e)", data_color)
if debug:
print("(3)", color, cmap, color_is_set, cmap_is_set)
    # only create a linear colormap if cmap is not set
if not cmap_is_set:
if debug:
print("making linear cmap")
cmap = color_cmap(color, reverse=reverse)
cmap_is_set = True
if debug:
print("(3)", color, cmap, color_is_set, cmap_is_set)
def listornone(thing):
if thing is None:
return thing
elif isinstance(thing, list):
return thing
else:
return [thing]
# color_match is for contours and data
no_set_contour_color = contour_color is None
kwargs_not_set = (contour_kwargs.get("cmap") is None) & (
contour_kwargs.get("colors") is None
)
if kwargs_not_set:
if (color_match & no_set_contour_color) | (contour_color == "match"):
contour_kwargs["colors"] = contour_level_colors(cmap, levels)
elif contour_kwargs.get("colors") is None:
contour_kwargs["colors"] = listornone(contour_color) or listornone(color)
if contour_kwargs.get("levels") is None:
contour_kwargs["levels"] = np.array(levels) # levels
if contour_kwargs.get("linewidths") is None:
if (linewidths is None) & (lw is None):
pass
else:
lw = linewidths or lw
contour_kwargs["linewidths"] = [i for i in np.asarray([lw]).flatten()]
if contour_kwargs.get("alpha") is None:
contour_kwargs["alpha"] = alpha
if contourf_kwargs.get("levels") is None:
new_levels = np.hstack([[0], levels])
contourf_kwargs["levels"] = np.unique(new_levels) # close top contour
if contourf_kwargs.get("alpha") is None:
contourf_kwargs["alpha"] = alpha
if (contourf_kwargs.get("cmap") is None) & (contourf_kwargs.get("colors") is None):
contourf_kwargs["cmap"] = cmap
if data_kwargs.get("color") is None:
_, dmarker, dcolor = mpl.axes._base._process_plot_format(marker)
if dcolor is None:
if color_match | data_match:
data_kwargs["color"] = data_color or color
marker = dmarker
else:
data_kwargs["color"] = data_color or color
if data_kwargs.get("mfc") is None:
data_kwargs["mfc"] = mfc
if data_kwargs.get("mec") is None:
data_kwargs["mec"] = mec
if data_kwargs.get("mew") is None:
data_kwargs["mew"] = mew
if data_kwargs.get("ms") is None:
data_kwargs["ms"] = ms
if data_kwargs.get("alpha") is None:
data_kwargs["alpha"] = alpha
# FINALLY GETTING TO THE PLOTS
if plot_datapoints:
p = ax.plot(
x, y, marker, **data_kwargs, rasterized=rasterized, zorder=zorder + 1
)
xlim, ylim = ax.get_xlim(), ax.get_ylim()
else:
p = None
# if vmin is None:
# vmin = 0
# if vmax is None:
# vmax = levels[-1]
if plot_contourf:
cntrf = ax.contourf(
X1,
Y1,
sm_unflat,
**contourf_kwargs,
vmin=vmin,
vmax=vmax,
zorder=zorder + 2,
)
else:
cntrf = None
if plot_contour:
cntr = ax.contour(
X1, Y1, sm_unflat, **contour_kwargs, vmin=vmin, vmax=vmax, zorder=zorder + 3
)
else:
cntr = None
if plot_imshow:
ax.imshow(
sm_unflat,
origin="lower",
extent=[X1.min(), X1.max(), Y1.min(), Y1.max()],
zorder=zorder + 4,
)
if plot_datapoints:
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
if plot_contour & plot_contourf:
return ax, cntr, cntrf
elif plot_contour:
return ax, cntr
elif plot_contourf:
return ax, cntrf
elif plot_datapoints:
return ax, p
else:
return ax
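# --- Usage sketch (added for illustration, not part of the original source) ---
# Scatter points plus smoothed density contours in one matching color scheme.
# hist2d() above (and therefore this call) relies on the module-level helpers
# extend_hist(), data2norm() and scipy.ndimage imported as nd.
def _example_stat_plot2d():
    import numpy as np
    import matplotlib.pyplot as plt
    rng = np.random.default_rng(2)
    x = rng.normal(0, 1, 4000)
    y = 0.8 * x + rng.normal(0, 0.6, 4000)
    fig, ax = plt.subplots()
    stat_plot2d(x, y, ax=ax, bins=40, smooth=2,
                plot_data=True, plot_contour=True, color="C0")
    plt.show()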
def annotate(
text,
x,
y,
ax=None,
horizontalalignment="center",
verticalalignment="center",
ha=None,
va=None,
transform="axes",
color="k",
fontsize=9,
facecolor="w",
alpha=0.75,
bbox=dict(),
**kwargs,
):
if ax is None:
ax = plt.gca()
horizontalalignment = ha or horizontalalignment
verticalalignment = va or verticalalignment
if transform == "axes":
transform = ax.transAxes
elif transform == "data":
transform = ax.transData
bbox1 = dict(facecolor=facecolor, alpha=alpha)
bbox1.update(bbox)
text = ax.text(
x,
y,
text,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=transform,
color=color,
fontsize=fontsize,
bbox=bbox1,
**kwargs,
)
return text
# alias kept only because of old code
def jhist2d(*args, **kwargs):
return stat_plot2d(*args, **kwargs)
def corner(pos, names=None, smooth=1, bins=20, figsize=None, **kwargs):
"""produce a corner plot
Parameters
----------
    pos : np.ndarray
        array of samples with shape (M, N): M samples (rows) of N variables (columns)
    names : list of strings, optional
        names of the variables to be plotted, must have N elements, by default None
    smooth : int, optional
        how much to smooth the contours/histogram, by default 1
    bins : int, optional
        number of bins for the histograms, by default 20
    figsize : tuple, optional
        figure size, by default (2 * pos.shape[1] + 0.5, 2 * pos.shape[1] + 0.5)
    Returns
    -------
    (fig, axs)
        the matplotlib figure and the N x N array of axes
    (see the usage sketch after this function)
"""
if figsize is None:
dim = 2 * pos.shape[1] + 0.5
figsize = (dim, dim)
fig, axs = plt.subplots(
nrows=pos.shape[1],
ncols=pos.shape[1],
sharex=False,
sharey=False,
figsize=figsize,
)
for i in range(pos.shape[-1]):
for j in range(pos.shape[-1]):
ax = axs[i, j]
            if i == j:
                stat_plot1d(pos[:, i], ax=axs[i, j])
                if names is not None:
                    ax.set_xlabel(names[j])
if j < i:
stat_plot2d(
pos[:, j],
pos[:, i],
ax=ax,
bins=bins,
smooth=smooth,
plot_datapoints=True,
plot_contour=True,
**kwargs,
)
if names is not None:
try:
if i != j :
ax.set_xlabel(names[j])
ax.set_ylabel(names[i])
except:
pass
if j > i:
plt.delaxes(axs[i, j])
fig.tight_layout()
return fig, axs
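# --- Usage sketch (added for illustration, not part of the original source) ---
# Corner plot of correlated 3-dimensional Gaussian samples.
def _example_corner():
    import numpy as np
    mean = [0.0, 1.0, -1.0]
    cov = [[1.0, 0.5, 0.0],
           [0.5, 2.0, 0.3],
           [0.0, 0.3, 0.5]]
    pos = np.random.multivariate_normal(mean, cov, 3000)
    fig, axs = corner(pos, names=["a", "b", "c"], smooth=1, bins=25)
    fig.savefig("corner_example.png", dpi=150)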
def plotoneone(
color="k",
lw=2,
scale=1,
offset=0,
p=None,
invert=False,
n=50,
start=None,
end=None,
ax=None,
**kwargs,
):
if ax is None:
ax = plt.gca()
xlim, ylim = ax.get_xlim(), ax.get_ylim()
if start is None:
start = np.min([xlim[0], ylim[0]])
if end is None:
end = np.max([xlim[1], ylim[1]])
axscale = ax.get_xscale()
if axscale == "log":
xs = np.logspace(np.log10(start), np.log10(end), n)
else:
xs = np.linspace(start, end, n)
if p is not None:
scale, offset = p
ys = scale * xs + offset
if invert:
ax.plot(ys, xs, color=color, lw=lw, **kwargs)
else:
ax.plot(xs, ys, color=color, lw=lw, **kwargs)
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
def oplot_hist(
X,
bins=None,
ylim=None,
scale=0.5,
ax=None,
show_mean=False,
show_median=False,
show_percentiles=None,
):
if ax is None:
ax = plt.gca()
if ylim is None:
ylim = ax.get_ylim()
if bins is None:
bins = "auto"
H, xedge = np.histogram(
X, range=np.nanpercentile(X, [0, 100]), bins=bins, density=True
)
H = (H / H.max()) * (ylim[1] - ylim[0]) * scale + ylim[0]
ax.step(mavg(xedge), H, where="mid", color="0.25", alpha=1, zorder=10, lw=1.5)
if show_mean:
ax.axvline(np.nanmean(X), 0, 1, color="0.45", ls="--")
if show_median:
ax.axvline(np.nanmedian(X), 0, 1, color="0.45", ls="--")
if not (show_percentiles is None):
for p in show_percentiles:
ax.axvline(p, 0, 1, color="0.45", ls="--", alpha=0.5)
return ax
def multi_colored_line_plot(
x, y, z=None, cmap="viridis", norm=None, vmin=None, vmax=None, ax=None, **kwargs
):
"""
adapted from matplotlib gallery
"""
if ax is None:
ax = plt.gca()
# Create a set of line segments so that we can color them individually
# This creates the points as a N x 1 x 2 array so that we can stack points
# together easily to get the segments. The segments array for line collection
# needs to be (numlines) x (points per line) x 2 (for x and y)
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
if z is None:
z = y
# Create a continuous norm to map from data points to colors
if vmin is None:
vmin = np.nanmin(z)
if vmax is None:
vmax = np.nanmax(z)
if norm is None:
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
lc = mpl.collections.LineCollection(segments, cmap=cmap, norm=norm, **kwargs)
# Set the values used for colormapping
lc.set_array(z)
line = ax.add_collection(lc)
# fig.colorbar(line, ax=axs[0])
return line
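# --- Usage sketch (added for illustration, not part of the original source) ---
# A sine curve whose segments are colored by a second quantity (here cos(x)).
def _example_multi_colored_line_plot():
    import numpy as np
    import matplotlib.pyplot as plt
    x = np.linspace(0, 4 * np.pi, 500)
    y = np.sin(x)
    fig, ax = plt.subplots()
    line = multi_colored_line_plot(x, y, z=np.cos(x), cmap="coolwarm", ax=ax, linewidths=2)
    ax.set_xlim(x.min(), x.max())
    ax.set_ylim(-1.1, 1.1)
    fig.colorbar(line, ax=ax)
    plt.show()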
def errorbar_fill(
x=None,
y=None,
yerr=None,
*args,
ax=None,
mid=True,
color=None,
alpha=1,
lw=1,
ls="-",
fmt=None,
label=None,
**kwargs,
):
oldax = plt.gca()
if ax is None:
ax = oldax
plt.sca(ax)
if mid:
alpha_fill = alpha * 2
if alpha_fill >= 1:
alpha_fill = 1
plt.fill_between(x, y - yerr, y + yerr, color=color, alpha=alpha,label=label,**kwargs)
if mid:
plt.plot(x, y, "-", color=color, alpha=alpha, lw=lw, ls=ls,**kwargs)
plt.sca(oldax)
return None
def plot_to_origin(ax=None):
if ax is None:
ax = plt.gca()
ax.set_xlim(0, ax.get_xlim()[1])
ax.set_ylim(0, ax.get_ylim()[1])
return None
def plot_covariance_ellipse(cov, mu, n=1, ax=None, c='b', lw=1, zorder=100):
P, D, T = eigen_decomp(cov, mu, return_slope=False)
m = P[1] / P[0]
major = np.argmax(D.diagonal())
angle = np.arctan(m)[major] * 180 / np.pi
axes = n * np.sqrt(D.diagonal())
b, a = axes[np.argsort(D.diagonal())]
    # let the width be the length of the major axis
    pat = mpl.patches.Ellipse(
        angle=angle,
        xy=mu,  # ellipse centered on the mean
width=2*a,
height=2*b,
zorder=zorder,
facecolor="none",
edgecolor=c,
lw=lw,
)
if ax is None:
plt.gca().add_artist(pat)
else:
ax.add_artist(pat)
return a, b, angle
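# --- Usage sketch (added for illustration, not part of the original source) ---
# Draws the 2-sigma ellipse of a 2D Gaussian over random samples. It relies on
# the module-level eigen_decomp() helper used above.
def _example_plot_covariance_ellipse():
    import numpy as np
    import matplotlib.pyplot as plt
    cov = np.array([[2.0, 0.8], [0.8, 0.5]])
    mu = np.array([1.0, -1.0])
    pts = np.random.multivariate_normal(mu, cov, 1000)
    fig, ax = plt.subplots()
    ax.plot(*pts.T, "k.", ms=2)
    plot_covariance_ellipse(cov, mu, n=2, ax=ax, c="r", lw=2)
    ax.set_aspect("equal")
    plt.show()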
def eigenplot(A, b=[0, 0], n=3, plot_data=False, vec_c="r", ell_c="b", ell_lw=2, **kwargs):
# https://janakiev.com/blog/covariance-matrix/
eVa, eVe = np.linalg.eig(A)
b = np.array(b)
if plot_data:
data = np.random.multivariate_normal(b, A, 2000)
plt.plot(*data.T, "k.")
P, D = eVe, np.diag(eVa)
S = D ** 0.5
T = P @ S # transform from real to eigenspace
# Columns of T are scaled eigenvectors
# for eigenvector in T
for i in T.T:
i = b + n * i
plt.plot([b[0], i[0]], [b[1], i[1]], c=vec_c, zorder=100, **kwargs)
m = P[1] / P[0]
y_int = -m * b[0] + b[1]
major = np.argmax(eVa)
angle = np.arctan(m)[major] * 180 / np.pi
# print(angle)
# get the norm of the
# a1 = 2 * n * np.linalg.norm(T, axis=0)
a1 = 2 * n * np.sqrt(eVa)
h, w = a1[np.argsort(eVa)]
pat = mpl.patches.Ellipse(
angle=angle,
xy=b,
width=w,
height=h,
zorder=100,
facecolor="none",
edgecolor=ell_c,
lw=ell_lw,
)
plt.gca().add_artist(pat)
# print(m[major], y_int[major])
return m[major], y_int[major]
def eigenplot_from_data(x, y, n=3, data=False, vec_c="r", ell_c="b", ell_lw=2):
g = np.isfinite(x + y)
cov = np.cov(x[g], y[g])
b = np.mean(x[g]), np.mean(y[g])
if data:
plt.plot(x, y, "k.", zorder=0)
    out = eigenplot(cov, b, plot_data=False, n=n, vec_c=vec_c, ell_c=ell_c, ell_lw=ell_lw)
return out
def figsize(arr, default=[6, 6]):
arr = np.array(arr)
norm = np.array(arr.shape) / np.max(arr.shape)
figsize = (np.array(default) * norm)[::-1]
return figsize
|
|
#!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget']
if args.kvm:
programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install docker', file=sys.stderr)
exit(1)
else:
programs += ['lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin-core/gitian.sigs.git'])
if not os.path.isdir('bitcoin-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin-core/bitcoin-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('bitcoin'):
subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin/bitcoin.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
exit(0)
def build():
global args, workdir
os.makedirs('bitcoin-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'http://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(['make', '-C', '../bitcoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin='+args.commit, '--url', 'bitcoin='+args.url, '../bitcoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/bitcoin-*.tar.gz build/out/src/bitcoin-*.tar.gz ../bitcoin-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin='+args.commit, '--url', 'bitcoin='+args.url, '../bitcoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/bitcoin-*-win-unsigned.tar.gz inputs/bitcoin-win-unsigned.tar.gz', shell=True)
subprocess.check_call('mv build/out/bitcoin-*.zip build/out/bitcoin-*.exe ../bitcoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin='+args.commit, '--url', 'bitcoin='+args.url, '../bitcoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/bitcoin-*-osx-unsigned.tar.gz inputs/bitcoin-osx-unsigned.tar.gz', shell=True)
subprocess.check_call('mv build/out/bitcoin-*.tar.gz build/out/bitcoin-*.dmg ../bitcoin-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/bitcoin-*win64-setup.exe ../bitcoin-binaries/'+args.version, shell=True)
subprocess.check_call('mv build/out/bitcoin-*win32-setup.exe ../bitcoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/bitcoin-osx-signed.dmg ../bitcoin-binaries/'+args.version+'/bitcoin-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../bitcoin/contrib/gitian-descriptors/gitian-linux.yml'])
print('\nVerifying v'+args.version+' Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../bitcoin/contrib/gitian-descriptors/gitian-win.yml'])
print('\nVerifying v'+args.version+' MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../bitcoin/contrib/gitian-descriptors/gitian-osx.yml'])
print('\nVerifying v'+args.version+' Signed Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
print('\nVerifying v'+args.version+' Signed MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
os.chdir(workdir)
def main():
global args, workdir
parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/bitcoin/bitcoin', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', help='GPG signer to sign each build assert file')
parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.buildsign:
args.build=True
args.sign=True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
    # Set environment variable USE_LXC or USE_DOCKER to let gitian-builder know whether we use lxc or docker
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
        if 'GITIAN_HOST_IP' not in os.environ:
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
        if 'LXC_GUEST_IP' not in os.environ:
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
script_name = os.path.basename(sys.argv[0])
# Signer and version shouldn't be empty
if args.signer == '':
print(script_name+': Missing signer.')
print('Try '+script_name+' --help for more information')
exit(1)
if args.version == '':
print(script_name+': Missing version.')
print('Try '+script_name+' --help for more information')
exit(1)
# Add leading 'v' for tags
args.commit = ('' if args.commit else 'v') + args.version
print(args.commit)
if args.setup:
setup()
os.chdir('bitcoin')
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
verify()
if __name__ == '__main__':
main()
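# Illustrative invocations (added for reference; not from the original script,
# adjust signer/version to your own values):
#
#   ./gitian-build.py --setup satoshi 0.18.0
#   ./gitian-build.py --detach-sign --no-commit --build satoshi 0.18.0
#   ./gitian-build.py --verify satoshi 0.18.0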
|
|
#!/usr/bin/env python
# reference: c041828_ISO_IEC_14496-12_2005(E).pdf
##################################################
# reader and writer
##################################################
import struct
from io import BytesIO
def skip(stream, n):
stream.seek(stream.tell() + n)
def skip_zeros(stream, n):
assert stream.read(n) == b'\x00' * n
def read_int(stream):
return struct.unpack('>i', stream.read(4))[0]
def read_uint(stream):
return struct.unpack('>I', stream.read(4))[0]
def write_uint(stream, n):
stream.write(struct.pack('>I', n))
def read_ushort(stream):
return struct.unpack('>H', stream.read(2))[0]
def read_ulong(stream):
return struct.unpack('>Q', stream.read(8))[0]
def read_byte(stream):
return ord(stream.read(1))
def copy_stream(source, target, n):
buffer_size = 1024 * 1024
while n > 0:
to_read = min(buffer_size, n)
s = source.read(to_read)
        assert len(s) == to_read, 'not enough data'
target.write(s)
n -= to_read
class Atom:
def __init__(self, type, size, body):
assert len(type) == 4
self.type = type
self.size = size
self.body = body
def __str__(self):
#return '<Atom(%s):%s>' % (self.type, repr(self.body))
return '<Atom(%s):%s>' % (self.type, '')
def __repr__(self):
return str(self)
def write1(self, stream):
write_uint(stream, self.size)
stream.write(self.type)
def write(self, stream):
assert type(self.body) == bytes, '%s: %s' % (self.type, type(self.body))
assert self.size == 8 + len(self.body)
self.write1(stream)
stream.write(self.body)
def calsize(self):
return self.size
class CompositeAtom(Atom):
def __init__(self, type, size, body):
assert isinstance(body, list)
Atom.__init__(self, type, size, body)
def write(self, stream):
assert type(self.body) == list
self.write1(stream)
for atom in self.body:
atom.write(stream)
def calsize(self):
self.size = 8 + sum([atom.calsize() for atom in self.body])
return self.size
def get1(self, k):
for a in self.body:
if a.type == k:
return a
else:
            raise Exception('atom not found: %s' % k)
def get(self, *keys):
atom = self
for k in keys:
atom = atom.get1(k)
return atom
def get_all(self, k):
return list(filter(lambda x: x.type == k, self.body))
class VariableAtom(Atom):
def __init__(self, type, size, body, variables):
assert isinstance(body, bytes)
Atom.__init__(self, type, size, body)
self.variables = variables
def write(self, stream):
self.write1(stream)
i = 0
n = 0
for name, offset, value in self.variables:
stream.write(self.body[i:offset])
write_uint(stream, value)
n += offset - i + 4
i = offset + 4
stream.write(self.body[i:])
n += len(self.body) - i
assert n == len(self.body)
def get(self, k):
for v in self.variables:
if v[0] == k:
return v[2]
else:
raise Exception('field not found: ' + k)
def set(self, k, v):
for i in range(len(self.variables)):
variable = self.variables[i]
if variable[0] == k:
self.variables[i] = (k, variable[1], v)
break
else:
raise Exception('field not found: '+k)
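# --- Illustration (added, not part of the original source) ---
# Shows how VariableAtom patches a fixed-offset field on write(): the body bytes
# before and after the recorded offset are copied verbatim and the 4-byte
# big-endian value is re-emitted in between.
def _example_variable_atom():
    out = BytesIO()
    body = b'\x00' * 16 + struct.pack('>I', 1000) + b'\x00' * 4  # duration=1000 at offset 16
    atom = VariableAtom(b'mvhd', 8 + len(body), body, [('duration', 16, 1000)])
    atom.set('duration', 2000)
    atom.write(out)
    assert out.getvalue()[8 + 16:8 + 20] == struct.pack('>I', 2000)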
def read_raw(stream, size, left, type):
assert size == left + 8
body = stream.read(left)
return Atom(type, size, body)
def read_body_stream(stream, left):
body = stream.read(left)
assert len(body) == left
return body, BytesIO(body)
def read_full_atom(stream):
value = read_uint(stream)
version = value >> 24
flags = value & 0xffffff
assert version == 0
return value
def read_mvhd(stream, size, left, type):
body, stream = read_body_stream(stream, left)
value = read_full_atom(stream)
left -= 4
# new Date(movieTime * 1000 - 2082850791998L);
creation_time = read_uint(stream)
modification_time = read_uint(stream)
time_scale = read_uint(stream)
duration = read_uint(stream)
left -= 16
qt_preferred_fate = read_uint(stream)
qt_preferred_volume = read_ushort(stream)
assert stream.read(10) == b'\x00' * 10
qt_matrixA = read_uint(stream)
qt_matrixB = read_uint(stream)
qt_matrixU = read_uint(stream)
qt_matrixC = read_uint(stream)
qt_matrixD = read_uint(stream)
qt_matrixV = read_uint(stream)
qt_matrixX = read_uint(stream)
qt_matrixY = read_uint(stream)
qt_matrixW = read_uint(stream)
qt_previewTime = read_uint(stream)
qt_previewDuration = read_uint(stream)
qt_posterTime = read_uint(stream)
qt_selectionTime = read_uint(stream)
qt_selectionDuration = read_uint(stream)
qt_currentTime = read_uint(stream)
nextTrackID = read_uint(stream)
left -= 80
assert left == 0
return VariableAtom(b'mvhd', size, body, [('duration', 16, duration)])
def read_tkhd(stream, size, left, type):
body, stream = read_body_stream(stream, left)
value = read_full_atom(stream)
left -= 4
# new Date(movieTime * 1000 - 2082850791998L);
creation_time = read_uint(stream)
modification_time = read_uint(stream)
track_id = read_uint(stream)
assert stream.read(4) == b'\x00' * 4
duration = read_uint(stream)
left -= 20
assert stream.read(8) == b'\x00' * 8
qt_layer = read_ushort(stream)
qt_alternate_group = read_ushort(stream)
qt_volume = read_ushort(stream)
assert stream.read(2) == b'\x00\x00'
qt_matrixA = read_uint(stream)
qt_matrixB = read_uint(stream)
qt_matrixU = read_uint(stream)
qt_matrixC = read_uint(stream)
qt_matrixD = read_uint(stream)
qt_matrixV = read_uint(stream)
qt_matrixX = read_uint(stream)
qt_matrixY = read_uint(stream)
qt_matrixW = read_uint(stream)
qt_track_width = read_uint(stream)
width = qt_track_width >> 16
qt_track_height = read_uint(stream)
height = qt_track_height >> 16
left -= 60
assert left == 0
return VariableAtom(b'tkhd', size, body, [('duration', 20, duration)])
def read_mdhd(stream, size, left, type):
body, stream = read_body_stream(stream, left)
value = read_full_atom(stream)
left -= 4
# new Date(movieTime * 1000 - 2082850791998L);
creation_time = read_uint(stream)
modification_time = read_uint(stream)
time_scale = read_uint(stream)
duration = read_uint(stream)
left -= 16
packed_language = read_ushort(stream)
qt_quality = read_ushort(stream)
left -= 4
assert left == 0
return VariableAtom(b'mdhd', size, body, [('duration', 16, duration)])
def read_hdlr(stream, size, left, type):
body, stream = read_body_stream(stream, left)
value = read_full_atom(stream)
left -= 4
qt_component_type = read_uint(stream)
handler_type = read_uint(stream)
qt_component_manufacturer = read_uint(stream)
qt_component_flags = read_uint(stream)
qt_component_flags_mask = read_uint(stream)
left -= 20
track_name = stream.read(left - 1)
assert stream.read(1) == b'\x00'
return Atom(b'hdlr', size, body)
def read_vmhd(stream, size, left, type):
body, stream = read_body_stream(stream, left)
value = read_full_atom(stream)
left -= 4
assert left == 8
graphic_mode = read_ushort(stream)
op_color_read = read_ushort(stream)
op_color_green = read_ushort(stream)
op_color_blue = read_ushort(stream)
return Atom(b'vmhd', size, body)
def read_stsd(stream, size, left, type):
value = read_full_atom(stream)
left -= 4
entry_count = read_uint(stream)
left -= 4
children = []
for i in range(entry_count):
atom = read_atom(stream)
children.append(atom)
left -= atom.size
assert left == 0
#return Atom('stsd', size, children)
class stsd_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
write_uint(stream, self.body[0])
write_uint(stream, len(self.body[1]))
for atom in self.body[1]:
atom.write(stream)
def calsize(self):
oldsize = self.size # TODO: remove
self.size = 8 + 4 + 4 + sum([atom.calsize() for atom in self.body[1]])
assert oldsize == self.size, '%s: %d, %d' % (self.type, oldsize, self.size) # TODO: remove
return self.size
return stsd_atom(b'stsd', size, (value, children))
def read_avc1(stream, size, left, type):
body, stream = read_body_stream(stream, left)
skip_zeros(stream, 6)
data_reference_index = read_ushort(stream)
skip_zeros(stream, 2)
skip_zeros(stream, 2)
skip_zeros(stream, 12)
width = read_ushort(stream)
height = read_ushort(stream)
horizontal_rez = read_uint(stream) >> 16
vertical_rez = read_uint(stream) >> 16
assert stream.read(4) == b'\x00' * 4
frame_count = read_ushort(stream)
string_len = read_byte(stream)
compressor_name = stream.read(31)
depth = read_ushort(stream)
assert stream.read(2) == b'\xff\xff'
left -= 78
child = read_atom(stream)
assert child.type in (b'avcC', b'pasp'), 'if the sub atom is not avcC or pasp (actual %s), you should not cache raw body' % child.type
left -= child.size
stream.read(left) # XXX
return Atom(b'avc1', size, body)
def read_avcC(stream, size, left, type):
stream.read(left)
return Atom(b'avcC', size, None)
def read_stts(stream, size, left, type):
value = read_full_atom(stream)
left -= 4
entry_count = read_uint(stream)
assert entry_count == 1
left -= 4
samples = []
for i in range(entry_count):
sample_count = read_uint(stream)
sample_duration = read_uint(stream)
samples.append((sample_count, sample_duration))
left -= 8
assert left == 0
#return Atom('stts', size, None)
class stts_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
write_uint(stream, self.body[0])
write_uint(stream, len(self.body[1]))
for sample_count, sample_duration in self.body[1]:
write_uint(stream, sample_count)
write_uint(stream, sample_duration)
def calsize(self):
oldsize = self.size # TODO: remove
self.size = 8 + 4 + 4 + len(self.body[1]) * 8
assert oldsize == self.size, '%s: %d, %d' % (self.type, oldsize, self.size) # TODO: remove
return self.size
return stts_atom(b'stts', size, (value, samples))
def read_stss(stream, size, left, type):
value = read_full_atom(stream)
left -= 4
entry_count = read_uint(stream)
left -= 4
samples = []
for i in range(entry_count):
sample = read_uint(stream)
samples.append(sample)
left -= 4
assert left == 0
#return Atom('stss', size, None)
class stss_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
write_uint(stream, self.body[0])
write_uint(stream, len(self.body[1]))
for sample in self.body[1]:
write_uint(stream, sample)
def calsize(self):
self.size = 8 + 4 + 4 + len(self.body[1]) * 4
return self.size
return stss_atom(b'stss', size, (value, samples))
def read_stsc(stream, size, left, type):
value = read_full_atom(stream)
left -= 4
entry_count = read_uint(stream)
left -= 4
chunks = []
for i in range(entry_count):
first_chunk = read_uint(stream)
samples_per_chunk = read_uint(stream)
sample_description_index = read_uint(stream)
assert sample_description_index == 1 # what is it?
chunks.append((first_chunk, samples_per_chunk, sample_description_index))
left -= 12
#chunks, samples = zip(*chunks)
#total = 0
#for c, s in zip(chunks[1:], samples):
# total += c*s
#print 'total', total
assert left == 0
#return Atom('stsc', size, None)
class stsc_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
write_uint(stream, self.body[0])
write_uint(stream, len(self.body[1]))
for first_chunk, samples_per_chunk, sample_description_index in self.body[1]:
write_uint(stream, first_chunk)
write_uint(stream, samples_per_chunk)
write_uint(stream, sample_description_index)
def calsize(self):
self.size = 8 + 4 + 4 + len(self.body[1]) * 12
return self.size
return stsc_atom(b'stsc', size, (value, chunks))
def read_stsz(stream, size, left, type):
value = read_full_atom(stream)
left -= 4
sample_size = read_uint(stream)
sample_count = read_uint(stream)
left -= 8
assert sample_size == 0
total = 0
sizes = []
if sample_size == 0:
for i in range(sample_count):
entry_size = read_uint(stream)
sizes.append(entry_size)
total += entry_size
left -= 4
assert left == 0
#return Atom('stsz', size, None)
class stsz_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
write_uint(stream, self.body[0])
write_uint(stream, self.body[1])
write_uint(stream, self.body[2])
for entry_size in self.body[3]:
write_uint(stream, entry_size)
def calsize(self):
self.size = 8 + 4 + 8 + len(self.body[3]) * 4
return self.size
return stsz_atom(b'stsz', size, (value, sample_size, sample_count, sizes))
def read_stco(stream, size, left, type):
value = read_full_atom(stream)
left -= 4
entry_count = read_uint(stream)
left -= 4
offsets = []
for i in range(entry_count):
chunk_offset = read_uint(stream)
offsets.append(chunk_offset)
left -= 4
assert left == 0
#return Atom('stco', size, None)
class stco_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
write_uint(stream, self.body[0])
write_uint(stream, len(self.body[1]))
for chunk_offset in self.body[1]:
write_uint(stream, chunk_offset)
def calsize(self):
self.size = 8 + 4 + 4 + len(self.body[1]) * 4
return self.size
return stco_atom(b'stco', size, (value, offsets))
def read_ctts(stream, size, left, type):
value = read_full_atom(stream)
left -= 4
entry_count = read_uint(stream)
left -= 4
samples = []
for i in range(entry_count):
sample_count = read_uint(stream)
sample_offset = read_uint(stream)
samples.append((sample_count, sample_offset))
left -= 8
assert left == 0
class ctts_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
write_uint(stream, self.body[0])
write_uint(stream, len(self.body[1]))
for sample_count, sample_offset in self.body[1]:
write_uint(stream, sample_count)
write_uint(stream, sample_offset)
def calsize(self):
self.size = 8 + 4 + 4 + len(self.body[1]) * 8
return self.size
return ctts_atom(b'ctts', size, (value, samples))
def read_smhd(stream, size, left, type):
body, stream = read_body_stream(stream, left)
value = read_full_atom(stream)
left -= 4
balance = read_ushort(stream)
assert stream.read(2) == b'\x00\x00'
left -= 4
assert left == 0
return Atom(b'smhd', size, body)
def read_mp4a(stream, size, left, type):
body, stream = read_body_stream(stream, left)
assert stream.read(6) == b'\x00' * 6
data_reference_index = read_ushort(stream)
assert stream.read(8) == b'\x00' * 8
channel_count = read_ushort(stream)
sample_size = read_ushort(stream)
assert stream.read(4) == b'\x00' * 4
time_scale = read_ushort(stream)
assert stream.read(2) == b'\x00' * 2
left -= 28
atom = read_atom(stream)
assert atom.type == b'esds'
left -= atom.size
assert left == 0
return Atom(b'mp4a', size, body)
def read_descriptor(stream):
tag = read_byte(stream)
raise NotImplementedError()
def read_esds(stream, size, left, type):
value = read_uint(stream)
version = value >> 24
assert version == 0
flags = value & 0xffffff
left -= 4
body = stream.read(left)
return Atom(b'esds', size, None)
def read_composite_atom(stream, size, left, type):
children = []
while left > 0:
atom = read_atom(stream)
children.append(atom)
left -= atom.size
assert left == 0, left
return CompositeAtom(type, size, children)
def read_mdat(stream, size, left, type):
source_start = stream.tell()
source_size = left
skip(stream, left)
#return Atom(type, size, None)
#raise NotImplementedError()
class mdat_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
self.write2(stream)
def write2(self, stream):
source, source_start, source_size = self.body
original = source.tell()
source.seek(source_start)
copy_stream(source, stream, source_size)
def calsize(self):
return self.size
return mdat_atom(b'mdat', size, (stream, source_start, source_size))
atom_readers = {
b'mvhd': read_mvhd, # merge duration
b'tkhd': read_tkhd, # merge duration
b'mdhd': read_mdhd, # merge duration
b'hdlr': read_hdlr, # nothing
b'vmhd': read_vmhd, # nothing
b'stsd': read_stsd, # nothing
b'avc1': read_avc1, # nothing
b'avcC': read_avcC, # nothing
b'stts': read_stts, # sample_count, sample_duration
b'stss': read_stss, # join indexes
b'stsc': read_stsc, # merge # sample numbers
b'stsz': read_stsz, # merge # samples
b'stco': read_stco, # merge # chunk offsets
b'ctts': read_ctts, # merge
b'smhd': read_smhd, # nothing
b'mp4a': read_mp4a, # nothing
    b'esds': read_esds, # nothing
b'ftyp': read_raw,
b'yqoo': read_raw,
b'moov': read_composite_atom,
b'trak': read_composite_atom,
b'mdia': read_composite_atom,
b'minf': read_composite_atom,
b'dinf': read_composite_atom,
b'stbl': read_composite_atom,
b'iods': read_raw,
b'dref': read_raw,
b'free': read_raw,
b'edts': read_raw,
b'pasp': read_raw,
b'mdat': read_mdat,
}
#stsd sample descriptions (codec types, initialization etc.)
#stts (decoding) time-to-sample
#ctts (composition) time to sample
#stsc sample-to-chunk, partial data-offset information
#stsz sample sizes (framing)
#stz2 compact sample sizes (framing)
#stco chunk offset, partial data-offset information
#co64 64-bit chunk offset
#stss sync sample table (random access points)
#stsh shadow sync sample table
#padb sample padding bits
#stdp sample degradation priority
#sdtp independent and disposable samples
#sbgp sample-to-group
#sgpd sample group description
#subs sub-sample information
def read_atom(stream):
header = stream.read(8)
if not header:
return
assert len(header) == 8
n = 0
size = struct.unpack('>I', header[:4])[0]
assert size > 0
n += 4
type = header[4:8]
n += 4
assert type != b'uuid'
if size == 1:
size = read_ulong(stream)
n += 8
left = size - n
if type in atom_readers:
return atom_readers[type](stream, size, left, type)
raise NotImplementedError('%s: %d' % (type, left))
def write_atom(stream, atom):
atom.write(stream)
def parse_atoms(stream):
atoms = []
while True:
atom = read_atom(stream)
if atom:
atoms.append(atom)
else:
break
return atoms
def read_mp4(stream):
atoms = parse_atoms(stream)
moov = list(filter(lambda x: x.type == b'moov', atoms))
mdat = list(filter(lambda x: x.type == b'mdat', atoms))
assert len(moov) == 1
assert len(mdat) == 1
moov = moov[0]
mdat = mdat[0]
return atoms, moov, mdat
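# --- Usage sketch (added for illustration, not part of the original source) ---
# Parse an MP4 file and inspect its top-level atoms; the path is illustrative.
def _example_read_mp4(path='example.mp4'):
    with open(path, 'rb') as stream:
        atoms, moov, mdat = read_mp4(stream)
        for atom in atoms:
            print(atom.type, atom.size)
        print('mvhd duration:', moov.get(b'mvhd').get('duration'))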
##################################################
# merge
##################################################
def merge_stts(samples_list):
sample_list = []
for samples in samples_list:
assert len(samples) == 1
sample_list.append(samples[0])
counts, durations = zip(*sample_list)
assert len(set(durations)) == 1, 'not all durations equal'
return [(sum(counts), durations[0])]
def merge_stss(samples, sample_number_list):
results = []
start = 0
    for sample_indexes, sample_number in zip(samples, sample_number_list):
        results.extend(map(lambda x: start + x, sample_indexes))
        start += sample_number
return results
def merge_stsc(chunks_list, total_chunk_number_list):
results = []
chunk_index = 1
for chunks, total in zip(chunks_list, total_chunk_number_list):
for i in range(len(chunks)):
if i < len(chunks) - 1:
chunk_number = chunks[i + 1][0] - chunks[i][0]
else:
chunk_number = total + 1 - chunks[i][0]
sample_number = chunks[i][1]
description = chunks[i][2]
results.append((chunk_index, sample_number, description))
chunk_index += chunk_number
return results
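# Illustrative merge_stsc() walk-through (numbers are made up): with
#   chunks_list             = [[(1, 10, 1)], [(1, 12, 1)]]
#   total_chunk_number_list = [4, 3]
# the first file contributes (1, 10, 1) and advances chunk_index by
# 4 + 1 - 1 = 4, so the second file's entry is renumbered to start at chunk 5,
# giving [(1, 10, 1), (5, 12, 1)].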
def merge_stco(offsets_list, mdats):
offset = 0
results = []
for offsets, mdat in zip(offsets_list, mdats):
results.extend(offset + x - mdat.body[1] for x in offsets)
offset += mdat.size - 8
return results
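# Illustrative merge_stco() walk-through (numbers are made up): for two source
# files whose mdat payloads start at offsets 100 and 120 and whose mdat atoms
# are 1008 and 2008 bytes (1000 and 2000 payload bytes), chunk offsets
# [100, 500] and [120, 620] become [0, 400, 1000, 1500] -- offsets relative to
# the start of the concatenated payload; merge_moov() later shifts them to the
# final mdat position.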
def merge_stsz(sizes_list):
return sum(sizes_list, [])
def merge_mdats(mdats):
total_size = sum(x.size - 8 for x in mdats) + 8
class multi_mdat_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
self.write2(stream)
def write2(self, stream):
for mdat in self.body:
mdat.write2(stream)
def calsize(self):
return self.size
return multi_mdat_atom(b'mdat', total_size, mdats)
def merge_moov(moovs, mdats):
mvhd_duration = 0
for x in moovs:
mvhd_duration += x.get(b'mvhd').get('duration')
tkhd_durations = [0, 0]
mdhd_durations = [0, 0]
for x in moovs:
traks = x.get_all(b'trak')
assert len(traks) == 2
tkhd_durations[0] += traks[0].get(b'tkhd').get('duration')
tkhd_durations[1] += traks[1].get(b'tkhd').get('duration')
mdhd_durations[0] += traks[0].get(b'mdia', b'mdhd').get('duration')
mdhd_durations[1] += traks[1].get(b'mdia', b'mdhd').get('duration')
#mvhd_duration = min(mvhd_duration, tkhd_durations)
trak0s = [x.get_all(b'trak')[0] for x in moovs]
trak1s = [x.get_all(b'trak')[1] for x in moovs]
stts0 = merge_stts(x.get(b'mdia', b'minf', b'stbl', b'stts').body[1] for x in trak0s)
stts1 = merge_stts(x.get(b'mdia', b'minf', b'stbl', b'stts').body[1] for x in trak1s)
stss = merge_stss((x.get(b'mdia', b'minf', b'stbl', b'stss').body[1] for x in trak0s), (len(x.get(b'mdia', b'minf', b'stbl', b'stsz').body[3]) for x in trak0s))
stsc0 = merge_stsc((x.get(b'mdia', b'minf', b'stbl', b'stsc').body[1] for x in trak0s), (len(x.get(b'mdia', b'minf', b'stbl', b'stco').body[1]) for x in trak0s))
stsc1 = merge_stsc((x.get(b'mdia', b'minf', b'stbl', b'stsc').body[1] for x in trak1s), (len(x.get(b'mdia', b'minf', b'stbl', b'stco').body[1]) for x in trak1s))
stco0 = merge_stco((x.get(b'mdia', b'minf', b'stbl', b'stco').body[1] for x in trak0s), mdats)
stco1 = merge_stco((x.get(b'mdia', b'minf', b'stbl', b'stco').body[1] for x in trak1s), mdats)
stsz0 = merge_stsz((x.get(b'mdia', b'minf', b'stbl', b'stsz').body[3] for x in trak0s))
stsz1 = merge_stsz((x.get(b'mdia', b'minf', b'stbl', b'stsz').body[3] for x in trak1s))
ctts = sum((x.get(b'mdia', b'minf', b'stbl', b'ctts').body[1] for x in trak0s), [])
moov = moovs[0]
moov.get(b'mvhd').set('duration', mvhd_duration)
trak0 = moov.get_all(b'trak')[0]
trak1 = moov.get_all(b'trak')[1]
trak0.get(b'tkhd').set('duration', tkhd_durations[0])
trak1.get(b'tkhd').set('duration', tkhd_durations[1])
trak0.get(b'mdia', b'mdhd').set('duration', mdhd_durations[0])
trak1.get(b'mdia', b'mdhd').set('duration', mdhd_durations[1])
stts_atom = trak0.get(b'mdia', b'minf', b'stbl', b'stts')
stts_atom.body = stts_atom.body[0], stts0
stts_atom = trak1.get(b'mdia', b'minf', b'stbl', b'stts')
stts_atom.body = stts_atom.body[0], stts1
stss_atom = trak0.get(b'mdia', b'minf', b'stbl', b'stss')
stss_atom.body = stss_atom.body[0], stss
stsc_atom = trak0.get(b'mdia', b'minf', b'stbl', b'stsc')
stsc_atom.body = stsc_atom.body[0], stsc0
stsc_atom = trak1.get(b'mdia', b'minf', b'stbl', b'stsc')
stsc_atom.body = stsc_atom.body[0], stsc1
    stco_atom = trak0.get(b'mdia', b'minf', b'stbl', b'stco')
    stco_atom.body = stco_atom.body[0], stco0
    stco_atom = trak1.get(b'mdia', b'minf', b'stbl', b'stco')
    stco_atom.body = stco_atom.body[0], stco1
stsz_atom = trak0.get(b'mdia', b'minf', b'stbl', b'stsz')
stsz_atom.body = stsz_atom.body[0], stsz_atom.body[1], len(stsz0), stsz0
stsz_atom = trak1.get(b'mdia', b'minf', b'stbl', b'stsz')
stsz_atom.body = stsz_atom.body[0], stsz_atom.body[1], len(stsz1), stsz1
ctts_atom = trak0.get(b'mdia', b'minf', b'stbl', b'ctts')
ctts_atom.body = ctts_atom.body[0], ctts
old_moov_size = moov.size
new_moov_size = moov.calsize()
new_mdat_start = mdats[0].body[1] + new_moov_size - old_moov_size
stco0 = list(map(lambda x: x + new_mdat_start, stco0))
stco1 = list(map(lambda x: x + new_mdat_start, stco1))
    stco_atom = trak0.get(b'mdia', b'minf', b'stbl', b'stco')
    stco_atom.body = stco_atom.body[0], stco0
    stco_atom = trak1.get(b'mdia', b'minf', b'stbl', b'stco')
    stco_atom.body = stco_atom.body[0], stco1
return moov
def merge_mp4s(files, output):
assert files
ins = [open(mp4, 'rb') for mp4 in files]
mp4s = list(map(read_mp4, ins))
moovs = list(map(lambda x: x[1], mp4s))
mdats = list(map(lambda x: x[2], mp4s))
moov = merge_moov(moovs, mdats)
mdat = merge_mdats(mdats)
with open(output, 'wb') as output:
for x in mp4s[0][0]:
if x.type == b'moov':
moov.write(output)
elif x.type == b'mdat':
mdat.write(output)
else:
x.write(output)
##################################################
# main
##################################################
# TODO: FIXME: duplicate of join_flv
def guess_output(inputs):
import os.path
    inputs = list(map(os.path.basename, inputs))
n = min(map(len, inputs))
for i in reversed(range(1, n)):
if len(set(s[:i] for s in inputs)) == 1:
return inputs[0][:i] + '.mp4'
return 'output.mp4'
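# For example (hypothetical file names): guess_output(['part-1.mp4', 'part-2.mp4'])
# strips the paths, finds the longest common basename prefix 'part-' and returns
# 'part-.mp4'; if the basenames share no common prefix it falls back to 'output.mp4'.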
def concat_mp4(mp4s, output = None):
assert mp4s, 'no mp4 file found'
import os.path
if not output:
output = guess_output(mp4s)
elif os.path.isdir(output):
output = os.path.join(output, guess_output(mp4s))
print('Merging video parts...')
merge_mp4s(mp4s, output)
return output
def usage():
print('Usage: [python3] join_mp4.py --output TARGET.mp4 mp4...')
def main():
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:", ["help", "output="])
except getopt.GetoptError as err:
usage()
sys.exit(1)
output = None
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
output = a
else:
usage()
sys.exit(1)
if not args:
usage()
sys.exit(1)
concat_mp4(args, output)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
"""
Usage: ssmrandom {recv|send|rawsend} [options]+ [host IP(only for recv)]+
Common options:
-h print this message
-v print version information
-g SSM multicast group (in 232.0.0.0/8)
-i local bind address
-p port
-f stay in the foreground (and log to stderr)
-F do not detach (but otherwise act as a daemon) - useful for init
-P <pidfile>
-L <loglevel> set logging level (e.g. DEBUG)
recv options:
-s buffer size
-o output PIPE
send and rawsend options:
-t TTL
-s number of bytes of entropy payload
-r input entropy device
ssmrandom can be operated in either send or receive mode. In send mode it will
read data from the input entropy device and will transmit it framed (except when
using rawsend) as JSON objects on a multicast group in SSM address space. In
receive mode ssmrandom will receive a random sample (using random sleep intervals
between 1 and 20 seconds) of such SSM messages and will write the entropy
payload to a PIPE where it can be consumed by rngd from the rng-tools package.
BUGS: only ipv4 is supported
NOTE that you may need to enable igmpv3 on your network for SSM to work.
"""
from logging import StreamHandler
from ssmrandom.pidfile import PidFile
__author__ = 'leifj'
import socket
import json
import os
import base64
import logging
import getopt
import sys
import random
import time
from logging.handlers import SysLogHandler
import daemon
import lockfile
if not hasattr(socket, 'IP_MULTICAST_TTL'):
setattr(socket, 'IP_MULTICAST_TTL', 33)
if not hasattr(socket, 'IP_ADD_SOURCE_MEMBERSHIP'):
setattr(socket, 'IP_ADD_SOURCE_MEMBERSHIP', 39)
VERSION = "0.3"
PROTOCOL_VERSION = "1.0"
SSM_GROUP = '232.0.1.100'
SSM_PORT = '49999'
ENTROPY_DEVICE='/dev/urandom'
RNGD_PIPE = "/var/run/ssm-rng-pipe"
BUFSZ= "4096"
MSGSZ = "1024"
LOGLEVEL = "WARNING"
MCTTL = "32"
PIDFILE = '/var/run/ssmrandom.pid'
def _setup_logging(level,foreground=False):
loglevel = getattr(logging, level.upper(), None)
if not isinstance(loglevel, int):
        raise ValueError('Invalid log level: %s' % level)
if foreground:
handler = StreamHandler()
else:
handler = SysLogHandler(address='/dev/log',facility=SysLogHandler.LOG_DAEMON)
pid = os.getpid()
formatter = logging.Formatter('ssmrandom['+str(pid)+'] %(message)s')
handler.setFormatter(formatter)
logging.root.addHandler(handler)
logging.root.setLevel(loglevel)
def usage():
print __doc__
def _sender(s,group,bufsz,src,level,foreground):
_setup_logging(level,foreground)
with open(src) as fd:
logging.info("entropy SSM transmitter v%s starting..." % VERSION)
while True:
try:
logging.debug("about to read from %s" % src)
d = fd.read(bufsz)
if sys.argv[1] == 'send':
e = base64.b64encode(d)
msg = {'v': PROTOCOL_VERSION, 's': src, 'd': e}
s.send(json.dumps(msg))
else: # rawsend
s.send(d)
logging.debug("sending %d bytes of entropy to SSM:@%s" % (len(d), group))
except KeyboardInterrupt,ex:
raise ex
except Exception, ex:
logging.warning(ex)
pass
def _receiver(s,group,bufsz,dst,level,foreground):
_setup_logging(level,foreground)
with open(dst, "w+") as fd:
logging.info("entropy SSM receiver v%s starting..." % VERSION)
while True:
try:
msg = json.loads(s.recv(bufsz))
data = base64.b64decode(msg['d'])
logging.debug(msg)
logging.info("sending %d bytes of entropy from SSM:@%s upstream" % (len(data), group))
fd.write(data)
z = random.randint(1, 20)
logging.debug("sleeping for %d seconds..." % z)
time.sleep(z)
except KeyboardInterrupt,ex:
raise ex
except Exception, ex:
logging.warning(ex)
time.sleep(1)
pass
def main():
try:
_main()
except KeyboardInterrupt:
sys.exit()
def _main():
opts = {}
args = []
flags = None
if len(sys.argv) < 2:
usage()
sys.exit(2)
    if sys.argv[1] in ('recv',):
flags = 'vfFhL:P:g:s:i:p:o:'
elif sys.argv[1] in ('send','rawsend'):
flags = 'vfFhL:P:g:s:t:p:r:'
else:
usage()
sys.exit()
try:
opts, args = getopt.getopt(sys.argv[2:], flags)
opts = dict(opts)
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
if '-h' in opts:
usage()
sys.exit()
if '-v' in opts:
print "ssmrandom version %s (c) NORDUnet A/S 2012" % VERSION
sys.exit()
opts.setdefault('-i','0.0.0.0')
opts.setdefault('-p',SSM_PORT)
opts.setdefault('-o',RNGD_PIPE)
opts.setdefault('-g',SSM_GROUP)
opts.setdefault('-L',LOGLEVEL)
opts.setdefault('-r',ENTROPY_DEVICE)
opts.setdefault('-t',MCTTL)
opts.setdefault('-P',PIDFILE)
context = None
if not '-f' in opts:
context = daemon.DaemonContext(working_directory='/tmp')
context.pidfile = PidFile(opts['-P'])
if sys.argv[1] == 'recv':
group = opts['-g']
port = int(opts['-p'])
opts.setdefault('-s',BUFSZ)
if len(args) < 1:
usage()
sys.exit(2)
dst = opts['-o']
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
for host in args:
name,aliases,addrs = socket.gethostbyaddr(host)
imr = None
for addr in addrs:
if ':' in addr:
pass
else:
imr = (socket.inet_pton(socket.AF_INET, group) +
socket.inet_pton(socket.AF_INET, opts['-i']) +
socket.inet_pton(socket.AF_INET, addr))
if imr is not None:
s.setsockopt(socket.SOL_IP, socket.IP_ADD_SOURCE_MEMBERSHIP, imr)
s.bind((group,port))
if not os.path.exists(dst):
os.mkfifo(dst)
if context is not None:
context.files_preserve=[s]
if '-F' in opts:
context.detach_process = False
with context as ctx:
_receiver(s,group,int(opts['-s']),dst,opts['-L'],False)
else:
_receiver(s,group,int(opts['-s']),dst,opts['-L'],True)
elif sys.argv[1] == 'send' or sys.argv[1] == 'rawsend':
opts.setdefault('-s',MSGSZ)
group = opts['-g']
port = int(opts['-p'])
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
if '-t' in opts:
s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, chr(int(opts['-t'])))
if '-i' in opts:
s.bind((opts['-i'], 0))
s.connect((group,port))
if context is not None:
context.files_preserve=[s]
if '-F' in opts:
context.detach_process = False
with context as ctx:
_sender(s,group,int(opts['-s']),opts['-r'],opts['-L'],False)
else:
_sender(s,group,int(opts['-s']),opts['-r'],opts['-L'],True)
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for maml_bbb.py."""
from __future__ import print_function
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib.layers.python import layers as tf_layers
FLAGS = flags.FLAGS
## Network helpers
def conv_block(x, weight, bias, reuse, scope):
# conv
x = tf.nn.conv2d(x, weight, [1, 1, 1, 1], 'SAME') + bias
# batch norm
x = tf_layers.batch_norm(
x, activation_fn=tf.nn.relu, reuse=reuse, scope=scope)
# # pooling
# x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
return x
## Loss functions
def mse(pred, label):
pred = tf.reshape(pred, [-1])
label = tf.reshape(label, [-1])
return tf.reduce_mean(tf.square(pred - label))
class MAML(object):
"""MAML algo object."""
def __init__(self, encoder_w, dim_input=1, dim_output=1):
"""Must call construct_model() after initializing MAML."""
self.beta = tf.placeholder_with_default(FLAGS.beta, ())
self.dim_input = dim_input
self.dim_output = dim_output
self.update_lr = FLAGS.update_lr
self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
self.loss_func = mse
self.encoder_w = encoder_w
self.dim_hidden = FLAGS.num_filters
self.forward = self.forward_conv
self.construct_weights = self.construct_conv_weights
self.channels = 1
self.img_size = int(np.sqrt(self.dim_input / self.channels))
def construct_model(self,
input_tensors=None,
prefix='metatrain_',
test_num_updates=0):
"""a: training data for inner gradient, b: test data for meta gradient."""
self.inputa = input_tensors['inputa']
self.inputb = input_tensors['inputb']
self.labela = input_tensors['labela']
self.labelb = input_tensors['labelb']
with tf.variable_scope('model', reuse=None) as training_scope:
if 'weights' in dir(self):
training_scope.reuse_variables()
weights = self.weights
else:
# Define the weights
self.weights = weights = self.construct_weights()
# outputbs[i] and lossesb[i] is the output and loss after i+1 gradient
# updates
num_updates = max(test_num_updates, FLAGS.num_updates)
def task_metalearn(inp, reuse=True):
"""Run meta learning."""
TRAIN = 'train' in prefix # pylint: disable=invalid-name
# Perform gradient descent for one task in the meta-batch.
inputa, inputb, labela, labelb = inp
task_outputbs, task_lossesb = [], []
task_msesb = []
# support_pred and loss, (n_data_per_task, out_dim)
task_outputa = self.forward(
inputa, weights, reuse=reuse) # only not reuse on the first iter
# labela is (n_data_per_task, out_dim)
task_lossa = self.loss_func(task_outputa, labela)
# INNER LOOP (no change with ib)
grads = tf.gradients(task_lossa, list(weights.values()))
if FLAGS.stop_grad:
grads = [tf.stop_gradient(grad) for grad in grads]
gradients = dict(zip(weights.keys(), grads))
# theta_pi = theta - alpha * grads
fast_weights = dict(
zip(weights.keys(), [
weights[key] - self.update_lr * gradients[key]
for key in weights.keys()
]))
# use theta_pi to forward meta-test
output = self.forward(inputb, weights, reuse=True)
task_outputbs.append(output)
# meta-test loss
task_kl_loss = sum(self.encoder_w.losses)
task_msesb.append(self.loss_func(output, labelb))
task_lossesb.append(
self.loss_func(output, labelb) + self.beta * task_kl_loss)
def while_body(fast_weights_values):
"""Update params."""
loss = self.loss_func(
self.forward(
inputa,
dict(zip(fast_weights.keys(), fast_weights_values)),
reuse=True), labela)
grads = tf.gradients(loss, fast_weights_values)
fast_weights_values = [
v - self.update_lr * g for v, g in zip(fast_weights_values, grads)
]
return fast_weights_values
fast_weights_values = tf.while_loop(
lambda _: True,
while_body,
loop_vars=[fast_weights.values()],
maximum_iterations=num_updates - 1,
back_prop=TRAIN)
fast_weights = dict(zip(fast_weights.keys(), fast_weights_values))
output = self.forward(inputb, fast_weights, reuse=True)
task_outputbs.append(output)
task_msesb.append(self.loss_func(output, labelb))
task_lossesb.append(
self.loss_func(output, labelb) + self.beta * task_kl_loss)
task_output = [
task_outputa, task_outputbs, task_lossa, task_lossesb, task_msesb
]
return task_output
if FLAGS.norm is not None:
# to initialize the batch norm vars, might want to combine this, and
# not run idx 0 twice.
_ = task_metalearn(
(self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]),
False)
out_dtype = [
tf.float32, [tf.float32] * 2, tf.float32, [tf.float32] * 2,
[tf.float32] * 2
]
result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, \
self.labela, self.labelb), dtype=out_dtype, \
parallel_iterations=FLAGS.meta_batch_size)
outputas, outputbs, lossesa, lossesb, msesb = result
## Performance & Optimization
if 'train' in prefix:
# lossesa is length(meta_batch_size)
self.total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(
FLAGS.meta_batch_size)
self.total_losses2 = total_losses2 = [
tf.reduce_sum(msesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(len(msesb))
]
self.total_losses3 = total_losses3 = [
tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(len(lossesb))
]
# after the map_fn
self.outputas, self.outputbs = outputas, outputbs
# OUTER LOOP
if FLAGS.metatrain_iterations > 0:
optimizer = tf.train.AdamOptimizer(self.meta_lr)
THETA = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model') # pylint: disable=invalid-name
PHI = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='encoder') # pylint: disable=invalid-name
self.gvs_theta = gvs_theta = optimizer.compute_gradients(
self.total_losses2[-1], THETA)
metatrain_theta_op = optimizer.apply_gradients(gvs_theta)
self.gvs_phi = gvs_phi = optimizer.compute_gradients(
self.total_losses3[-1], PHI)
metatrain_phi_op = optimizer.apply_gradients(gvs_phi)
with tf.control_dependencies([metatrain_theta_op, metatrain_phi_op]):
self.metatrain_op = tf.no_op()
scale_v = [
v for v in self.encoder_w.trainable_variables if 'scale' in v.name
]
scale_norm = [tf.reduce_mean(v) for v in scale_v]
scale_norm = tf.reduce_mean(scale_norm)
tf.summary.scalar(prefix + 'full_loss', total_losses3[-1])
tf.summary.scalar(prefix + 'regularizer',
total_losses3[-1] - total_losses2[-1])
tf.summary.scalar(prefix + 'untransformed_scale', scale_norm)
else:
self.metaval_total_loss1 = tf.reduce_sum(
lossesa) / tf.to_float(FLAGS.meta_batch_size)
self.metaval_total_losses2 = total_losses2 = [
tf.reduce_sum(msesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(len(msesb))
]
self.metaval_total_losses3 = total_losses3 = [
tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(len(lossesb))
]
tf.summary.scalar(prefix + 'Pre-mse', total_losses2[0])
tf.summary.scalar(prefix + 'Post-mse_' + str(num_updates),
total_losses2[-1])
def construct_conv_weights(self):
"""Construct conv weights."""
weights = {}
dtype = tf.float32
conv_initializer = contrib_layers.xavier_initializer_conv2d(dtype=dtype)
k = 3
weights['conv1'] = tf.get_variable(
'conv1', [k, k, self.channels, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b1'] = tf.Variable(tf.zeros([self.dim_hidden]))
weights['conv2'] = tf.get_variable(
'conv2', [k, k, self.dim_hidden, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b2'] = tf.Variable(tf.zeros([self.dim_hidden]))
weights['conv3'] = tf.get_variable(
'conv3', [k, k, self.dim_hidden, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b3'] = tf.Variable(tf.zeros([self.dim_hidden]))
weights['conv4'] = tf.get_variable(
'conv4', [k, k, self.dim_hidden, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b4'] = tf.Variable(tf.zeros([self.dim_hidden]))
weights['w5'] = tf.Variable(
tf.random_normal([self.dim_hidden, self.dim_output]), name='w5')
weights['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='b5')
return weights
def forward_conv(self, inp, weights, reuse=False, scope=''):
"""Forward conv."""
# reuse is for the normalization parameters.
channels = self.channels
inp = tf.reshape(inp, [-1, self.img_size, self.img_size, channels])
hidden1 = conv_block(inp, weights['conv1'], weights['b1'], reuse,
scope + '0')
hidden2 = conv_block(hidden1, weights['conv2'], weights['b2'], reuse,
scope + '1')
hidden3 = conv_block(hidden2, weights['conv3'], weights['b3'], reuse,
scope + '2')
hidden4 = conv_block(hidden3, weights['conv4'], weights['b4'], reuse,
scope + '3')
# last hidden layer is 6x6x64-ish, reshape to a vector
hidden4 = tf.reduce_mean(hidden4, [1, 2])
return tf.matmul(hidden4, weights['w5']) + weights['b5']
|
|
import sublime
import sublime_plugin
from collections import OrderedDict
from operator import itemgetter
from datetime import datetime
from time import time
from bisect import bisect
from zipfile import ZipFile
from tempfile import mkstemp
import stat
import os
import threading
import subprocess
import sys
import re
from ..lib.packages import PackageInfo, PackageList, PackageFileSet
from ..lib.packages import override_display, check_potential_override
from ..lib.packages import find_zip_entry
from ..lib.output_view import output_to_view
from ..lib.threads import BackgroundWorkerThread
from ..lib.utils import SettingsGroup
###----------------------------------------------------------------------------
# A group of view settings that indicate that a view is either an override or a
# diff of one. The settings indicate what package and override the contents of
# the buffer represents.
override_group = SettingsGroup("override_audit_package",
"override_audit_override",
"override_audit_diff")
###----------------------------------------------------------------------------
def loaded():
"""
Initialize plugin state.
"""
log("Initializing")
oa_setting.obj = sublime.load_settings("OverrideAudit.sublime-settings")
oa_setting.default = {
"reuse_views": True,
"clear_existing": True,
"ignore_overrides_in": [],
"diff_unchanged": "diff",
"diff_context_lines": 3,
"diff_empty_hdr": False,
"save_on_diff": False,
"confirm_deletion": True,
"confirm_freshen": True,
"confirm_revert": True,
"report_on_unignore": True,
"external_diff": False,
"ignore_unknown_overrides": [
"^\\.git/",
"^\\.svn/",
"^\\.hg/"
],
"mini_diff_underlying": True,
# This is currently undocumented and may go away in the future.
"enable_hover_popup": True,
# Inherits from user preferences
"binary_file_patterns": None
}
# Restore the diff in any open overrides; this also cleans any views that
    # used to be overrides but no longer are (e.g. if the sublime-package
# file was deleted while the plugin was not loaded).
for window in sublime.windows():
for view in window.views():
setup_override_minidiff(view)
AutoReportTrigger()
def unloaded():
"""
Clean up state before unloading.
"""
log("Shutting down")
AutoReportTrigger.unregister()
def log(message, *args, status=False, dialog=False):
"""
Simple logging method; writes to the console and optionally also the status
message as well.
"""
message = message % args
print("OverrideAudit:", message)
if status:
sublime.status_message(message)
if dialog:
sublime.message_dialog(message)
def oa_syntax(file):
"""
Return the full name of an Override Audit syntax based on the short name.
"""
return "Packages/OverrideAudit/resources/syntax/%s.sublime-syntax" % file
def oa_setting(key):
"""
Get an OverrideAudit setting from a cached settings object.
"""
default = oa_setting.default.get(key, None)
return oa_setting.obj.get(key, default)
def oa_can_diff_externally():
"""
Determine if the external diff functionality should be enabled. This is
based on an introspection of the external_diff setting.
"""
spec = oa_setting("external_diff")
if not spec:
return False
if isinstance(spec, bool):
return False
if isinstance(spec, dict):
return True
if isinstance(spec, str):
if spec == "sublimerge":
# Both Sublimerge Pro and Sublimerge 3 include a top level resource
# by this name that contains their version.
for res in sublime.find_resources("version"):
if res.split("/")[1] in ("Sublimerge 3", "Sublimerge Pro"):
return True
return False
return False
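# For reference, the dict form of "external_diff" that oa_can_diff_externally()
# accepts is the one consumed by DiffExternallyThread below; a minimal sketch
# (the diff tool and values are illustrative, not defaults shipped with the
# package):
#
#   "external_diff": {
#       "shell_cmd": "meld \"$base\" \"$override\"",
#       "env": {},
#       "working_dir": "",
#       "linux": {}, "osx": {}, "windows": {}
#   }
#
# $base and $override are expanded to the extracted packed base file and the
# unpacked override before the command is launched.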
def get_ignore_unknown_patterns():
"""
Fetch the value of the setting that tells us what unknown overrides we
should ignore in reports. The regular expressions from the settings file
(if any) are compiled in the returned list.
When the setting is a boolean, the result is either an empty list or a list
with a regular expression that will match everything, depending on the
state of the boolean.
"""
pattern_list = oa_setting("ignore_unknown_overrides")
# Only be case sensitive on Linux where the file system is case sensitive
re_opts = 0 if sublime.platform() == "linux" else re.IGNORECASE
patterns = []
if isinstance(pattern_list, bool):
return [re.compile(r'.')] if pattern_list else []
# Invalid regex patterns are ignored with a warning
for regex in pattern_list:
try:
patterns.append(re.compile(regex, re_opts))
except Exception as e:
log("Invalid ignore_unknown_overrides regex '%s': %s",
regex, str(e), status=True)
return patterns
def packages_with_overrides(pkg_list, name_list=None):
"""
Collect a list of package names from the given package list for which there
is at least a single (simple) override file and which is not in the list of
packages to ignore overrides in.
Optionally, if name_list is provided, the list of package names will be
filtered to only include packages whose name also exists in the name list.
"""
ignored = oa_setting("ignore_overrides_in")
items = [name for name, pkg in pkg_list if len(pkg.override_files()) > 0
and name not in ignored]
if name_list is not None:
items = list(filter(lambda name: name in name_list, items))
return items
def decorate_pkg_name(pkg_info, name_only=False):
"""
Decorate the name of the provided package with a prefix that describes its
status and optionally also a suffix if it is a complete override or is
expired.
"""
suffix = ""
pkg_name = pkg_info.name
if pkg_info.is_disabled:
pkg_name = "[%s]" % pkg_name
elif pkg_info.is_dependency:
pkg_name = "<%s>" % pkg_name
if name_only:
return pkg_name
if pkg_info.has_possible_overrides(simple=False):
suffix += " <Complete Override>"
if bool(pkg_info.expired_override_files(simple=False)):
suffix += " [EXPIRED]"
return "[{}{}{}] {}{}".format(
"S" if pkg_info.shipped_path is not None else " ",
"I" if pkg_info.installed_path is not None else " ",
"U" if pkg_info.unpacked_path is not None else " ",
pkg_name,
suffix)
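# Example outputs (package names are illustrative): a shipped package that is
# also unpacked and completely overridden decorates as
#   "[S U] Python <Complete Override>"
# while a disabled, installed-only package decorates as "[ I ] [SomePackage]".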
def setup_override_minidiff(view):
"""
Check the view provided to see if it represents an edit session on a
package resource that is an override. If it isn't, or if the settings are
not set to indicate that the user wants the mini diff, this does nothing.
Otherwise, it will set up the reference document for this override to track
the base file.
"""
settings = sublime.load_settings("Preferences.sublime-settings")
mini_diff = settings.get("mini_diff")
mini_diff_underlying = oa_setting("mini_diff_underlying") and mini_diff is True
filename = view.file_name()
if (not mini_diff_underlying or
filename is None or not filename.startswith(sublime.packages_path()) or
not os.path.isfile(filename)):
return
result = check_potential_override(filename, deep=True, get_content=mini_diff_underlying)
if result is not None:
override_group.apply(view, result[0], result[1], False)
if result[2] is not None:
view.set_reference_document(result[2])
else:
override_group.remove(view)
def open_override(window, pkg_name, override):
"""
Open the provided override from the given package name.
"""
filename = os.path.join(sublime.packages_path(), pkg_name, override)
window.open_file(filename)
def delete_override(window, pkg_name, override):
"""
Delete the provided override from the given package name.
"""
# Import send2trash on demand; see Default/side_bar.py.
import Default.send2trash as send2trash
confirm = oa_setting("confirm_deletion")
relative_name = os.path.join(pkg_name, override)
full_name = os.path.join(sublime.packages_path(), relative_name)
if os.path.isfile(full_name):
if confirm:
msg = "Confirm deletion:\n\n{}".format(
override_display(relative_name))
if (confirm is False or
sublime.yes_no_cancel_dialog(msg) == sublime.DIALOG_YES):
send2trash.send2trash(full_name)
log("Deleted %s", relative_name, status=True)
def freshen_override(view, package, override=None):
"""
Touch either the explicitly specified override in the provided package or
all expired overrides in the package.
"""
if oa_setting("confirm_freshen"):
target = "Expired overrides in '%s'" % package
if override is not None:
relative_name = os.path.join(package, override)
target = override_display(relative_name)
msg = "Confirm freshen:\n\n{}".format(target)
if sublime.yes_no_cancel_dialog(msg) != sublime.DIALOG_YES:
return
callback = lambda thread: log(thread.result, status=True)
OverrideFreshenThread(view.window(), "Freshening Files", callback,
package=package, override=override, view=view).start()
def diff_override(window, pkg_info, override,
diff_only=False, force_reuse=False):
"""
    Generate a diff for the given package and override in a background thread.
"""
context_lines = oa_setting("diff_context_lines")
action = "diff" if diff_only else oa_setting("diff_unchanged")
empty_diff_hdr = oa_setting("diff_empty_hdr")
if force_reuse:
reuse, clear = True, True
else:
reuse = oa_setting("reuse_views")
clear = oa_setting("clear_existing")
def _process_diff(thread):
diff = thread.diff
if diff is None:
return log("Unable to diff %s/%s\n\n"
"Error loading file contents of one or both files.\n"
"Check the console for more information",
pkg_info.name, override, dialog=True)
if diff.is_empty:
log("No changes detected in %s/%s", pkg_info.name, override,
status=True)
if action == "open":
return open_override(window, pkg_info.name, override)
elif action == "ignore":
return
title = "Diff of %s" % override_display(
os.path.join(pkg_info.name, override))
result = diff.result
prefix = diff.hdr if diff.is_empty and empty_diff_hdr else ""
content = prefix + "No differences found" if result == "" else result
view = output_to_view(window, title, content, reuse, clear,
"Packages/Diff/Diff.tmLanguage")
override_group.apply(view, pkg_info.name, override, True)
callback = lambda thread: _process_diff(thread)
OverrideDiffThread(window, "Diffing Override", callback,
pkg_info=pkg_info, override=override).start()
def filter_unmodified_overrides(pkg_info, overrides):
"""
Given a list of overrides from a particular package, return a copy of the
list that's filtered so that any overrides that have not been changed from
the underlying file are removed.
"""
for override in overrides:
result = pkg_info.override_diff(override, 1)
if result.is_empty:
overrides.remove(override)
return overrides
def diff_externally(window, pkg_info, override):
"""
Launch the configured external diff tool to diff the override from the
given package info. The task is launched in a background thread.
"""
base_file = None
override_file = None
if pkg_info.exists():
base_file = extract_packed_override(pkg_info, override)
override_file = os.path.join(pkg_info.unpacked_path, override)
diff_args = oa_setting("external_diff")
if None in (base_file, override_file):
return log("Unable to externally diff %s/%s\n\n"
"Error loading file contents of one or both files.\n"
"Check the console for more information",
pkg_info.name, override, dialog=True)
if diff_args == "sublimerge":
diff_with_sublimerge(base_file, override_file)
else:
callback = lambda thread: log(thread.result, status=True)
DiffExternallyThread(window, "Launching external diff", callback,
diff_args=diff_args,
base=base_file, override=override_file).start()
def diff_with_sublimerge(base_file, override_file):
"""
Use Sublimerge 3 or Sublimerge Pro to diff the override against its base
file. This assumes that one of those packages is installed and enabled
(the command is not visible otherwise).
"""
sublime.run_command("new_window")
window = sublime.active_window()
window.open_file(base_file).settings().set("_oa_ext_diff_base", base_file)
window.open_file(override_file)
window.run_command("sublimerge_diff_views", {
"left_read_only": True,
"right_read_only": False,
})
def revert_override(window, pkg_info, override):
if oa_setting("confirm_revert"):
target = override_display(os.path.join(pkg_info.name, override))
msg = (
"Are you sure you want to continue?\n\n" +
"The current content of this override will be permanently lost; " +
"you can't undo this operation.\n\n" +
"Confirm revert:\n\n{}".format(target))
if sublime.yes_no_cancel_dialog(msg) != sublime.DIALOG_YES:
return
callback = lambda thread: log(thread.result, status=True)
OverrideRevertThread(window, "Reverting File", callback,
pkg_info=pkg_info, override=override).start()
def find_override(view, pkg_name, override):
"""
Given a report view, return the bounds of the override belonging to the
given package. Returns None if the position cannot be located.
"""
if not view.match_selector(0, "text.override-audit"):
return None
bounds = None
packages = view.find_by_selector("entity.name.package")
for index, pkg_pos in enumerate(packages):
if view.substr(pkg_pos) == pkg_name:
end_pos = view.size()
if index + 1 < len(packages):
end_pos = packages[index + 1].begin() - 1
bounds = sublime.Region(pkg_pos.end() + 1, end_pos)
break
if bounds is None:
return
overrides = view.find_by_selector("entity.name.filename.override")
for file_pos in overrides:
if bounds.contains(file_pos) and view.substr(file_pos) == override:
return file_pos
return None
def extract_packed_override(pkg_info, override):
"""
Given a package information structure for a package and an override inside
    of that package, this determines the package file that the base file is
contained in and extracts it to a temporary file, whose name is returned.
"""
override_type, contents = pkg_info.packed_override_contents(override, as_list=False)
if override_type is None:
return log("Unable to extract %s/%s; unable to locate base file",
pkg_info.name, override)
name,ext = os.path.splitext(override)
prefix = "{pre}_{pkg}_{name}_".format(
pre=override_type,
pkg=pkg_info.name,
name=name.replace("/", "_")
)
try:
fd, base_name = mkstemp(prefix=prefix, suffix=ext)
os.chmod(base_name, stat.S_IREAD)
os.write(fd, contents.encode("utf-8"))
os.close(fd)
return base_name
except Exception as err:
return log("Error creating temporary file for %s/%s: %s",
pkg_info.name, override, str(err))
def delete_packed_override(filename):
"""
Attempt to delete the given named file, which should be a file returned
from extract_packed_override().
"""
try:
if os.path.exists(filename):
os.chmod(filename, stat.S_IREAD | stat.S_IWRITE)
os.remove(filename)
log("Deleted temporary file '%s'", filename)
except:
log("Error deleting '%s'", filename)
def setup_new_override_view(view, reposition=True):
"""
Given a view that represents a potential new override, set it up so that
our event handler will create an override on save. This presumes that the
view passed in is read-only; it will be marked as non-read-only once it is
finished loading. If the mini_diff setting is turned on, the reference
document will be set to the content of the buffer when this is called.
When reposition is True, the cursor is jumped to the start of the file, as
if the user just opened it from disk. Otherwise the cursor is left wherever
it was in the view to begin with.
"""
view.settings().set("_oa_is_new_override", True)
if view.is_loading():
return sublime.set_timeout(lambda: setup_new_override_view(view), 10)
settings = sublime.load_settings("Preferences.sublime-settings")
mini_diff = settings.get("mini_diff")
# File is left as a scratch buffer until the first modification
if reposition:
view.run_command("move_to", {"to": "bof"})
view.set_read_only(False)
# Sublime turns off mini_diff for packed files that it opens.
if mini_diff is True:
view.settings().set("mini_diff", mini_diff)
reference_doc = view.substr(sublime.Region(0, len(view)))
view.set_reference_document(reference_doc)
###----------------------------------------------------------------------------
class AutoReportTrigger():
"""
A simple singleton class for running an automated expired updates report
whenever a package is removed from the ignored packages list or at startup
when the build number of Sublime has changed.
"""
instance = None
def __init__(self):
if AutoReportTrigger.instance is not None:
return
AutoReportTrigger.instance = self
self.settings = sublime.load_settings("Preferences.sublime-settings")
ignored = self.settings.get("ignored_packages", [])
self.cached_ignored = PackageFileSet(ignored)
self.removed = PackageFileSet()
self.settings.add_on_change("_oa_sw", lambda: self.__settings_change())
self.__load_status()
@classmethod
def unregister(cls):
if AutoReportTrigger.instance is not None:
AutoReportTrigger.instance.settings.clear_on_change("_oa_sw")
AutoReportTrigger.instance = None
def __load_status(self):
self.last_build = "0"
self.force_report = False
self.status_file = os.path.join(sublime.packages_path(), "User",
"OverrideAudit.status")
if os.path.isfile(self.status_file):
with open(self.status_file) as file:
line = file.readline().split(",")
try:
self.last_build = line[0]
self.force_report = line[1] == "True"
except IndexError:
pass
if self.last_build == sublime.version() and self.force_report == False:
log("Sublime version is unchanged; skipping automatic report")
return
if self.last_build != sublime.version():
if self.last_build == "0":
reason = "Initial plugin installation"
else:
reason = "Sublime version has changed"
elif self.force_report:
reason = "Sublime restarted during a package upgrade"
log(reason + "; generating automatic report")
sublime.set_timeout(lambda: self.__execute_auto_report(), 1000)
def __save_status(self, force):
with open(self.status_file, "w") as file:
file.write("%s,%s" % (sublime.version(), force))
def __execute_auto_report(self):
self.__save_status(False)
self.removed = PackageFileSet()
window = sublime.active_window()
window.run_command("override_audit_override_report",
{"only_expired": True, "ignore_empty": True})
def __check_removed(self, removed_set):
if removed_set != self.removed:
return
self.__execute_auto_report()
def __settings_change(self):
new_list = PackageFileSet(self.settings.get("ignored_packages", []))
if new_list == self.cached_ignored:
return
removed = self.cached_ignored - new_list
added = new_list - self.cached_ignored
self.cached_ignored = new_list
if not oa_setting("report_on_unignore"):
return
self.removed |= removed
self.removed -= added
if len(self.removed) != 0:
self.__save_status(True)
# Send a copy of the list so we can detect if the list changes
# in the interim.
current = PackageFileSet(self.removed)
sublime.set_timeout(lambda: self.__check_removed(current), 1000)
else:
self.__save_status(False)
###----------------------------------------------------------------------------
class PackageListCollectionThread(BackgroundWorkerThread):
"""
Collect the list of packages in a background thread. The collection can
optionally filter the list returned to only a set of names given and can
also optionally pre-fetch the list of overrides in found packages.
"""
def _process(self):
self.pkg_list = PackageList(self.args.get("name_list", None))
if self.args.get("get_overrides", False) is True:
packages_with_overrides(self.pkg_list)
###----------------------------------------------------------------------------
class OverrideDiffThread(BackgroundWorkerThread):
"""
Diff a specific package override in a background thread.
"""
def _process(self):
context_lines = oa_setting("diff_context_lines")
pkg_info = self.args.get("pkg_info", None)
override = self.args.get("override", None)
if not pkg_info or not override:
self.diff = None
return log("diff thread not given a package or override to diff")
# Only need to do this if the user has a specific setting
binary_patterns = oa_setting("binary_file_patterns")
if binary_patterns is not None:
pkg_info.set_binary_pattern(binary_patterns)
self.diff = pkg_info.override_diff(override, context_lines,
binary_result="<File is binary>")
###----------------------------------------------------------------------------
# TODO Maybe this shouldn't freshen things that are not currently expired?
# Currently it will if you explicitly tell it to.
class OverrideFreshenThread(BackgroundWorkerThread):
"""
Touch either the explicitly specified override in the provided package or
all expired overrides in the package.
"""
def _touch_override(self, view, zFile, pkg_name, override):
new_mtime = None
now = time()
fname = os.path.join(sublime.packages_path(), pkg_name, override)
try:
entry = find_zip_entry(zFile, override)
zTime = datetime(*entry.date_time).timestamp()
if zTime > now:
log("Warning: The packaged '%s/%s' file is from the future" ,
pkg_name, override)
new_mtime = (now, zTime + 1)
with os.fdopen(os.open(fname, os.O_RDONLY)) as f:
os.utime(f.fileno() if os.utime in os.supports_fd else fname,
new_mtime)
# TODO: This command could take a list of overrides in the package
# and handle them all at once.
view.run_command("override_audit_modify_mark", {
"package": pkg_name,
"override": override
})
return True
except:
return False
def _msg(self, pkg_name, override, success):
prefix = "Freshened" if success else "Unable to freshen"
return "%s '%s/%s'" % (prefix, pkg_name, override)
def _clean_package(self, view, pkg_name):
pkg_list = view.settings().get("override_audit_expired_pkgs", [])
if pkg_name in pkg_list:
pkg_list.remove(pkg_name)
view.settings().set("override_audit_expired_pkgs", pkg_list)
def _single(self, view, zFile, pkg_info, override):
result = self._touch_override(view, zFile, pkg_info.name, override)
if result and not pkg_info.expired_override_files(simple=True):
self._clean_package(view, pkg_info.name)
return self._msg(pkg_info.name, override, result)
def _pkg(self, view, zFile, pkg_info):
count = 0
pkg_name = pkg_info.name
expired_list = pkg_info.expired_override_files(simple=True)
for expired_name in expired_list:
result = self._touch_override(view, zFile, pkg_name, expired_name)
log(self._msg(pkg_name, expired_name, result))
if result:
count += 1
if count == len(expired_list):
prefix = "All"
self._clean_package(view, pkg_name)
else:
prefix = "%d of %d" % (count, len(expired_list))
return "%s expired overrides freshened in '%s'" % (prefix, pkg_name)
def _process(self):
view = self.args.get("view", None)
package = self.args.get("package", None)
override = self.args.get("override", None)
if not view or not package:
self.result = "Nothing done; missing parameters"
return log("freshen thread not given a view or package")
pkg_info = PackageInfo(package)
if not pkg_info.exists():
self.result = "Unable to freshen '%s'; no such package" % package
return
if not pkg_info.package_file():
self.result = "Unable to freshen '%s'; no overrides" % package
return
try:
with ZipFile(pkg_info.package_file()) as zFile:
if override is not None:
self.result = self._single(view, zFile, pkg_info, override)
else:
self.result = self._pkg(view, zFile, pkg_info)
except Exception as e:
self.result = "Error while freshening: %s" % str(e)
###----------------------------------------------------------------------------
class OverrideRevertThread(BackgroundWorkerThread):
"""
    Revert the explicitly specified override in the provided package back to
    its initial unpacked state.
"""
def _process(self):
pkg_info = self.args.get("pkg_info", None)
override = self.args.get("override", None)
if not pkg_info or not override:
self.result = "Nothing done; missing parameters"
return log("revert thread not given a package or override")
if not pkg_info.exists():
self.result = "Unable to revert '%s'; no such package" % package
return
if not pkg_info.package_file():
self.result = "Unable to revert '%s'; no overrides" % package
return
try:
fname = os.path.join(sublime.packages_path(), pkg_info.name, override)
o_type, contents = pkg_info.packed_override_contents(override, as_list=False)
with open(fname, 'wb') as file:
file.write(contents.encode("utf-8"))
self.result = "Reverted '%s/%s'" % (pkg_info.name, override)
except Exception as e:
self.result = "Error while reverting: %s" % str(e)
###----------------------------------------------------------------------------
class ReportGenerationThread(BackgroundWorkerThread):
"""
Helper base class for generating a report in a background thread.
"""
def __init__(self, window, spinner_text, current_view, **kwargs):
super().__init__(window, spinner_text,
lambda thread: self._display_report(thread),
**kwargs)
self.current_view = current_view
def _generation_time(self):
return datetime.now().strftime("Report Generated: %Y-%m-%d %H:%M:%S\n")
def _display_report(self, thread):
# Some reports don't call _set_content if they are empty
if not hasattr(self, "content"):
return
force_reuse = self.args.get("force_reuse", False)
reuse = True if force_reuse else oa_setting("reuse_views")
clear = True if force_reuse else oa_setting("clear_existing")
view = output_to_view(self.window, self.caption, self.content,
reuse, clear, self.syntax,
current_view=self.current_view)
view.settings().set("override_audit_report_type", self.report_type)
if self.settings is not None:
for setting,value in self.settings.items():
view.settings().set(setting, value)
view.run_command("move_to", {"to": "bof"})
def _set_content(self, caption, content, report_type, syntax,
settings=None):
self.caption = caption
self.content = content
self.report_type = report_type
self.syntax = syntax
self.settings = settings
###----------------------------------------------------------------------------
class DiffExternallyThread(BackgroundWorkerThread):
"""
Spawn a diff in an external process, waiting for it to complete and then
cleaning up any temporary files.
"""
def _launch(self, base, override, diff_args):
shell_cmd = diff_args.get("shell_cmd")
env = diff_args.get("env", {})
working_dir = diff_args.get("working_dir", "")
if not shell_cmd:
raise ValueError("shell_cmd is required")
if not isinstance(shell_cmd, str):
raise ValueError("shell_cmd must be a string")
variables = self.window.extract_variables()
variables["base"] = base
variables["override"] = override
# Don't expand vars in env; we let python do that for us.
shell_cmd = sublime.expand_variables(shell_cmd, variables)
working_dir = sublime.expand_variables(working_dir, variables)
if working_dir == "" and self.window.active_view():
path = os.path.dirname(self.window.active_view().file_name() or "")
if os.path.isdir(path):
working_dir = path
log("Running %s", shell_cmd)
# Hide the console window on Windows
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
process_env = os.environ.copy()
process_env.update(env)
for var, value in process_env.items():
process_env[var] = os.path.expandvars(value)
# Might not exist, but that is a user error. We checked before auto
# changing it.
if working_dir != "":
os.chdir(working_dir)
if sys.platform == "win32":
# Use shell=True on Windows, so shell_cmd is passed through with the correct escaping
self.proc = subprocess.Popen(
shell_cmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
stdin=subprocess.DEVNULL,
startupinfo=startupinfo,
env=process_env,
shell=True)
elif sys.platform == "darwin":
            # Use a login shell on OSX, otherwise the user's expected env vars won't be set up
self.proc = subprocess.Popen(
["/usr/bin/env", "bash", "-l", "-c", shell_cmd],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
stdin=subprocess.DEVNULL,
startupinfo=startupinfo,
env=process_env,
shell=False)
elif sys.platform == "linux":
# Explicitly use /bin/bash on Linux, to keep Linux and OSX as
# similar as possible. A login shell is explicitly not used for
# linux, as it's not required
self.proc = subprocess.Popen(
["/usr/bin/env", "bash", "-c", shell_cmd],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
stdin=subprocess.DEVNULL,
startupinfo=startupinfo,
env=process_env,
shell=False)
def _prepare_args_dict(self, args):
prepared = args.copy()
osx = prepared.pop("osx", {})
linux = prepared.pop("linux", {})
windows = prepared.pop("windows", {})
prepared.update({
"osx": osx,
"linux": linux,
"windows": windows
}[sublime.platform()]
)
return prepared
def _process(self):
base = self.args.get("base", None)
override = self.args.get("override", None)
diff_args = self.args.get("diff_args", None)
if None in (base, override, diff_args):
self.result = "Nothing done; missing parameters"
return log("external diff thread not given files and diff args")
try:
diff_args = self._prepare_args_dict(diff_args)
self._launch(base, override, diff_args)
result_code = self.proc.wait()
self.result = "External diff tool has exited"
except Exception as err:
result_code = None
self.result = "External diff failure"
log("Error while diffing externally: %s", str(err), dialog=True)
if result_code:
log("External diff finished with return code %d", result_code)
delete_packed_override(base)
###----------------------------------------------------------------------------
class CommandContext(tuple):
"""
This is a custom named tuple that is used by the ContextHelper class to
describe the context that a command is being executed in.
"""
__slots__ = ()
package = property(itemgetter(0))
override = property(itemgetter(1))
is_diff = property(itemgetter(2))
source = property(itemgetter(3))
def __new__(_cls, package, override, is_diff, source):
return tuple.__new__(_cls, (package, override, is_diff, source))
def __repr__(self):
return self.__class__.__name__ + '(package=%r, override=%r, is_diff=%r, source=%r)' % self
def has_target(self):
return None not in (self.package, self.override)
def is_complete(self):
return None not in (self.package, self.override, self.is_diff)
def package_only(self):
return self.package is not None and self.override is None
def has_diff(self):
return self.is_diff is not None
###----------------------------------------------------------------------------
class ContextHelper():
"""
Helper class to allow context specific commands to seamlessly work in view
context menu, tab context menus and the command palette.
Finds the appropriate target view and package/override/diff options based
on where it is used.
"""
def _extract(self, scope, event):
if event is None:
return None
point = self.view.window_to_text((event["x"], event["y"]))
scope = "text.override-audit " + scope
if not self.view.match_selector(point, scope):
return None
return self.view.substr(self.view.extract_scope(point))
def _package_at_point(self, event):
return self._extract("entity.name.package", event)
def _override_at_point(self, event, expired=False):
scope="entity.name.filename.override"
if expired:
scope += ".expired"
return self._extract(scope, event)
def _package_for_override_at(self, event):
if event is not None:
point = self.view.window_to_text((event["x"], event["y"]))
packages = self.view.find_by_selector("entity.name.package")
if packages:
p_lines = [self.view.rowcol(p.begin())[0] for p in packages]
pkg_region = packages[bisect(p_lines, self.view.rowcol(point)[0]) - 1]
return self.view.substr(pkg_region)
return None
def _report_type(self, **kwargs):
target = self.view_target(self.view, **kwargs)
return target.settings().get("override_audit_report_type")
def _pkg_contains_expired(self, pkg_name, **kwargs):
target = self.view_target(self.view, **kwargs)
expired = target.settings().get("override_audit_expired_pkgs", [])
return pkg_name in expired
def view_target(self, view, group=-1, index=-1, **kwargs):
"""
Get target view specified by group and index, if needed.
"""
window = view.window()
return view if group == -1 else window.sheets_in_group(group)[index].view()
def view_context(self, view, expired, event=None, **kwargs):
"""
Return a CommandContext tuple for the provided view and possible event.
Some members of the tuple will be None if they do not apply or cannot
be determined by the current command state.
If view is none, view_target is invoked to determine it. Additionally,
expired indicates if the override found needs to be expired or not.
"""
if view is None:
view = self.view_target(self.view, **kwargs)
package = None
override = None
is_diff = None
source = None
# Prioritize explicit arguments when present
if any(key in kwargs for key in ("pkg_name", "package", "override")):
package = kwargs.get("pkg_name", kwargs.get("package", None))
override = kwargs.get("override", None)
is_diff = kwargs.get("is_diff", None)
source = "args"
# Favor settings if they exist (only for non-expired)
elif override_group.has(view) and expired == False:
package, override, is_diff = override_group.get(view)
source = "settings"
# Check for context clicks on a package or override name as a fallback
# Note: In ST4, commands in the tab context menu will get an event, but
# it will only have modifier key information
elif event is not None and "x" in event:
source = "context"
package = self._package_at_point(event)
if package is None:
override = self._override_at_point(event, expired)
if override is not None:
package = self._package_for_override_at(event)
return CommandContext(package, override, is_diff, source)
def always_visible(self, **kwargs):
return kwargs.get("always_visible", True)
def caption(self, caption, **kwargs):
target = self.view_target(self.view, **kwargs)
menu = target.settings().get("context_menu", "")
if "OverrideAudit" in menu:
return caption
return "OverrideAudit: %s" % caption
def override_exists(self, ctx):
if ctx.has_target():
relative_name = os.path.join(ctx.package, ctx.override)
full_name = os.path.join(sublime.packages_path(), relative_name)
return os.path.isfile(full_name)
return False
def override_unknown(self, view, ctx):
if ctx.has_target():
unknowns = view.settings().get("override_audit_unknown_overrides", {})
if ctx.package in unknowns:
if ctx.override in unknowns[ctx.package]:
return True
return False
def package_exists(self, ctx):
if ctx.package_only():
pkg_dir = os.path.join(sublime.packages_path(), ctx.package)
return os.path.isdir(pkg_dir)
return False
def package_overrides_possible(self, view, ctx):
if ctx.package_only():
pkgs = view.settings().get("override_audit_report_packages", {})
pkg_info = pkgs.get(ctx.package, {})
return pkg_info["is_shipped"] or pkg_info["is_installed"]
return False
def want_event(self):
return True
###----------------------------------------------------------------------------
|
|
#!/usr/bin/python -tt
'''
File: learning.py
Date: July 2, 2014
Description: this script takes as input a list of word pairs, as well as the
vector representations of those words, and sets up a regression problem
to learn the parameters in our composition function.
Usage: python learning.py wordVectorsIn ParametersOut < training_data
Update (August 22, 2014): modified the script to take into account modified
handling of PPDB training extraction.
'''
import sys, commands, string, getopt, cPickle, math
import numpy as np
import multiprocessing as mp
import sklearn.linear_model as regressor
from extract_training import *
'''
read in word representations from text file
'''
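# Each input line is expected to be a word followed by its vector components,
# e.g. (values illustrative):
#   car 0.418 -0.2497 0.0312 ...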
def readVecFile(filename, normalize):
fh = open(filename, 'r')
repDict = {}
for line in fh:
word = line.strip().split()[0]
rep = np.array([float(i) for i in line.strip().split()[1:]])
repDict[word] = np.divide(rep, np.linalg.norm(rep)) if normalize else rep
return repDict
'''
from PPDB, add the training example if it meets certain criteria
'''
def addTrainingExample(wordVecs, filterDup, training_data, training_stats, key, output, input_left, input_right):
training_pos = training_data[key] if key in training_data else []
num_train = training_stats[key] if key in training_stats else [0,0]
num_train[0] += 1
if output in wordVecs and input_left in wordVecs and input_right in wordVecs:
if filterDup:
if input_left != output and input_right != output:
training_pos.append((wordVecs[output], wordVecs[input_left], wordVecs[input_right]))
num_train[1] += 1
else:
sys.stderr.write("Filtered example %s ||| %s because of redundancy\n"%(output, ' '.join([input_left, input_right])))
else:
training_pos.append((wordVecs[output], wordVecs[input_left], wordVecs[input_right]))
num_train[1] += 1
training_data[key] = training_pos
else:
sys.stderr.write("Could not find one of the following words in the vocabulary: %s, %s, or %s\n"%(output, input_left, input_right))
'''
function that goes through PPDB and assembles the training data by calling addTrainingExample and doing some pre and post-processing
'''
def createTraining(dbloc, wordVecs, filterDup):
print "Extracting training examples directly from PPDB"
extractor = TrainingExtractor(dbloc, "all")
extractor.extract_examples()
training_tuples = extractor.return_examples()
training_data = {}
training_stats = {}
for key, one_phrase, many_phrase in training_tuples:
input_left = many_phrase.split()[0]
input_right = many_phrase.split()[1]
addTrainingExample(wordVecs, filterDup, training_data, training_stats, key, one_phrase, input_left, input_right)
for key in training_stats:
print "POS Pair %s: out of %d training examples, valid input-output triples exist for %d examples"%(key, training_stats[key][0], training_stats[key][1])
return training_data
def readTrainingFromFile(trainFile, wordVecs, filterDup):
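# expected line format: "POS_PAIR ||| output left right ||| output left right ..."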
print "Reading training examples from file"
training_data = {}
training_stats = {}
train_fh = open(trainFile, 'rb')
for line in train_fh:
elements = line.strip().split(' ||| ')
key = elements[0]
for triple in elements[1:]:
assert len(triple.split()) == 3
one_phrase, input_left, input_right = triple.split()
addTrainingExample(wordVecs, filterDup, training_data, training_stats, key, one_phrase, input_left, input_right)
print "Completed reading in examples for %s"%key
train_fh.close()
return training_data
'''
distributed implementation of multivariate regression that fits each output element/coordinate independently.
Something is lost by doing this: the coordinate-wise R^2 is always less than that of the joint multivariate
solution, but the difference is usually minimal and we can take advantage of multiple cores. Only works
for particular types of regressions: lasso, ridge, lars, and elastic
'''
def regressorParallel(data, labels, start, end, regStr, out_q):
reg = None
if regStr == "lasso":
reg = regressor.LassoCV()
elif regStr == "ridge":
reg = regressor.RidgeCV()
elif regStr == "lars":
reg = regressor.LassoLarsCV(n_jobs=1)
elif regStr == "elastic":
reg = regressor.ElasticNetCV()
coefficients = []
for idx in range(start, end):
reg.fit(data, labels[:,idx]) #what would happen if we passed in labels, not labels[:,idx]?
print "Dimension %d Alpha selected: %.3g"%(idx, reg.alpha_) #use this for CV experiments
R2 = reg.score(data, labels[:,idx])
print "Dimension %d R^2 on data: %.3f"%(idx, R2)
print "Dimension %d Number of non-zero values in coefficients: %d"%(idx, (reg.coef_ != 0).sum())
coefficients.append((idx, R2, reg.coef_, reg.intercept_))
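# hand the (dimension index, R^2, coefficients, intercept) tuples back to the parent;
# the index lets the parent slot each result into the full parameter tensor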
out_q.put(coefficients)
'''
standard multivariate regression where all output dimensions are fit jointly
'''
def regressorMultivariate(data, labels, regStr):
reg = None
if regStr == "lasso":
reg = regressor.LassoCV()
elif regStr == "ridge":
reg = regressor.RidgeCV()
elif regStr == "lars":
reg = regressor.LassoLarsCV(n_jobs=1)
elif regStr == "elastic":
reg = regressor.ElasticNetCV()
reg.fit(data, labels)
print "Multivariate Alpha selected: %.3g"%reg.alpha_
R2 = reg.score(data, labels)
print "Multivariate R^2 on data: %.3f"%R2
print "Number of non-zero values in coefficients: %d"%((reg.coef_ != 0).sum())
return (reg.coef_, reg.intercept_)
'''
experimental regression function where the prior is linguistically motivated, so
we impose a form of structural sparsity on the parameter matrix.
Only works for concatenative models
'''
def regressorLinguisticPrior(X, y, pos_pair, alpha, dim):
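# Roughly, a closed-form ridge solution A*W = b with A = X^T X + alpha*I and
# b = X^T y (+ W_star^T), i.e. L2 regularization that shrinks W toward the prior
# W_star (an alpha-scaled copy of the right-hand word's block) instead of toward zero.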
numExamples = X.shape[0]
print pos_pair #make change: only do this for certain combinations
X = np.concatenate((np.ones((numExamples,1)), X), axis=1)
left_mat = np.zeros((dim, dim+1)) #+1 because of intercept
right_mat = np.identity(dim)*alpha
W_star = np.concatenate((left_mat, right_mat), axis=1) #vx(p+1) matrix
#set b depending on what the POS pair is, otherwise can just always set it to the 'else' value
b = np.dot(X.T, y) if pos_pair == "X X" or pos_pair == "NN NN" else np.dot(X.T, y) + W_star.T #pxv matrix
A = np.dot(X.T, X) + alpha*np.identity(W_star.shape[1]) #(p+1)x(p+1) matrix
W = np.linalg.solve(A, b) #result should be (p+1)xv matrix
intercept = W[0,:]
return (W[1:,:].T, intercept)
'''
wrapper function for learning parameters
'''
def learnParameters(training_data, pos_pair, numProc, diagonal, concat, reg, multivariate, alpha):
numSamples = len(training_data)
dim = len(training_data[0][0])
P = dim if diagonal else dim * dim
if concat:
P = 2*dim
print "Number of training examples: %d; Number of regression problems: %d; Number of covariates: %d"%(numSamples, dim, P)
y = np.zeros((numSamples, dim))
X = np.zeros((numSamples, P))
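# per-example features: concat -> [left; right] (2*dim), diagonal -> elementwise
# product of left and right (dim), otherwise the flattened outer product (dim*dim)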
for idx, triple in enumerate(training_data): #assemble the data in y and X
y[idx,:] = triple[0].transpose()
if concat:
X[idx,:] = np.concatenate((triple[1], triple[2])) #left and right vectors are 1-D, so no axis argument
elif diagonal:
X[idx,:] = np.diagonal(np.outer(triple[1], triple[2]))
else:
X[idx,:] = np.hstack(np.outer(triple[1], triple[2]))
print "Completed assembling training data into regression format. Now starting regression."
parameter = np.zeros((dim, dim, dim)) if not concat else np.zeros((dim, 2*dim))
intercept = np.zeros((dim))
if reg == "multitask" or alpha >= 0 or multivariate:
if alpha >= 0:
coeff, intercept = regressorLinguisticPrior(X, y, pos_pair, alpha, dim)
elif reg == "multitask":
lasso = regressor.MultiTaskLasso(alpha=5e-5) #call multitask lasso directly here
print "Fixing alpha to 5e-5"
lasso.fit(X, y)
print "Multitask R^2: %.3f"%(lasso.score(X, y))
coeff = lasso.coef_
intercept = lasso.intercept_
else: #can only be multivariate
coeff, intercept = regressorMultivariate(X, y, reg)
for idx in range(0, dim): #re-assemble parameters in the right structure
if concat:
parameter[idx,:] = coeff[idx,:]
else:
parameter[idx,:,:] = coeff[idx,:].reshape((dim, dim)) if not diagonal else np.diag(coeff[idx,:])
else: #for parallel/distributed estimation
out_q = mp.Queue()
procs = []
chunksize = int(math.floor(dim / float(numProc)))
for proc in range(numProc):
end = dim if proc == numProc - 1 else (proc+1)*chunksize
p = mp.Process(target=regressorParallel, args=(X, y, chunksize*proc, end, reg, out_q))
procs.append(p)
p.start()
coefficients = []
for proc in range(numProc):
coefficients += out_q.get()
for p in procs:
p.join()
avgR2 = 0
for coeff_idx_tuple in coefficients:
idx, R2, coeff, inter = coeff_idx_tuple
avgR2 += R2
if concat:
parameter[idx, :] = coeff
else:
parameter[idx, :, :] = coeff.reshape((dim, dim)) if not diagonal else np.diag(coeff)
intercept[idx] = inter
print "Parameter estimation complete and tensor has been formed"
print "Average R2 across the %d regression problems: %.3f"%(dim, avgR2/dim)
return parameter, intercept
def main():
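# Options: -n normalize word vectors, -j number of parallel jobs, -d diagonal
# parametrization, -f filter redundant examples, -r regressor (lasso/ridge/lars/
# elastic/multitask), -c concatenative model, -p extract examples directly from
# PPDB, -P alpha for the linguistic prior, -m multivariate regression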
(opts, args) = getopt.getopt(sys.argv[1:], 'cdfj:mnpP:r:')
normalize = False
diagonal = False
filterDup = False
concat = False
ppdb = False
reg = "lasso"
jobs = 4
alpha = -1
multivariate = False
for opt in opts:
if opt[0] == '-n':
normalize = True
elif opt[0] == '-j':
jobs = int(opt[1])
elif opt[0] == '-d':
diagonal = True
elif opt[0] == '-f':
filterDup = True
elif opt[0] == '-r':
reg = opt[1]
if not (reg == "lasso" or reg == "ridge" or reg == "lars" or reg == "elastic" or reg == "multitask"):
sys.stderr.write("Error: regressor option not recognized; defaulting to 'lasso'\n")
reg = "lasso"
elif opt[0] == '-c': #concat model instead of outer product-based model
concat = True
elif opt[0] == '-p': #extract examples straight from PPDB
ppdb = True
elif opt[0] == '-P': #prior
alpha = int(opt[1])
elif opt[0] == '-m': #multivariate version of whatever algorithm chosen in -r
multivariate = True
if reg == "multitask":
sys.stderr.write("Note: can only do multivariate regression on lasso, ridge, lars, or elastic")
sys.exit()
if diagonal and concat:
sys.stderr.write("Error: cannot have diagonal parametrization and concatenative model together; setting diagonalization to false\n")
diagonal = False
if alpha >= 0 and (reg != "ridge" or not concat):
sys.stderr.write("Error: linguistic regularization only works for L-2 prior (ridge regression) and concatenative models; setting regularizer to ridge and turning concatenation on\n")
concat = True
reg = "ridge"
wordVecs = readVecFile(args[0], normalize)
training_data = createTraining(args[1], wordVecs, filterDup) if ppdb else readTrainingFromFile(args[1], wordVecs, filterDup)
print "Regressor chosen: %s"%reg
parameters = {}
for pos_pair in training_data:
parameters[pos_pair] = learnParameters(training_data[pos_pair], pos_pair, jobs, diagonal, concat, reg, multivariate, alpha)
print "Completed parameter learning for POS pair %s"%pos_pair
cPickle.dump(parameters, open(args[2], "wb"))
if __name__ == "__main__":
main()
|
|
#
# dp.py -- Data pipeline and reduction routines
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy
from collections import OrderedDict
from ginga import AstroImage, colors
from ginga.RGBImage import RGBImage
from ginga.util import wcs
# counter used to name anonymous images
prefixes = dict(dp=0)
def get_image_name(image, pfx='dp'):
global prefixes
name = image.get('name', None)
if name is None:
if pfx not in prefixes:
prefixes[pfx] = 0
name = '{0}{1:d}'.format(pfx, prefixes[pfx])
prefixes[pfx] += 1
image.set(name=name)
return name
def make_image(data_np, oldimage, header, pfx='dp'):
# Prepare a new image with the numpy array as data
image = AstroImage.AstroImage()
image.set_data(data_np)
# Set the header to be the old image header updated
# with items from the new header
oldhdr = oldimage.get_header()
oldhdr.update(header)
image.update_keywords(oldhdr)
# give the image a name
get_image_name(image, pfx=pfx)
return image
def create_blank_image(ra_deg, dec_deg, fov_deg, px_scale, rot_deg,
cdbase=[1, 1], dtype=None, logger=None, pfx='dp'):
# ra and dec in traditional format
ra_txt = wcs.raDegToString(ra_deg, format='%02d:%02d:%06.3f')
dec_txt = wcs.decDegToString(dec_deg, format='%s%02d:%02d:%05.2f')
# Create an empty image
imagesize = int(round(fov_deg / px_scale))
# round to an even size
if imagesize % 2 != 0:
imagesize += 1
## # round to an odd size
## if imagesize % 2 == 0:
## imagesize += 1
width = height = imagesize
if dtype is None:
dtype = numpy.float32
data = numpy.zeros((height, width), dtype=dtype)
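# put the WCS reference pixel at the center of the image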
crpix = float(imagesize // 2)
header = OrderedDict((('SIMPLE', True),
('BITPIX', -32),
('EXTEND', True),
('NAXIS', 2),
('NAXIS1', imagesize),
('NAXIS2', imagesize),
('RA', ra_txt),
('DEC', dec_txt),
('EQUINOX', 2000.0),
('OBJECT', 'MOSAIC'),
('LONPOLE', 180.0),
))
# Add basic WCS keywords
wcshdr = wcs.simple_wcs(crpix, crpix, ra_deg, dec_deg, px_scale,
rot_deg, cdbase=cdbase)
header.update(wcshdr)
# Create image container
image = AstroImage.AstroImage(data, logger=logger)
image.update_keywords(header)
# give the image a name
get_image_name(image, pfx=pfx)
return image
def recycle_image(image, ra_deg, dec_deg, fov_deg, px_scale, rot_deg,
cdbase=[1, 1], logger=None, pfx='dp'):
# ra and dec in traditional format
ra_txt = wcs.raDegToString(ra_deg, format='%02d:%02d:%06.3f')
dec_txt = wcs.decDegToString(dec_deg, format='%s%02d:%02d:%05.2f')
header = image.get_header()
pointing = OrderedDict((('RA', ra_txt),
('DEC', dec_txt),
))
header.update(pointing)
# Update WCS keywords and internal wcs objects
wd, ht = image.get_size()
crpix1 = wd // 2
crpix2 = ht // 2
wcshdr = wcs.simple_wcs(crpix1, crpix2, ra_deg, dec_deg, px_scale,
rot_deg, cdbase=cdbase)
header.update(wcshdr)
# this should update the wcs
image.update_keywords(header)
# zero out data array
data = image.get_data()
data.fill(0)
## # Create new image container sharing same data
## new_image = AstroImage.AstroImage(data, logger=logger)
## new_image.update_keywords(header)
## # give the image a name
## get_image_name(new_image, pfx=pfx)
new_image = image
return new_image
def make_flat(imglist, bias=None):
flats = [ image.get_data() for image in imglist ]
flatarr = numpy.array(flats)
# Take the median of the individual frames
flat = numpy.median(flatarr, axis=0)
# Normalize flat
# mean or median?
#norm = numpy.mean(flat.flat)
norm = numpy.median(flat.flat)
flat = flat / norm
# no zero divisors
flat[flat == 0.0] = 1.0
img_flat = make_image(flat, imglist[0], {}, pfx='flat')
return img_flat
def make_bias(imglist):
biases = [ image.get_data() for image in imglist ]
biasarr = numpy.array(biases)
# Take the median of the individual frames
bias = numpy.median(biasarr, axis=0)
img_bias = make_image(bias, imglist[0], {}, pfx='bias')
return img_bias
def add(image1, image2):
data1_np = image1.get_data()
data2_np = image2.get_data()
result = data1_np + data2_np
image = make_image(result, image1, {}, pfx='add')
return image
def subtract(image1, image2):
data1_np = image1.get_data()
data2_np = image2.get_data()
result = data1_np - data2_np
image = make_image(result, image1, {}, pfx='sub')
return image
def divide(image1, image2):
data1_np = image1.get_data()
data2_np = image2.get_data()
result = data1_np / data2_np
image = make_image(result, image1, {}, pfx='div')
return image
# https://gist.github.com/stscieisenhamer/25bf6287c2c724cb9cc7
def masktorgb(mask, color='lightgreen', alpha=1.0):
"""Convert boolean mask to RGB image object for canvas overlay.
Parameters
----------
mask : ndarray
Boolean mask to overlay. 2D image only.
color : str
Color name accepted by Ginga.
alpha : float
Opacity. Unmasked data are always transparent.
Returns
-------
rgbobj : RGBImage
RGB image for canvas Image object.
Raises
------
ValueError
Invalid mask dimension.
"""
mask = numpy.asarray(mask)
if mask.ndim != 2:
raise ValueError('ndim={0} is not supported'.format(mask.ndim))
ht, wd = mask.shape
r, g, b = colors.lookup_color(color)
rgbobj = RGBImage(data_np = numpy.zeros((ht, wd, 4), dtype=numpy.uint8))
rc = rgbobj.get_slice('R')
gc = rgbobj.get_slice('G')
bc = rgbobj.get_slice('B')
ac = rgbobj.get_slice('A')
ac[:] = 0 # Transparent background
rc[mask] = int(r * 255)
gc[mask] = int(g * 255)
bc[mask] = int(b * 255)
ac[mask] = int(alpha * 255)
# For debugging
#rgbobj.save_as_file('ztmp_rgbobj.png')
return rgbobj
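# Example usage (sketch; 'image' and the threshold value are placeholders):
#   mask = image.get_data() > 60000
#   rgbobj = masktorgb(mask, color='red', alpha=0.5)
# The returned RGBImage can then be shown as an image overlay on a Ginga canvas.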
def split_n(lst, sz):
n = len(lst)
k, m = n // sz, n % sz
return [ lst[i * k + min(i, m):(i + 1) * k + min(i + 1, m)]
for i in range(sz) ]
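# e.g. split_n([1, 2, 3, 4, 5], 2) -> [[1, 2, 3], [4, 5]]; the first len(lst) % sz
# chunks each get one extra element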
# END
|
|
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from corehq.apps.reminders.models import REMINDER_TYPE_ONE_TIME
from corehq.apps.reports.commconnect import div, CommConnectReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DataTablesColumnGroup
from corehq.apps.reports.graph_models import Axis, LineChart
from corehq.apps.sms.models import WORKFLOW_KEYWORD, WORKFLOW_REMINDER, WORKFLOW_BROADCAST
from corehq.elastic import es_query, ES_URLS, es_histogram
from dimagi.utils.couch.database import get_db
WORKFLOWS = [WORKFLOW_KEYWORD, WORKFLOW_REMINDER, WORKFLOW_BROADCAST]
NA = 'N/A'
class BaseSystemOverviewReport(CommConnectReport):
fields = [
'corehq.apps.reports.filters.select.MultiGroupFilter',
'corehq.apps.reports.filters.select.MultiCaseGroupFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
]
class SystemOverviewReport(BaseSystemOverviewReport):
slug = 'system_overview'
name = ugettext_noop("Overview")
description = ugettext_noop("Summary of the different types of messages sent and received by the system.")
section_name = ugettext_noop("Overview")
def workflow_query(self, workflow=None, additional_facets=None):
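# Facet the domain's SMS messages for a single workflow or, when workflow is
# None, for everything outside the known workflows (the "Other" row).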
additional_facets = additional_facets or []
q = self.base_query
if workflow:
q["filter"] = {"and": [{"term": {"workflow": workflow.lower()}}]}
else:
q["filter"] = {"and": [{"not": {"in": {"workflow": [w.lower() for w in WORKFLOWS]}}}]}
facets = ['couch_recipient_doc_type', 'direction'] + additional_facets
return es_query(q=q, facets=facets, es_url=ES_URLS['sms'], size=0)
@property
def headers(self):
columns = [
DataTablesColumn("", sortable=False),
DataTablesColumn(_("Number"), help_text=_("Number of individual items")),
]
columns.append(DataTablesColumnGroup("",
DataTablesColumn(_("Mobile Worker Messages"),
help_text=_("SMS Messages to or from mobile workers' phones, incoming and outgoing")),
DataTablesColumn(_("Case Messages"),
help_text=_("SMS Messages to or from a phone number in a case, incoming and outgoing"))))
columns.append(DataTablesColumnGroup("",
DataTablesColumn(_("Incoming"), help_text=_("Total incoming SMS")),
DataTablesColumn(_("Outgoing"), help_text=_("Total outgoing SMS"))))
return DataTablesHeader(*columns)
@property
def rows(self):
def row(rowname, workflow=None):
additional_workflow_facets = {
WORKFLOW_KEYWORD: ['xforms_session_couch_id'],
WORKFLOW_REMINDER: ['reminder_id'],
}
additional_facets = additional_workflow_facets.get(workflow)
facets = self.workflow_query(workflow, additional_facets)['facets']
to_cases, to_users, outgoing, incoming = 0, 0, 0, 0
for term in facets['couch_recipient_doc_type']['terms']:
if term['term'] == 'commcarecase':
to_cases = term['count']
elif term['term'] == 'commcareuser':
to_users = term['count']
for term in facets['direction']['terms']:
if term['term'] == 'o':
outgoing = term['count']
elif term['term'] == 'i':
incoming = term['count']
number = NA
if workflow in additional_workflow_facets:
number = len(facets[additional_workflow_facets[workflow][0]]["terms"])
elif workflow == WORKFLOW_BROADCAST:
key = [self.domain, REMINDER_TYPE_ONE_TIME]
data = get_db().view('reminders/handlers_by_reminder_type',
reduce=True,
startkey=key + [self.datespan.startdate_param_utc],
endkey=key + [self.datespan.enddate_param_utc],
).one()
number = data["value"] if data else 0
return [rowname, number, to_users, to_cases, incoming, outgoing]
rows = [
row(_("Keywords"), WORKFLOW_KEYWORD),
row(_("Reminders"), WORKFLOW_REMINDER),
row(_("Broadcasts"), WORKFLOW_BROADCAST),
row(_("Other")),
]
def total(index):
return sum([l[index] for l in rows if l[index] != NA])
self.total_row = [_("Total"), total(1), total(2), total(3), total(4), total(5)]
return rows
def es_histogram(self, workflow):
q = {"query": {"bool": {"must": [{"term": {"workflow": workflow.lower()}}]}}}
return es_histogram(histo_type="sms", domains=[self.domain], q=self.add_recipients_to_query(q),
startdate=self.datespan.startdate_display, enddate=self.datespan.enddate_display)
@property
def charts(self):
chart = LineChart(_("Messages over time"), None, Axis(_('# of Messages'), ',.1d'))
chart.data = {
_("Keywords"): self.es_histogram(WORKFLOW_KEYWORD),
_("Reminders"): self.es_histogram(WORKFLOW_REMINDER),
_("Broadcasts"): self.es_histogram(WORKFLOW_BROADCAST),
}
chart.data_needs_formatting = True
chart.x_axis_uses_dates = True
return [chart]
class SystemUsersReport(BaseSystemOverviewReport):
slug = 'user_summary'
name = ugettext_noop("User Summary")
description = ugettext_noop("Summary of recipient information including number of active recipients and message usage by type of recipient (case vs. mobile worker)")
section_name = ugettext_noop("User Summary")
def active_query(self, recipient_type):
q = self.base_query
q["query"]["bool"]["must"].append({"term": {"direction": "i"}})
q["query"]["bool"]["must"].append({"term": {"couch_recipient_doc_type": recipient_type}})
return es_query(q=q, facets=['couch_recipient'], es_url=ES_URLS['sms'], size=0)
def messages_query(self):
q = self.base_query
facets = ['couch_recipient_doc_type']
return es_query(q=q, facets=facets, es_url=ES_URLS['sms'], size=0)
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn("Users", sortable=False),
DataTablesColumn(_("Mobile Workers"), help_text=_("SMS Messaging Statistics for Mobile Workers")),
DataTablesColumn(_("Cases"), help_text=_("SMS Messaging Statistics for Cases")),
DataTablesColumn(_("Total"), help_text=_("SMS Messaging Statistics for Mobile Workers and Cases")),
)
@property
def rows(self):
def row(header, mw_val, case_val):
return [_(header), mw_val, case_val, mw_val + case_val]
def verified_numbered_users(owner_type, ids=None, check_filters=False):
if not ids and not check_filters:
data = get_db().view('sms/verified_number_by_domain',
reduce=True,
startkey=[self.domain, owner_type],
endkey=[self.domain, owner_type, {}],
).one()
return data["value"] if data else 0
else:
owners = get_db().view('sms/verified_number_by_domain',
reduce=False,
startkey=[self.domain, owner_type],
endkey=[self.domain, owner_type, {}],
).all()
return len(filter(lambda oid: oid in ids, [o["key"][2] for o in owners]))
owner_ids = self.combined_user_ids if self.users_by_group else []
case_ids = self.cases_by_case_group if self.cases_by_case_group else []
check_filters = bool(owner_ids or case_ids)
number = row("Number", verified_numbered_users("CommCareUser", owner_ids, check_filters=check_filters),
verified_numbered_users("CommCareCase", case_ids, check_filters=check_filters))
def get_actives(recipient_type):
return len(self.active_query(recipient_type)['facets']['couch_recipient']['terms'])
active = row(_("Number Active"), get_actives("commcareuser"), get_actives("commcarecase"))
perc_active = [_("% Active"),
div(active[1], number[1], True), div(active[2], number[2], True), div(active[3], number[3], True)]
facets = self.messages_query()['facets']
to_users, to_cases = 0, 0
for term in facets['couch_recipient_doc_type']['terms']:
if term['term'] == 'commcarecase':
to_cases = term['count']
elif term['term'] == 'commcareuser':
to_users = term['count']
messages = row(_("Number of SMS Messages, incoming and outgoing"), to_users, to_cases)
avg_per_user = [_("Avg SMS Messages per User"),
div(messages[1], number[1]), div(messages[2], number[2]), div(messages[3], number[3])]
avg_per_act_user = [_("Avg SMS Messages per Active User"),
div(messages[1], active[1]), div(messages[2], active[2]), div(messages[3], active[3])]
return [number, active, perc_active, messages, avg_per_user, avg_per_act_user]
|
|
"""Support for a ScreenLogic heating device."""
import logging
from screenlogicpy.const import DATA as SL_DATA, EQUIPMENT, HEAT_MODE
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_PRESET_MODE,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.restore_state import RestoreEntity
from . import ScreenlogicEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SUPPORTED_FEATURES = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
SUPPORTED_MODES = [HVAC_MODE_OFF, HVAC_MODE_HEAT]
SUPPORTED_PRESETS = [
HEAT_MODE.SOLAR,
HEAT_MODE.SOLAR_PREFERRED,
HEAT_MODE.HEATER,
]
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up entry."""
entities = []
coordinator = hass.data[DOMAIN][config_entry.entry_id]
for body in coordinator.data[SL_DATA.KEY_BODIES]:
entities.append(ScreenLogicClimate(coordinator, body))
async_add_entities(entities)
class ScreenLogicClimate(ScreenlogicEntity, ClimateEntity, RestoreEntity):
"""Represents a ScreenLogic climate entity."""
def __init__(self, coordinator, body):
"""Initialize a ScreenLogic climate entity."""
super().__init__(coordinator, body)
self._configured_heat_modes = []
# Is solar listed as available equipment?
if self.coordinator.data["config"]["equipment_flags"] & EQUIPMENT.FLAG_SOLAR:
self._configured_heat_modes.extend(
[HEAT_MODE.SOLAR, HEAT_MODE.SOLAR_PREFERRED]
)
self._configured_heat_modes.append(HEAT_MODE.HEATER)
self._last_preset = None
@property
def name(self) -> str:
"""Name of the heater."""
ent_name = self.body["heat_status"]["name"]
return f"{self.gateway_name} {ent_name}"
@property
def min_temp(self) -> float:
"""Minimum allowed temperature."""
return self.body["min_set_point"]["value"]
@property
def max_temp(self) -> float:
"""Maximum allowed temperature."""
return self.body["max_set_point"]["value"]
@property
def current_temperature(self) -> float:
"""Return water temperature."""
return self.body["last_temperature"]["value"]
@property
def target_temperature(self) -> float:
"""Target temperature."""
return self.body["heat_set_point"]["value"]
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
if self.config_data["is_celsius"]["value"] == 1:
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def hvac_mode(self) -> str:
"""Return the current hvac mode."""
if self.body["heat_mode"]["value"] > 0:
return HVAC_MODE_HEAT
return HVAC_MODE_OFF
@property
def hvac_modes(self):
"""Return th supported hvac modes."""
return SUPPORTED_MODES
@property
def hvac_action(self) -> str:
"""Return the current action of the heater."""
if self.body["heat_status"]["value"] > 0:
return CURRENT_HVAC_HEAT
if self.hvac_mode == HVAC_MODE_HEAT:
return CURRENT_HVAC_IDLE
return CURRENT_HVAC_OFF
@property
def preset_mode(self) -> str:
"""Return current/last preset mode."""
if self.hvac_mode == HVAC_MODE_OFF:
return HEAT_MODE.NAME_FOR_NUM[self._last_preset]
return HEAT_MODE.NAME_FOR_NUM[self.body["heat_mode"]["value"]]
@property
def preset_modes(self):
"""All available presets."""
return [
HEAT_MODE.NAME_FOR_NUM[mode_num] for mode_num in self._configured_heat_modes
]
@property
def supported_features(self):
"""Supported features of the heater."""
return SUPPORTED_FEATURES
async def async_set_temperature(self, **kwargs) -> None:
"""Change the setpoint of the heater."""
if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
raise ValueError(f"Expected attribute {ATTR_TEMPERATURE}")
if await self.gateway.async_set_heat_temp(
int(self._data_key), int(temperature)
):
await self._async_refresh()
else:
raise HomeAssistantError(
f"Failed to set_temperature {temperature} on body {self.body['body_type']['value']}"
)
async def async_set_hvac_mode(self, hvac_mode) -> None:
"""Set the operation mode."""
if hvac_mode == HVAC_MODE_OFF:
mode = HEAT_MODE.OFF
else:
mode = HEAT_MODE.NUM_FOR_NAME[self.preset_mode]
if await self.gateway.async_set_heat_mode(int(self._data_key), int(mode)):
await self._async_refresh()
else:
raise HomeAssistantError(
f"Failed to set_hvac_mode {mode} on body {self.body['body_type']['value']}"
)
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set the preset mode."""
_LOGGER.debug("Setting last_preset to %s", HEAT_MODE.NUM_FOR_NAME[preset_mode])
self._last_preset = mode = HEAT_MODE.NUM_FOR_NAME[preset_mode]
if self.hvac_mode == HVAC_MODE_OFF:
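# While the heater is off, just remember the requested preset; it is applied
# the next time the hvac mode is set to heat.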
return
if await self.gateway.async_set_heat_mode(int(self._data_key), int(mode)):
await self._async_refresh()
else:
raise HomeAssistantError(
f"Failed to set_preset_mode {mode} on body {self.body['body_type']['value']}"
)
async def async_added_to_hass(self):
"""Run when entity is about to be added."""
await super().async_added_to_hass()
_LOGGER.debug("Startup last preset is %s", self._last_preset)
if self._last_preset is not None:
return
prev_state = await self.async_get_last_state()
if (
prev_state is not None
and prev_state.attributes.get(ATTR_PRESET_MODE) is not None
):
_LOGGER.debug(
"Startup setting last_preset to %s from prev_state",
HEAT_MODE.NUM_FOR_NAME[prev_state.attributes.get(ATTR_PRESET_MODE)],
)
self._last_preset = HEAT_MODE.NUM_FOR_NAME[
prev_state.attributes.get(ATTR_PRESET_MODE)
]
else:
_LOGGER.debug(
"Startup setting last_preset to default (%s)",
self._configured_heat_modes[0],
)
self._last_preset = self._configured_heat_modes[0]
@property
def body(self):
"""Shortcut to access body data."""
return self.coordinator.data[SL_DATA.KEY_BODIES][self._data_key]
|
|
"""Tests Holocron CLI."""
import logging
import pathlib
import subprocess
import sys
import textwrap
import mock
import pytest
import yaml
@pytest.fixture(autouse=True)
def _fake_root_logger(monkeypatch):
"""Prevent modifying global root instance."""
monkeypatch.setattr(logging, "root", logging.getLogger("fakeroot"))
@pytest.fixture(scope="function")
def create_site(tmpdir):
def create(structure):
for path, content in structure:
tmpdir.ensure(path).write_binary(content)
return create
@pytest.fixture(scope="function")
def example_site(create_site):
holocron_yml = yaml.safe_dump(
{
"metadata": {"url": "https://yoda.ua"},
"pipes": {"test": [{"name": "source"}, {"name": "save"}]},
},
encoding="UTF-8",
default_flow_style=False,
)
return create_site(
[
(pathlib.Path("cv.md"), b"yoda"),
(pathlib.Path("about", "photo.png"), b""),
(
pathlib.Path("2019", "02", "12", "skywalker", "index.html"),
b"luke",
),
(pathlib.Path(".holocron.yml"), holocron_yml),
]
)
@pytest.fixture(scope="function")
def execute(capsys):
def execute(args, as_subprocess=True):
if as_subprocess:
return subprocess.check_output(["holocron"] + args, stderr=subprocess.PIPE)
from holocron.__main__ import main
main(args)
return capsys.readouterr().out
return execute
def test_run_progress_info(monkeypatch, tmpdir, execute, example_site):
"""Built items are shown on standard output."""
monkeypatch.chdir(tmpdir)
assert set(execute(["run", "test"]).splitlines()) == {
b"==> .holocron.yml",
b"==> cv.md",
b"==> 2019/02/12/skywalker/index.html",
b"==> about/photo.png",
}
def test_run_progress_info_colored(monkeypatch, tmpdir, execute, example_site):
"""Built items are shown and colorized on standard output."""
# colorama strips away ANSI escape sequences if standard output is not
# connected to a tty; since pytest mocks the standard i/o streams, these mocked
# streams have to be patched to simulate a tty connection.
monkeypatch.setattr(sys.stdout, "isatty", mock.Mock(return_value=True))
monkeypatch.chdir(tmpdir)
assert set(execute(["run", "test"], as_subprocess=False).splitlines()) == {
"\x1b[1m\x1b[32m==>\x1b[0m \x1b[1m.holocron.yml\x1b[0m",
"\x1b[1m\x1b[32m==>\x1b[0m \x1b[1mcv.md\x1b[0m",
"\x1b[1m\x1b[32m==>\x1b[0m \x1b[1m2019/02/12/skywalker/index.html\x1b[0m",
"\x1b[1m\x1b[32m==>\x1b[0m \x1b[1mabout/photo.png\x1b[0m",
}
def test_run_conf_yml_not_found(monkeypatch, tmpdir, execute, example_site):
"""Proceed with default settings."""
monkeypatch.chdir(tmpdir)
tmpdir.join(".holocron.yml").remove()
# Because Holocron has no built-in pipes, there's nothing we can run and
# thus exception is expected.
with pytest.raises(subprocess.CalledProcessError):
execute(["run", "test"])
def test_run_conf_yml_malformed(monkeypatch, tmpdir, execute, example_site):
"""Error message is printed."""
monkeypatch.chdir(tmpdir)
tmpdir.join(".holocron.yml").write_text(
textwrap.dedent(
"""\
metadata:
crap
key: value
"""
),
encoding="UTF-8",
)
with pytest.raises(subprocess.CalledProcessError) as excinfo:
execute(["run", "test"])
assert str(excinfo.value.stderr.decode("UTF-8").strip()) == (
"Cannot parse a configuration file. Context: mapping values are not "
"allowed here\n"
' in ".holocron.yml", line 3, column 6'
)
def test_run_conf_yml_directory(monkeypatch, tmpdir, execute, example_site):
"""Error message is printed."""
monkeypatch.chdir(tmpdir)
tmpdir.join(".holocron.yml").remove()
tmpdir.mkdir(".holocron.yml")
with pytest.raises(subprocess.CalledProcessError) as excinfo:
execute(["run", "test"])
assert (
str(excinfo.value.stderr.decode("UTF-8").strip())
== "[Errno 21] Is a directory: '.holocron.yml'"
)
def test_run_conf_yml_interpolate(monkeypatch, tmpdir, execute):
"""Values such as '%(here)s' are interpolated."""
monkeypatch.chdir(tmpdir)
tmpdir.join(".holocron.yml").write_binary(
yaml.safe_dump(
{
"metadata": {"url": "https://yoda.ua"},
"pipes": {
"test": [
{"name": "source"},
{
"name": "metadata",
"args": {
"metadata": {"content": "%(here)s/secret"},
},
},
{"name": "save"},
]
},
},
encoding="UTF-8",
default_flow_style=False,
)
)
tmpdir.join("test.txt").write_binary(b"")
execute(["run", "test"])
assert (
tmpdir.join("_site", "test.txt").read_text(encoding="UTF-8")
== tmpdir.join("secret").strpath
)
def test_run_conf_yml_interpolate_in_path(monkeypatch, tmpdir, execute, example_site):
"""Values such as '%(here)s' are interpolated."""
tmpdir.join(".holocron.yml").write_binary(
yaml.safe_dump(
{
"metadata": {"url": "https://yoda.ua"},
"pipes": {
"test": [
{"name": "source", "args": {"path": "%(here)s"}},
{"name": "save", "args": {"to": "%(here)s/_compiled"}},
]
},
},
encoding="UTF-8",
default_flow_style=False,
)
)
execute(["-c", tmpdir.join(".holocron.yml").strpath, "run", "test"])
assert tmpdir.join("_compiled", "cv.md").read_binary() == b"yoda"
|
|
from test import test_support
import unittest
import codecs
import locale
import sys, StringIO, _testcapi
def coding_checker(self, coder):
def check(input, expect):
self.assertEqual(coder(input), (expect, len(input)))
return check
class Queue(object):
"""
queue: write bytes at one end, read bytes from the other end
"""
def __init__(self):
self._buffer = ""
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = ""
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
class ReadTest(unittest.TestCase):
def check_partial(self, input, partialresults):
# get a StreamReader for the encoding and feed the bytestring version
# of input to the reader byte by byte. Read everything available from
# the StreamReader and check that the results equal the appropriate
# entries from partialresults.
q = Queue()
r = codecs.getreader(self.encoding)(q)
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
q.write(c)
result += r.read()
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(r.read(), u"")
self.assertEqual(r.bytebuffer, "")
self.assertEqual(r.charbuffer, u"")
# do the check again, this time using an incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(c)
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode("", True), u"")
self.assertEqual(d.buffer, "")
# Check whether the reset method works properly
d.reset()
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(c)
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode("", True), u"")
self.assertEqual(d.buffer, "")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
u"".join(codecs.iterdecode(encoded, self.encoding))
)
def test_readline(self):
def getreader(input):
stream = StringIO.StringIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = u"foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = u"foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = u"foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate(u"\n \r\n \r \u2028".split()):
vw.append((i*200+200)*u"\u3042" + lineend)
vwo.append((i*200+200)*u"\u3042")
self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in xrange(80):
for lineend in u"\n \r\n \r \u2028".split():
s = 10*(size*u"a" + lineend + u"xxx\n")
reader = getreader(s)
for i in xrange(10):
self.assertEqual(
reader.readline(keepends=True),
size*u"a" + lineend,
)
reader = getreader(s)
for i in xrange(10):
self.assertEqual(
reader.readline(keepends=False),
size*u"a",
)
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse() # descending\r\n',
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
' #-------------------- TODAY\'S ARTICLES\r\n',
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
' #-------------------- ACTIVE ARTICLES redirect\r\n',
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
' #-------------------- LOGIN PAGE redirect\r\n',
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
' #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
' #-------------------- RECENT ARTICLES\r\n',
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = StringIO.StringIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue()
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write(u"foo\r")
self.assertEqual(reader.readline(keepends=False), u"foo")
writer.write(u"\nbar\r")
self.assertEqual(reader.readline(keepends=False), u"")
self.assertEqual(reader.readline(keepends=False), u"bar")
writer.write(u"baz")
self.assertEqual(reader.readline(keepends=False), u"baz")
self.assertEqual(reader.readline(keepends=False), u"")
# Lineends
writer.write(u"foo\r")
self.assertEqual(reader.readline(keepends=True), u"foo\r")
writer.write(u"\nbar\r")
self.assertEqual(reader.readline(keepends=True), u"\n")
self.assertEqual(reader.readline(keepends=True), u"bar\r")
writer.write(u"baz")
self.assertEqual(reader.readline(keepends=True), u"baz")
self.assertEqual(reader.readline(keepends=True), u"")
writer.write(u"foo\r\n")
self.assertEqual(reader.readline(keepends=True), u"foo\r\n")
def test_bug1098990_a(self):
s1 = u"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = u"offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = u"next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = StringIO.StringIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), u"")
def test_bug1098990_b(self):
s1 = u"aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = u"bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = u"stillokay:bbbbxx\r\n"
s4 = u"broken!!!!badbad\r\n"
s5 = u"againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = StringIO.StringIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), u"")
class UTF32Test(ReadTest):
encoding = "utf-32"
spamle = ('\xff\xfe\x00\x00'
's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
spambe = ('\x00\x00\xfe\xff'
'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = StringIO.StringIO()
f = writer(s)
f.write(u"spam")
f.write(u"spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = StringIO.StringIO(d)
f = reader(s)
self.assertEqual(f.read(), u"spamspam")
def test_badbom(self):
s = StringIO.StringIO(4*"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = StringIO.StringIO(8*"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"", # first byte of BOM read
u"", # second byte of BOM read
u"", # third byte of BOM read
u"", # fourth byte of BOM read => byteorder known
u"",
u"",
u"",
u"\x00",
u"\x00",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_handlers(self):
self.assertEqual((u'\ufffd', 1),
codecs.utf_32_decode('\x01', 'replace', True))
self.assertEqual((u'', 1),
codecs.utf_32_decode('\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded_le = '\xff\xfe\x00\x00' + '\x00\x00\x01\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_decode(encoded_le)[0])
encoded_be = '\x00\x00\xfe\xff' + '\x00\x01\x00\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest):
encoding = "utf-32-le"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"",
u"",
u"\x00",
u"\x00",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_simple(self):
self.assertEqual(u"\U00010203".encode(self.encoding), "\x03\x02\x01\x00")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = '\x00\x00\x01\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest):
encoding = "utf-32-be"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"",
u"",
u"\x00",
u"\x00",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_simple(self):
self.assertEqual(u"\U00010203".encode(self.encoding), "\x00\x01\x02\x03")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = '\x00\x01\x00\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest):
encoding = "utf-16"
spamle = '\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
spambe = '\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = StringIO.StringIO()
f = writer(s)
f.write(u"spam")
f.write(u"spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = StringIO.StringIO(d)
f = reader(s)
self.assertEqual(f.read(), u"spamspam")
def test_badbom(self):
s = StringIO.StringIO("\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = StringIO.StringIO("\xff\xff\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"", # first byte of BOM read
u"", # second byte of BOM read => byteorder known
u"",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_handlers(self):
self.assertEqual((u'\ufffd', 1),
codecs.utf_16_decode('\x01', 'replace', True))
self.assertEqual((u'', 1),
codecs.utf_16_decode('\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode, "\xff", "strict", True)
def test_bug691291(self):
# Files are always opened in binary mode, even if no binary mode was
# specified. This means that no automatic conversion of '\n' is done
# on reading and writing.
s1 = u'Hello\r\nworld\r\n'
s = s1.encode(self.encoding)
self.addCleanup(test_support.unlink, test_support.TESTFN)
with open(test_support.TESTFN, 'wb') as fp:
fp.write(s)
with codecs.open(test_support.TESTFN, 'U', encoding=self.encoding) as reader:
self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest):
encoding = "utf-16-le"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_errors(self):
tests = [
(b'\xff', u'\ufffd'),
(b'A\x00Z', u'A\ufffd'),
(b'A\x00B\x00C\x00D\x00Z', u'ABCD\ufffd'),
(b'\x00\xd8', u'\ufffd'),
(b'\x00\xd8A', u'\ufffd'),
(b'\x00\xd8A\x00', u'\ufffdA'),
(b'\x00\xdcA\x00', u'\ufffdA'),
]
for raw, expected in tests:
self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
raw, 'strict', True)
self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
class UTF16BETest(ReadTest):
encoding = "utf-16-be"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_errors(self):
tests = [
(b'\xff', u'\ufffd'),
(b'\x00A\xff', u'A\ufffd'),
(b'\x00A\x00B\x00C\x00DZ', u'ABCD\ufffd'),
(b'\xd8\x00', u'\ufffd'),
(b'\xd8\x00\xdc', u'\ufffd'),
(b'\xd8\x00\x00A', u'\ufffdA'),
(b'\xdc\x00\x00A', u'\ufffdA'),
]
for raw, expected in tests:
self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
raw, 'strict', True)
self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
class UTF8Test(ReadTest):
encoding = "utf-8"
def test_partial(self):
self.check_partial(
u"\x00\xff\u07ff\u0800\uffff\U00010000",
[
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u07ff",
u"\x00\xff\u07ff",
u"\x00\xff\u07ff",
u"\x00\xff\u07ff\u0800",
u"\x00\xff\u07ff\u0800",
u"\x00\xff\u07ff\u0800",
u"\x00\xff\u07ff\u0800\uffff",
u"\x00\xff\u07ff\u0800\uffff",
u"\x00\xff\u07ff\u0800\uffff",
u"\x00\xff\u07ff\u0800\uffff",
u"\x00\xff\u07ff\u0800\uffff\U00010000",
]
)
class UTF7Test(ReadTest):
encoding = "utf-7"
def test_partial(self):
self.check_partial(
u"a+-b",
[
u"a",
u"a",
u"a+",
u"a+-",
u"a+-b",
]
)
def test_errors(self):
tests = [
('a\xffb', u'a\ufffdb'),
('a+IK', u'a\ufffd'),
('a+IK-b', u'a\ufffdb'),
('a+IK,b', u'a\ufffdb'),
('a+IKx', u'a\u20ac\ufffd'),
('a+IKx-b', u'a\u20ac\ufffdb'),
('a+IKwgr', u'a\u20ac\ufffd'),
('a+IKwgr-b', u'a\u20ac\ufffdb'),
('a+IKwgr,', u'a\u20ac\ufffd'),
('a+IKwgr,-b', u'a\u20ac\ufffd-b'),
('a+IKwgrB', u'a\u20ac\u20ac\ufffd'),
('a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'),
('a+/,+IKw-b', u'a\ufffd\u20acb'),
('a+//,+IKw-b', u'a\ufffd\u20acb'),
('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'),
('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'),
]
for raw, expected in tests:
self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode,
raw, 'strict', True)
self.assertEqual(raw.decode('utf-7', 'replace'), expected)
def test_nonbmp(self):
self.assertEqual(u'\U000104A0'.encode(self.encoding), '+2AHcoA-')
self.assertEqual(u'\ud801\udca0'.encode(self.encoding), '+2AHcoA-')
self.assertEqual('+2AHcoA-'.decode(self.encoding), u'\U000104A0')
class UTF16ExTest(unittest.TestCase):
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_ex_decode, "\xff", "strict", 0, True)
def test_bad_args(self):
self.assertRaises(TypeError, codecs.utf_16_ex_decode)
class ReadBufferTest(unittest.TestCase):
def test_array(self):
import array
self.assertEqual(
codecs.readbuffer_encode(array.array("c", "spam")),
("spam", 4)
)
def test_empty(self):
self.assertEqual(codecs.readbuffer_encode(""), ("", 0))
def test_bad_args(self):
self.assertRaises(TypeError, codecs.readbuffer_encode)
self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
class CharBufferTest(unittest.TestCase):
def test_string(self):
self.assertEqual(codecs.charbuffer_encode("spam"), ("spam", 4))
def test_empty(self):
self.assertEqual(codecs.charbuffer_encode(""), ("", 0))
def test_bad_args(self):
self.assertRaises(TypeError, codecs.charbuffer_encode)
self.assertRaises(TypeError, codecs.charbuffer_encode, 42)
class UTF8SigTest(ReadTest):
encoding = "utf-8-sig"
def test_partial(self):
self.check_partial(
u"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
[
u"",
u"",
u"", # First BOM has been read and skipped
u"",
u"",
u"\ufeff", # Second BOM has been read and emitted
u"\ufeff\x00", # "\x00" read and emitted
u"\ufeff\x00", # First byte of encoded u"\xff" read
u"\ufeff\x00\xff", # Second byte of encoded u"\xff" read
u"\ufeff\x00\xff", # First byte of encoded u"\u07ff" read
u"\ufeff\x00\xff\u07ff", # Second byte of encoded u"\u07ff" read
u"\ufeff\x00\xff\u07ff",
u"\ufeff\x00\xff\u07ff",
u"\ufeff\x00\xff\u07ff\u0800",
u"\ufeff\x00\xff\u07ff\u0800",
u"\ufeff\x00\xff\u07ff\u0800",
u"\ufeff\x00\xff\u07ff\u0800\uffff",
u"\ufeff\x00\xff\u07ff\u0800\uffff",
u"\ufeff\x00\xff\u07ff\u0800\uffff",
u"\ufeff\x00\xff\u07ff\u0800\uffff",
u"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
]
)
def test_bug1601501(self):
# SF bug #1601501: check that the codec works with a buffer
unicode("\xef\xbb\xbf", "utf-8-sig")
def test_bom(self):
d = codecs.getincrementaldecoder("utf-8-sig")()
s = u"spam"
self.assertEqual(d.decode(s.encode("utf-8-sig")), s)
def test_stream_bom(self):
unistring = u"ABC\u00A1\u2200XYZ"
bytestring = codecs.BOM_UTF8 + "ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + range(1, 11) + \
[64, 128, 256, 512, 1024]:
istream = reader(StringIO.StringIO(bytestring))
ostream = StringIO.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
def test_stream_bare(self):
unistring = u"ABC\u00A1\u2200XYZ"
bytestring = "ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + range(1, 11) + \
[64, 128, 256, 512, 1024]:
istream = reader(StringIO.StringIO(bytestring))
ostream = StringIO.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.escape_decode(""), ("", 0))
def test_raw(self):
decode = codecs.escape_decode
for b in range(256):
b = chr(b)
if b != '\\':
self.assertEqual(decode(b + '0'), (b + '0', 2))
def test_escape(self):
decode = codecs.escape_decode
check = coding_checker(self, decode)
check(b"[\\\n]", b"[]")
check(br'[\"]', b'["]')
check(br"[\']", b"[']")
check(br"[\\]", br"[\]")
check(br"[\a]", b"[\x07]")
check(br"[\b]", b"[\x08]")
check(br"[\t]", b"[\x09]")
check(br"[\n]", b"[\x0a]")
check(br"[\v]", b"[\x0b]")
check(br"[\f]", b"[\x0c]")
check(br"[\r]", b"[\x0d]")
check(br"[\7]", b"[\x07]")
check(br"[\8]", br"[\8]")
check(br"[\78]", b"[\x078]")
check(br"[\41]", b"[!]")
check(br"[\418]", b"[!8]")
check(br"[\101]", b"[A]")
check(br"[\1010]", b"[A0]")
check(br"[\501]", b"[A]")
check(br"[\x41]", b"[A]")
check(br"[\X41]", br"[\X41]")
check(br"[\x410]", b"[A0]")
for b in range(256):
b = chr(b)
if b not in '\n"\'\\abtnvfr01234567x':
check('\\' + b, '\\' + b)
def test_errors(self):
decode = codecs.escape_decode
self.assertRaises(ValueError, decode, br"\x")
self.assertRaises(ValueError, decode, br"[\x]")
self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
self.assertRaises(ValueError, decode, br"\x0")
self.assertRaises(ValueError, decode, br"[\x0]")
self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
class RecodingTest(unittest.TestCase):
def test_recoding(self):
f = StringIO.StringIO()
f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
f2.write(u"a")
f2.close()
# Python used to crash on this at exit because of a refcount
# bug in _codecsmodule.c
# From RFC 3492
punycode_testcases = [
# A Arabic (Egyptian):
(u"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
u"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
"egbpdaj6bu4bxfgehfvwxn"),
# B Chinese (simplified):
(u"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
"ihqwcrb4cv8a8dqg056pqjye"),
# C Chinese (traditional):
(u"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
"ihqwctvzc91f659drss3x8bo0yb"),
# D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
(u"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
u"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
u"\u0065\u0073\u006B\u0079",
"Proprostnemluvesky-uyb24dma41a"),
# E Hebrew:
(u"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
u"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
u"\u05D1\u05E8\u05D9\u05EA",
"4dbcagdahymbxekheh6e0a7fei0b"),
# F Hindi (Devanagari):
(u"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
u"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
u"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
u"\u0939\u0948\u0902",
"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
#(G) Japanese (kanji and hiragana):
(u"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
u"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
# (H) Korean (Hangul syllables):
(u"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
u"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
u"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
"psd879ccm6fea98c"),
# (I) Russian (Cyrillic):
(u"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
u"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
u"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
u"\u0438",
"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
# (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
(u"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
u"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
u"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
u"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
u"\u0061\u00F1\u006F\u006C",
"PorqunopuedensimplementehablarenEspaol-fmd56a"),
# (K) Vietnamese:
# T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
# <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
(u"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
u"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
u"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
u"\u0056\u0069\u1EC7\u0074",
"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
#(L) 3<nen>B<gumi><kinpachi><sensei>
(u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
"3B-ww4c5e180e575a65lsy2b"),
# (M) <amuro><namie>-with-SUPER-MONKEYS
(u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
u"\u004F\u004E\u004B\u0045\u0059\u0053",
"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
# (N) Hello-Another-Way-<sorezore><no><basho>
(u"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
u"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
u"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
# (O) <hitotsu><yane><no><shita>2
(u"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
"2-u9tlzr9756bt3uc0v"),
# (P) Maji<de>Koi<suru>5<byou><mae>
(u"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
u"\u308B\u0035\u79D2\u524D",
"MajiKoi5-783gue6qz075azm5e"),
# (Q) <pafii>de<runba>
(u"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
"de-jg4avhby1noc0d"),
# (R) <sono><supiido><de>
(u"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
"d9juau41awczczp"),
# (S) -> $1.00 <-
(u"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
u"\u003C\u002D",
"-> $1.00 <--")
]
for i in punycode_testcases:
if len(i)!=2:
print repr(i)
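# Brief sketch (illustrative, not part of the original suite): the "punycode"
# codec keeps the basic (ASCII) code points, appends a "-" delimiter, then the
# encoded non-ASCII deltas; compare with RFC 3492 test case (L) above.
def _punycode_example():
    label = u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F"
    encoded = label.encode("punycode")
    assert encoded.lower() == "3B-ww4c5e180e575a65lsy2b".lower()
    assert encoded.decode("punycode") == label
    return encoded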
class PunycodeTest(unittest.TestCase):
def test_encode(self):
for uni, puny in punycode_testcases:
# Need to convert both strings to lower case, since
# some of the extended encodings use upper case, but our
# code produces only lower case. Converting just puny to
# lower is also insufficient, since some of the input characters
# are upper case.
self.assertEqual(uni.encode("punycode").lower(), puny.lower())
def test_decode(self):
for uni, puny in punycode_testcases:
self.assertEqual(uni, puny.decode("punycode"))
class UnicodeInternalTest(unittest.TestCase):
def test_bug1251300(self):
# Decoding with unicode_internal used to not correctly handle "code
# points" above 0x10ffff on UCS-4 builds.
if sys.maxunicode > 0xffff:
ok = [
("\x00\x10\xff\xff", u"\U0010ffff"),
("\x00\x00\x01\x01", u"\U00000101"),
("", u""),
]
not_ok = [
"\x7f\xff\xff\xff",
"\x80\x00\x00\x00",
"\x81\x00\x00\x00",
"\x00",
"\x00\x00\x00\x00\x00",
]
for internal, uni in ok:
if sys.byteorder == "little":
internal = "".join(reversed(internal))
self.assertEqual(uni, internal.decode("unicode_internal"))
for internal in not_ok:
if sys.byteorder == "little":
internal = "".join(reversed(internal))
self.assertRaises(UnicodeDecodeError, internal.decode,
"unicode_internal")
def test_decode_error_attributes(self):
if sys.maxunicode > 0xffff:
try:
"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
except UnicodeDecodeError, ex:
self.assertEqual("unicode_internal", ex.encoding)
self.assertEqual("\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
self.assertEqual(4, ex.start)
self.assertEqual(8, ex.end)
else:
self.fail()
def test_decode_callback(self):
if sys.maxunicode > 0xffff:
codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
decoder = codecs.getdecoder("unicode_internal")
ab = u"ab".encode("unicode_internal")
ignored = decoder("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
"UnicodeInternalTest")
self.assertEqual((u"ab", 12), ignored)
def test_encode_length(self):
# Issue 3739
encoder = codecs.getencoder("unicode_internal")
self.assertEqual(encoder(u"a")[1], 1)
self.assertEqual(encoder(u"\xe9\u0142")[1], 2)
encoder = codecs.getencoder("string-escape")
self.assertEqual(encoder(r'\x00')[1], 4)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
nameprep_tests = [
# 3.1 Map to nothing.
('foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
'\xb8\x8f\xef\xbb\xbf',
'foobarbaz'),
# 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
('CAFE',
'cafe'),
# 3.3 Case folding 8bit U+00DF (german sharp s).
# The original test case is bogus; it says \xc3\xdf
('\xc3\x9f',
'ss'),
# 3.4 Case folding U+0130 (turkish capital I with dot).
('\xc4\xb0',
'i\xcc\x87'),
# 3.5 Case folding multibyte U+0143 U+037A.
('\xc5\x83\xcd\xba',
'\xc5\x84 \xce\xb9'),
# 3.6 Case folding U+2121 U+33C6 U+1D7BB.
# XXX: skip this as it fails in UCS-2 mode
#('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
# 'telc\xe2\x88\x95kg\xcf\x83'),
(None, None),
# 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
('j\xcc\x8c\xc2\xa0\xc2\xaa',
'\xc7\xb0 a'),
# 3.8 Case folding U+1FB7 and normalization.
('\xe1\xbe\xb7',
'\xe1\xbe\xb6\xce\xb9'),
# 3.9 Self-reverting case folding U+01F0 and normalization.
# The original test case is bogus, it says `\xc7\xf0'
('\xc7\xb0',
'\xc7\xb0'),
# 3.10 Self-reverting case folding U+0390 and normalization.
('\xce\x90',
'\xce\x90'),
# 3.11 Self-reverting case folding U+03B0 and normalization.
('\xce\xb0',
'\xce\xb0'),
# 3.12 Self-reverting case folding U+1E96 and normalization.
('\xe1\xba\x96',
'\xe1\xba\x96'),
# 3.13 Self-reverting case folding U+1F56 and normalization.
('\xe1\xbd\x96',
'\xe1\xbd\x96'),
# 3.14 ASCII space character U+0020.
(' ',
' '),
# 3.15 Non-ASCII 8bit space character U+00A0.
('\xc2\xa0',
' '),
# 3.16 Non-ASCII multibyte space character U+1680.
('\xe1\x9a\x80',
None),
# 3.17 Non-ASCII multibyte space character U+2000.
('\xe2\x80\x80',
' '),
# 3.18 Zero Width Space U+200b.
('\xe2\x80\x8b',
''),
# 3.19 Non-ASCII multibyte space character U+3000.
('\xe3\x80\x80',
' '),
# 3.20 ASCII control characters U+0010 U+007F.
('\x10\x7f',
'\x10\x7f'),
# 3.21 Non-ASCII 8bit control character U+0085.
('\xc2\x85',
None),
# 3.22 Non-ASCII multibyte control character U+180E.
('\xe1\xa0\x8e',
None),
# 3.23 Zero Width No-Break Space U+FEFF.
('\xef\xbb\xbf',
''),
# 3.24 Non-ASCII control character U+1D175.
('\xf0\x9d\x85\xb5',
None),
# 3.25 Plane 0 private use character U+F123.
('\xef\x84\xa3',
None),
# 3.26 Plane 15 private use character U+F1234.
('\xf3\xb1\x88\xb4',
None),
# 3.27 Plane 16 private use character U+10F234.
('\xf4\x8f\x88\xb4',
None),
# 3.28 Non-character code point U+8FFFE.
('\xf2\x8f\xbf\xbe',
None),
# 3.29 Non-character code point U+10FFFF.
('\xf4\x8f\xbf\xbf',
None),
# 3.30 Surrogate code U+DF42.
('\xed\xbd\x82',
None),
# 3.31 Non-plain text character U+FFFD.
('\xef\xbf\xbd',
None),
# 3.32 Ideographic description character U+2FF5.
('\xe2\xbf\xb5',
None),
# 3.33 Display property character U+0341.
('\xcd\x81',
'\xcc\x81'),
# 3.34 Left-to-right mark U+200E.
('\xe2\x80\x8e',
None),
# 3.35 Deprecated U+202A.
('\xe2\x80\xaa',
None),
# 3.36 Language tagging character U+E0001.
('\xf3\xa0\x80\x81',
None),
# 3.37 Language tagging character U+E0042.
('\xf3\xa0\x81\x82',
None),
# 3.38 Bidi: RandALCat character U+05BE and LCat characters.
('foo\xd6\xbebar',
None),
# 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
('foo\xef\xb5\x90bar',
None),
# 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
('foo\xef\xb9\xb6bar',
'foo \xd9\x8ebar'),
# 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
('\xd8\xa71',
None),
# 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
('\xd8\xa71\xd8\xa8',
'\xd8\xa71\xd8\xa8'),
# 3.43 Unassigned code point U+E0002.
# Skip this test as we allow unassigned
#('\xf3\xa0\x80\x82',
# None),
(None, None),
# 3.44 Larger test (shrinking).
# Original test case reads \xc3\xdf
('X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
'\xaa\xce\xb0\xe2\x80\x80',
'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
# 3.45 Larger test (expanding).
# Original test case reads \xc3\x9f
('X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
'\x80',
'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
]
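# Minimal sketch (illustrative only; mirrors NameprepTest below): each vector
# is given in UTF-8, and a prepped value of None marks prohibited input.
def _nameprep_example():
    from encodings.idna import nameprep
    orig, prepped = nameprep_tests[1]            # 3.2 ASCII case folding
    return nameprep(unicode(orig, "utf-8")) == unicode(prepped, "utf-8")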
class NameprepTest(unittest.TestCase):
def test_nameprep(self):
from encodings.idna import nameprep
for pos, (orig, prepped) in enumerate(nameprep_tests):
if orig is None:
# Skipped
continue
# The Unicode strings are given in UTF-8
orig = unicode(orig, "utf-8")
if prepped is None:
# Input contains prohibited characters
self.assertRaises(UnicodeError, nameprep, orig)
else:
prepped = unicode(prepped, "utf-8")
try:
self.assertEqual(nameprep(orig), prepped)
except Exception,e:
raise test_support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
class IDNACodecTest(unittest.TestCase):
def test_builtin_decode(self):
self.assertEqual(unicode("python.org", "idna"), u"python.org")
self.assertEqual(unicode("python.org.", "idna"), u"python.org.")
self.assertEqual(unicode("xn--pythn-mua.org", "idna"), u"pyth\xf6n.org")
self.assertEqual(unicode("xn--pythn-mua.org.", "idna"), u"pyth\xf6n.org.")
def test_builtin_encode(self):
self.assertEqual(u"python.org".encode("idna"), "python.org")
self.assertEqual("python.org.".encode("idna"), "python.org.")
self.assertEqual(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org")
self.assertEqual(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.")
def test_stream(self):
import StringIO
r = codecs.getreader("idna")(StringIO.StringIO("abc"))
r.read(3)
self.assertEqual(r.read(), u"")
def test_incremental_decode(self):
self.assertEqual(
"".join(codecs.iterdecode("python.org", "idna")),
u"python.org"
)
self.assertEqual(
"".join(codecs.iterdecode("python.org.", "idna")),
u"python.org."
)
self.assertEqual(
"".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
u"pyth\xf6n.org."
)
self.assertEqual(
"".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
u"pyth\xf6n.org."
)
decoder = codecs.getincrementaldecoder("idna")()
self.assertEqual(decoder.decode("xn--xam", ), u"")
self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
self.assertEqual(decoder.decode(u"rg"), u"")
self.assertEqual(decoder.decode(u"", True), u"org")
decoder.reset()
self.assertEqual(decoder.decode("xn--xam", ), u"")
self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
self.assertEqual(decoder.decode("rg."), u"org.")
self.assertEqual(decoder.decode("", True), u"")
def test_incremental_encode(self):
self.assertEqual(
"".join(codecs.iterencode(u"python.org", "idna")),
"python.org"
)
self.assertEqual(
"".join(codecs.iterencode(u"python.org.", "idna")),
"python.org."
)
self.assertEqual(
"".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
"xn--pythn-mua.org."
)
self.assertEqual(
"".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
"xn--pythn-mua.org."
)
encoder = codecs.getincrementalencoder("idna")()
self.assertEqual(encoder.encode(u"\xe4x"), "")
self.assertEqual(encoder.encode(u"ample.org"), "xn--xample-9ta.")
self.assertEqual(encoder.encode(u"", True), "org")
encoder.reset()
self.assertEqual(encoder.encode(u"\xe4x"), "")
self.assertEqual(encoder.encode(u"ample.org."), "xn--xample-9ta.org.")
self.assertEqual(encoder.encode(u"", True), "")
class CodecsModuleTest(unittest.TestCase):
def test_decode(self):
self.assertEqual(codecs.decode('\xe4\xf6\xfc', 'latin-1'),
u'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.decode)
self.assertEqual(codecs.decode('abc'), u'abc')
self.assertRaises(UnicodeDecodeError, codecs.decode, '\xff', 'ascii')
def test_encode(self):
self.assertEqual(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'),
'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.encode)
self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
self.assertEqual(codecs.encode(u'abc'), 'abc')
self.assertRaises(UnicodeEncodeError, codecs.encode, u'\xffff', 'ascii')
def test_register(self):
self.assertRaises(TypeError, codecs.register)
self.assertRaises(TypeError, codecs.register, 42)
def test_lookup(self):
self.assertRaises(TypeError, codecs.lookup)
self.assertRaises(LookupError, codecs.lookup, "__spam__")
self.assertRaises(LookupError, codecs.lookup, " ")
def test_getencoder(self):
self.assertRaises(TypeError, codecs.getencoder)
self.assertRaises(LookupError, codecs.getencoder, "__spam__")
def test_getdecoder(self):
self.assertRaises(TypeError, codecs.getdecoder)
self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
def test_getreader(self):
self.assertRaises(TypeError, codecs.getreader)
self.assertRaises(LookupError, codecs.getreader, "__spam__")
def test_getwriter(self):
self.assertRaises(TypeError, codecs.getwriter)
self.assertRaises(LookupError, codecs.getwriter, "__spam__")
def test_lookup_issue1813(self):
# Issue #1813: under Turkish locales, lookup of some codecs failed
# because 'I' is lowercased as a dotless "i"
oldlocale = locale.getlocale(locale.LC_CTYPE)
self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
try:
locale.setlocale(locale.LC_CTYPE, 'tr_TR')
except locale.Error:
# Unsupported locale on this system
self.skipTest('test needs Turkish locale')
c = codecs.lookup('ASCII')
self.assertEqual(c.name, 'ascii')
class StreamReaderTest(unittest.TestCase):
def setUp(self):
self.reader = codecs.getreader('utf-8')
self.stream = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')
def test_readlines(self):
f = self.reader(self.stream)
self.assertEqual(f.readlines(), [u'\ud55c\n', u'\uae00'])
class EncodedFileTest(unittest.TestCase):
def test_basic(self):
f = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')
ef = codecs.EncodedFile(f, 'utf-16-le', 'utf-8')
self.assertEqual(ef.read(), '\\\xd5\n\x00\x00\xae')
f = StringIO.StringIO()
ef = codecs.EncodedFile(f, 'utf-8', 'latin1')
ef.write('\xc3\xbc')
self.assertEqual(f.getvalue(), '\xfc')
class Str2StrTest(unittest.TestCase):
def test_read(self):
sin = "\x80".encode("base64_codec")
reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
sout = reader.read()
self.assertEqual(sout, "\x80")
self.assertIsInstance(sout, str)
def test_readline(self):
sin = "\x80".encode("base64_codec")
reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
sout = reader.readline()
self.assertEqual(sout, "\x80")
self.assertIsInstance(sout, str)
all_unicode_encodings = [
"ascii",
"base64_codec",
"big5",
"big5hkscs",
"charmap",
"cp037",
"cp1006",
"cp1026",
"cp1140",
"cp1250",
"cp1251",
"cp1252",
"cp1253",
"cp1254",
"cp1255",
"cp1256",
"cp1257",
"cp1258",
"cp424",
"cp437",
"cp500",
"cp720",
"cp737",
"cp775",
"cp850",
"cp852",
"cp855",
"cp856",
"cp857",
"cp858",
"cp860",
"cp861",
"cp862",
"cp863",
"cp864",
"cp865",
"cp866",
"cp869",
"cp874",
"cp875",
"cp932",
"cp949",
"cp950",
"euc_jis_2004",
"euc_jisx0213",
"euc_jp",
"euc_kr",
"gb18030",
"gb2312",
"gbk",
"hex_codec",
"hp_roman8",
"hz",
"idna",
"iso2022_jp",
"iso2022_jp_1",
"iso2022_jp_2",
"iso2022_jp_2004",
"iso2022_jp_3",
"iso2022_jp_ext",
"iso2022_kr",
"iso8859_1",
"iso8859_10",
"iso8859_11",
"iso8859_13",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_2",
"iso8859_3",
"iso8859_4",
"iso8859_5",
"iso8859_6",
"iso8859_7",
"iso8859_8",
"iso8859_9",
"johab",
"koi8_r",
"koi8_u",
"latin_1",
"mac_cyrillic",
"mac_greek",
"mac_iceland",
"mac_latin2",
"mac_roman",
"mac_turkish",
"palmos",
"ptcp154",
"punycode",
"raw_unicode_escape",
"rot_13",
"shift_jis",
"shift_jis_2004",
"shift_jisx0213",
"tis_620",
"unicode_escape",
"unicode_internal",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8",
]
if hasattr(codecs, "mbcs_encode"):
all_unicode_encodings.append("mbcs")
# The following encodings work only with str, not unicode
all_string_encodings = [
"quopri_codec",
"string_escape",
"uu_codec",
]
# The following encoding is not tested, because it's not supposed
# to work:
# "undefined"
# The following encodings don't work in stateful mode
broken_unicode_with_streams = [
"base64_codec",
"hex_codec",
"punycode",
"unicode_internal"
]
broken_incremental_coders = broken_unicode_with_streams[:]
# The following encodings only support "strict" mode
only_strict_mode = [
"idna",
"zlib_codec",
"bz2_codec",
]
try:
import bz2
except ImportError:
pass
else:
all_unicode_encodings.append("bz2_codec")
broken_unicode_with_streams.append("bz2_codec")
try:
import zlib
except ImportError:
pass
else:
all_unicode_encodings.append("zlib_codec")
broken_unicode_with_streams.append("zlib_codec")
class BasicUnicodeTest(unittest.TestCase):
def test_basics(self):
s = u"abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
name = codecs.lookup(encoding).name
if encoding.endswith("_codec"):
name += "_codec"
elif encoding == "latin_1":
name = "latin_1"
self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
(bytes, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s), "%r != %r (encoding=%r)" % (size, len(s), encoding))
(chars, size) = codecs.getdecoder(encoding)(bytes)
self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
if encoding not in broken_unicode_with_streams:
# check stream reader/writer
q = Queue()
writer = codecs.getwriter(encoding)(q)
encodedresult = ""
for c in s:
writer.write(c)
encodedresult += q.read()
q = Queue()
reader = codecs.getreader(encoding)(q)
decodedresult = u""
for c in encodedresult:
q.write(c)
decodedresult += reader.read()
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
if encoding not in broken_incremental_coders:
# check incremental decoder/encoder (fetched via the Python
# and C API) and iterencode()/iterdecode()
try:
encoder = codecs.getincrementalencoder(encoding)()
cencoder = _testcapi.codec_incrementalencoder(encoding)
except LookupError: # no IncrementalEncoder
pass
else:
# check incremental decoder/encoder
encodedresult = ""
for c in s:
encodedresult += encoder.encode(c)
encodedresult += encoder.encode(u"", True)
decoder = codecs.getincrementaldecoder(encoding)()
decodedresult = u""
for c in encodedresult:
decodedresult += decoder.decode(c)
decodedresult += decoder.decode("", True)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
# check C API
encodedresult = ""
for c in s:
encodedresult += cencoder.encode(c)
encodedresult += cencoder.encode(u"", True)
cdecoder = _testcapi.codec_incrementaldecoder(encoding)
decodedresult = u""
for c in encodedresult:
decodedresult += cdecoder.decode(c)
decodedresult += cdecoder.decode("", True)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
# check iterencode()/iterdecode()
result = u"".join(codecs.iterdecode(codecs.iterencode(s, encoding), encoding))
self.assertEqual(result, s, "%r != %r (encoding=%r)" % (result, s, encoding))
# check iterencode()/iterdecode() with empty string
result = u"".join(codecs.iterdecode(codecs.iterencode(u"", encoding), encoding))
self.assertEqual(result, u"")
if encoding not in only_strict_mode:
# check incremental decoder/encoder with errors argument
try:
encoder = codecs.getincrementalencoder(encoding)("ignore")
cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = "".join(encoder.encode(c) for c in s)
decoder = codecs.getincrementaldecoder(encoding)("ignore")
decodedresult = u"".join(decoder.decode(c) for c in encodedresult)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
encodedresult = "".join(cencoder.encode(c) for c in s)
cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
decodedresult = u"".join(cdecoder.decode(c) for c in encodedresult)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
def test_seek(self):
# all codecs should be able to encode these
s = u"%s\n%s\n" % (100*u"abc123", 100*u"def456")
for encoding in all_unicode_encodings:
if encoding == "idna": # FIXME: See SF bug #1163178
continue
if encoding in broken_unicode_with_streams:
continue
reader = codecs.getreader(encoding)(StringIO.StringIO(s.encode(encoding)))
for t in xrange(5):
# Test that calling seek resets the internal codec state and buffers
reader.seek(0, 0)
line = reader.readline()
self.assertEqual(s[:len(line)], line)
def test_bad_decode_args(self):
for encoding in all_unicode_encodings:
decoder = codecs.getdecoder(encoding)
self.assertRaises(TypeError, decoder)
if encoding not in ("idna", "punycode"):
self.assertRaises(TypeError, decoder, 42)
def test_bad_encode_args(self):
for encoding in all_unicode_encodings:
encoder = codecs.getencoder(encoding)
self.assertRaises(TypeError, encoder)
def test_encoding_map_type_initialized(self):
from encodings import cp1140
# This used to crash; we are only verifying that there's no crash.
table_type = type(cp1140.encoding_table)
self.assertEqual(table_type, table_type)
class BasicStrTest(unittest.TestCase):
def test_basics(self):
s = "abc123"
for encoding in all_string_encodings:
(bytes, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s))
(chars, size) = codecs.getdecoder(encoding)(bytes)
self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
class CharmapTest(unittest.TestCase):
def test_decode_with_string_map(self):
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict", u"abc"),
(u"abc", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict", u"ab"
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict", u"ab\ufffe"
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace", u"ab"),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace", u"ab\ufffe"),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab"),
(u"ab", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab\ufffe"),
(u"ab", 3)
)
allbytes = "".join(chr(i) for i in xrange(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", u""),
(u"", len(allbytes))
)
def test_decode_with_int2str_map(self):
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: u'c'}),
(u"abc", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'Aa', 1: u'Bb', 2: u'Cc'}),
(u"AaBbCc", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'\U0010FFFF', 1: u'b', 2: u'c'}),
(u"\U0010FFFFbc", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: u''}),
(u"ab", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: u'a', 1: u'b'}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: None}
)
# Issue #14850
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: u'\ufffe'}
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: u'a', 1: u'b'}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: u'a', 1: u'b', 2: None}),
(u"ab\ufffd", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: u'a', 1: u'b', 2: u'\ufffe'}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: u'a', 1: u'b'}),
(u"ab", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: u'a', 1: u'b', 2: None}),
(u"ab", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: u'a', 1: u'b', 2: u'\ufffe'}),
(u"ab", 3)
)
allbytes = "".join(chr(i) for i in xrange(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", {}),
(u"", len(allbytes))
)
def test_decode_with_int2int_map(self):
a = ord(u'a')
b = ord(u'b')
c = ord(u'c')
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: a, 1: b, 2: c}),
(u"abc", 3)
)
# Issue #15379
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: 0x10FFFF, 1: b, 2: c}),
(u"\U0010FFFFbc", 3)
)
self.assertRaises(TypeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: 0x110000, 1: b, 2: c}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: a, 1: b},
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: a, 1: b, 2: 0xFFFE},
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: a, 1: b}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: a, 1: b, 2: 0xFFFE}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: a, 1: b}),
(u"ab", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: a, 1: b, 2: 0xFFFE}),
(u"ab", 3)
)
class WithStmtTest(unittest.TestCase):
def test_encodedfile(self):
f = StringIO.StringIO("\xc3\xbc")
with codecs.EncodedFile(f, "latin-1", "utf-8") as ef:
self.assertEqual(ef.read(), "\xfc")
def test_streamreaderwriter(self):
f = StringIO.StringIO("\xc3\xbc")
info = codecs.lookup("utf-8")
with codecs.StreamReaderWriter(f, info.streamreader,
info.streamwriter, 'strict') as srw:
self.assertEqual(srw.read(), u"\xfc")
class UnicodeEscapeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.unicode_escape_encode(u""), ("", 0))
self.assertEqual(codecs.unicode_escape_decode(""), (u"", 0))
def test_raw_encode(self):
encode = codecs.unicode_escape_encode
for b in range(32, 127):
if b != ord('\\'):
self.assertEqual(encode(unichr(b)), (chr(b), 1))
def test_raw_decode(self):
decode = codecs.unicode_escape_decode
for b in range(256):
if b != ord('\\'):
self.assertEqual(decode(chr(b) + '0'), (unichr(b) + u'0', 2))
def test_escape_encode(self):
encode = codecs.unicode_escape_encode
check = coding_checker(self, encode)
check(u'\t', r'\t')
check(u'\n', r'\n')
check(u'\r', r'\r')
check(u'\\', r'\\')
for b in range(32):
if chr(b) not in '\t\n\r':
check(unichr(b), '\\x%02x' % b)
for b in range(127, 256):
check(unichr(b), '\\x%02x' % b)
check(u'\u20ac', r'\u20ac')
check(u'\U0001d120', r'\U0001d120')
def test_escape_decode(self):
decode = codecs.unicode_escape_decode
check = coding_checker(self, decode)
check("[\\\n]", u"[]")
check(r'[\"]', u'["]')
check(r"[\']", u"[']")
check(r"[\\]", ur"[\]")
check(r"[\a]", u"[\x07]")
check(r"[\b]", u"[\x08]")
check(r"[\t]", u"[\x09]")
check(r"[\n]", u"[\x0a]")
check(r"[\v]", u"[\x0b]")
check(r"[\f]", u"[\x0c]")
check(r"[\r]", u"[\x0d]")
check(r"[\7]", u"[\x07]")
check(r"[\8]", ur"[\8]")
check(r"[\78]", u"[\x078]")
check(r"[\41]", u"[!]")
check(r"[\418]", u"[!8]")
check(r"[\101]", u"[A]")
check(r"[\1010]", u"[A0]")
check(r"[\x41]", u"[A]")
check(r"[\x410]", u"[A0]")
check(r"\u20ac", u"\u20ac")
check(r"\U0001d120", u"\U0001d120")
for b in range(256):
if chr(b) not in '\n"\'\\abtnvfr01234567xuUN':
check('\\' + chr(b), u'\\' + unichr(b))
def test_decode_errors(self):
decode = codecs.unicode_escape_decode
for c, d in ('x', 2), ('u', 4), ('U', 4):
for i in range(d):
self.assertRaises(UnicodeDecodeError, decode,
"\\" + c + "0"*i)
self.assertRaises(UnicodeDecodeError, decode,
"[\\" + c + "0"*i + "]")
data = "[\\" + c + "0"*i + "]\\" + c + "0"*i
self.assertEqual(decode(data, "ignore"), (u"[]", len(data)))
self.assertEqual(decode(data, "replace"),
(u"[\ufffd]\ufffd", len(data)))
self.assertRaises(UnicodeDecodeError, decode, r"\U00110000")
self.assertEqual(decode(r"\U00110000", "ignore"), (u"", 10))
self.assertEqual(decode(r"\U00110000", "replace"), (u"\ufffd", 10))
class RawUnicodeEscapeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.raw_unicode_escape_encode(u""), ("", 0))
self.assertEqual(codecs.raw_unicode_escape_decode(""), (u"", 0))
def test_raw_encode(self):
encode = codecs.raw_unicode_escape_encode
for b in range(256):
self.assertEqual(encode(unichr(b)), (chr(b), 1))
def test_raw_decode(self):
decode = codecs.raw_unicode_escape_decode
for b in range(256):
self.assertEqual(decode(chr(b) + '0'), (unichr(b) + u'0', 2))
def test_escape_encode(self):
encode = codecs.raw_unicode_escape_encode
check = coding_checker(self, encode)
for b in range(256):
if chr(b) not in 'uU':
check(u'\\' + unichr(b), '\\' + chr(b))
check(u'\u20ac', r'\u20ac')
check(u'\U0001d120', r'\U0001d120')
def test_escape_decode(self):
decode = codecs.raw_unicode_escape_decode
check = coding_checker(self, decode)
for b in range(256):
if chr(b) not in 'uU':
check('\\' + chr(b), u'\\' + unichr(b))
check(r"\u20ac", u"\u20ac")
check(r"\U0001d120", u"\U0001d120")
def test_decode_errors(self):
decode = codecs.raw_unicode_escape_decode
for c, d in ('u', 4), ('U', 4):
for i in range(d):
self.assertRaises(UnicodeDecodeError, decode,
"\\" + c + "0"*i)
self.assertRaises(UnicodeDecodeError, decode,
"[\\" + c + "0"*i + "]")
data = "[\\" + c + "0"*i + "]\\" + c + "0"*i
self.assertEqual(decode(data, "ignore"), (u"[]", len(data)))
self.assertEqual(decode(data, "replace"),
(u"[\ufffd]\ufffd", len(data)))
self.assertRaises(UnicodeDecodeError, decode, r"\U00110000")
self.assertEqual(decode(r"\U00110000", "ignore"), (u"", 10))
self.assertEqual(decode(r"\U00110000", "replace"), (u"\ufffd", 10))
class BomTest(unittest.TestCase):
def test_seek0(self):
data = u"1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
self.addCleanup(test_support.unlink, test_support.TESTFN)
for encoding in tests:
# Check if the BOM is written only once
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# Check that the BOM is written after a seek(0)
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data[0])
self.assertNotEqual(f.tell(), 0)
f.seek(0)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# (StreamWriter) Check that the BOM is written after a seek(0)
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data[0])
self.assertNotEqual(f.writer.tell(), 0)
f.writer.seek(0)
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# Check that the BOM is not written after a seek() at a position
# different than the start
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.seek(f.tell())
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# (StreamWriter) Check that the BOM is not written after a seek()
# at a position different than the start
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data)
f.writer.seek(f.writer.tell())
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
def test_main():
test_support.run_unittest(
UTF32Test,
UTF32LETest,
UTF32BETest,
UTF16Test,
UTF16LETest,
UTF16BETest,
UTF8Test,
UTF8SigTest,
UTF7Test,
UTF16ExTest,
ReadBufferTest,
CharBufferTest,
EscapeDecodeTest,
RecodingTest,
PunycodeTest,
UnicodeInternalTest,
NameprepTest,
IDNACodecTest,
CodecsModuleTest,
StreamReaderTest,
EncodedFileTest,
Str2StrTest,
BasicUnicodeTest,
BasicStrTest,
CharmapTest,
WithStmtTest,
UnicodeEscapeTest,
RawUnicodeEscapeTest,
BomTest,
)
if __name__ == "__main__":
test_main()
|
|
# Copyright (c) 2014 Tom Carroll
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Support for the Openstack provisioner.
This module's main contents are a class that captures the Openstack ids
of Openstack resources that have been provisioned for a model, and a class that
retrieves and caches identifiers for Openstack resources that may be needed
when provisioning infra for a model.
'''
from actuator.provisioners.core import BaseProvisioningRecord
class OpenstackProvisioningRecord(BaseProvisioningRecord):
"""
Primitive record of provisioned Openstack resources. Currently this only
captures the ids of the resources.
"""
def __init__(self, id):
super(OpenstackProvisioningRecord, self).__init__(id)
self.network_ids = dict()
self.subnet_ids = dict()
self.floating_ip_ids = dict()
self.router_ids = dict()
self.router_iface_ids = dict()
self.secgroup_ids = dict()
self.secgroup_rule_ids = dict()
self.server_ids = dict()
self.port_ids = dict()
def __getstate__(self):
d = super(OpenstackProvisioningRecord, self).__getstate__()
d.update( {"network_ids":self.network_ids,
"subnet_ids":self.subnet_ids,
"floating_ip_ids":self.floating_ip_ids,
"router_ids":self.router_ids,
"router_iface_ids":self.router_iface_ids,
"secgroup_ids":self.secgroup_ids,
"server_ids":self.server_ids,
"port_ids":self.port_ids} )
return d
def __setstate__(self, d):
super(OpenstackProvisioningRecord, self).__setstate__(d)
keys = d.keys()
for k in keys:
setattr(self, k, set(d[k]))
del d[k]
def add_port_id(self, rid, osid):
"""
Maps an Actuator resource id to the associated Openstack resource id
@param rid: Actuator resource id
@param osid: Openstack resource id
"""
self.port_ids[rid] = osid
def add_server_id(self, rid, osid):
"""
Maps an Actuator resource id to the associated Openstack resource id
@param rid: Actuator resource id
@param osid: Openstack resource id
"""
self.server_ids[rid] = osid
def add_secgroup_id(self, rid, osid):
"""
Maps an Actuator resource id to the associated Openstack resource id
@param rid: Actuator resource id
@param osid: Openstack resource id
"""
self.secgroup_ids[rid] = osid
def add_secgroup_rule_id(self, rid, osid):
"""
Maps an Actuator resource id to the associated Openstack resource id
@param rid: Actuator resource id
@param osid: Openstack resource id
"""
self.secgroup_rule_ids[rid] = osid
def add_router_id(self, rid, osid):
"""
Maps an Actuator resource id to the associated Openstack resource id
@param rid: Actuator resource id
@param osid: Openstack resource id
"""
self.router_ids[rid] = osid
def add_router_iface_id(self, rid, osid):
"""
Maps an Actuator resource id to the associated Openstack resource id
@param rid: Actuator resource id
@param osid: Openstack resource id
"""
self.router_iface_ids[rid] = osid
def add_floating_ip_id(self, rid, osid):
"""
Maps an Actuator resource id to the associated Openstack resource id
@param rid: Actuator resource id
@param osid: Openstack resource id
"""
self.floating_ip_ids[rid] = osid
def add_subnet_id(self, rid, osid):
"""
Maps an Actuator resource id to the associated Openstack resource id
@param rid: Actuator resource id
@param osid: Openstack resource id
"""
self.subnet_ids[rid] = osid
def add_network_id(self, rid, osid):
"""
Maps an Actuator resource id to the associated Openstack resource id
@param rid: Actuator resource id
@param osid: Openstack resource id
"""
self.network_ids[rid] = osid
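# Usage sketch (hypothetical ids, not part of the original module): each
# add_*_id call records the mapping from an Actuator resource id to the id
# Openstack assigned when the resource was provisioned.
def _record_usage_sketch():
    rec = OpenstackProvisioningRecord(1)
    rec.add_network_id("actuator-net", "11111111-aaaa-bbbb-cccc-222222222222")
    rec.add_server_id("actuator-server", "33333333-dddd-eeee-ffff-444444444444")
    return rec.network_ids["actuator-net"]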
class _OSMaps(object):
"""
Utility class that creates a cache of Openstack resources. The resources
are mapped by their "natural" key (name or id) to the objects returned by
the appropriate Openstack API client (nova, neutron, etc).
"""
def __init__(self, os_provisioner):
self.os_provisioner = os_provisioner
self.image_map = {}
self.flavor_map = {}
self.network_map = {}
self.secgroup_map = {}
self.secgroup_rule_map = {}
self.router_map = {}
self.subnet_map = {}
def refresh_all(self):
"""
Refresh all maps
"""
self.refresh_flavors()
self.refresh_images()
self.refresh_networks()
self.refresh_secgroups()
self.refresh_routers()
self.refresh_subnets()
def refresh_subnets(self):
"""
Refresh the subnets map, subnet_map.
Keys are subnet names, values are the neutron subnet dicts.
"""
response = self.os_provisioner.nuclient.list_subnets()
self.subnet_map = {d['name']:d for d in response['subnets']}
def refresh_routers(self):
"""
Refresh the routers map, router_map
Keys are the Openstack ID for the router, values are the same ID
"""
response = self.os_provisioner.nuclient.list_routers()
self.router_map = {d['id']:d['id'] for d in response["routers"]}
def refresh_networks(self):
"""
Refresh the networks map, network_map.
Keys are the network labels and network ids, values are nova Network objects.
"""
networks = self.os_provisioner.nvclient.networks.list()
self.network_map = {n.label:n for n in networks}
for network in networks:
self.network_map[network.id] = network
def refresh_images(self):
"""
Refresh the images map, image_map
Keys are image names, values are nova Image objects.
"""
self.image_map = {i.name:i for i in self.os_provisioner.nvclient.images.list()}
def refresh_flavors(self):
"""
Refresh the flavors map, flavor_map
Keys are flavor names, values are nova Flavor objects
"""
self.flavor_map = {f.name:f for f in self.os_provisioner.nvclient.flavors.list()}
def refresh_secgroups(self):
"""
Refresh the sec groups map, secgroup_map
Keys are secgroup names and secgroup ids, values are nova SecGroup
objects.
"""
secgroups = list(self.os_provisioner.nvclient.security_groups.list())
self.secgroup_map = {sg.name:sg for sg in secgroups}
self.secgroup_map.update({sg.id:sg for sg in secgroups})
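# Sketch (assumes an os_provisioner that already carries authenticated nova
# ("nvclient") and neutron ("nuclient") clients, as the methods above expect):
# refreshing once lets later lookups hit the cache instead of the API.
def _osmaps_usage_sketch(os_provisioner):
    maps = _OSMaps(os_provisioner)
    maps.refresh_flavors()
    return maps.flavor_map.get("m1.small")       # hypothetical flavor name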
|
|
from sqlalchemy import and_
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import join
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.orm import aliased
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import noload
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.fixtures import ComparableEntity
from sqlalchemy.testing.fixtures import fixture_session
class PartitionByFixture(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
cs = relationship("C")
class C(Base):
__tablename__ = "c"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
partition = select(
B,
func.row_number()
.over(order_by=B.id, partition_by=B.a_id)
.label("index"),
).alias()
partitioned_b = aliased(B, alias=partition)
A.partitioned_bs = relationship(
partitioned_b,
primaryjoin=and_(
partitioned_b.a_id == A.id, partition.c.index < 10
),
)
@classmethod
def insert_data(cls, connection):
A, B, C = cls.classes("A", "B", "C")
s = Session(connection)
s.add_all([A(id=i) for i in range(1, 4)])
s.flush()
s.add_all(
[
B(a_id=i, cs=[C(), C()])
for i in range(1, 4)
for j in range(1, 21)
]
)
s.commit()
class AliasedClassRelationshipTest(
PartitionByFixture, testing.AssertsCompiledSQL
):
# TODO: maybe make this more backend agnostic
__requires__ = ("window_functions",)
__dialect__ = "default"
def test_lazyload(self):
A, B, C = self.classes("A", "B", "C")
s = Session(testing.db)
def go():
for a1 in s.query(A): # 1 query
eq_(len(a1.partitioned_bs), 9) # 3 queries
for b in a1.partitioned_bs:
eq_(len(b.cs), 2) # 9 * 3 = 27 queries
self.assert_sql_count(testing.db, go, 31)
def test_join_one(self):
A, B, C = self.classes("A", "B", "C")
s = Session(testing.db)
q = s.query(A).join(A.partitioned_bs)
self.assert_compile(
q,
"SELECT a.id AS a_id FROM a JOIN "
"(SELECT b.id AS id, b.a_id AS a_id, row_number() "
"OVER (PARTITION BY b.a_id ORDER BY b.id) "
"AS index FROM b) AS anon_1 "
"ON anon_1.a_id = a.id AND anon_1.index < :index_1",
)
def test_join_two(self):
A, B, C = self.classes("A", "B", "C")
s = Session(testing.db)
q = s.query(A, A.partitioned_bs.entity).join(A.partitioned_bs)
self.assert_compile(
q,
"SELECT a.id AS a_id, anon_1.id AS anon_1_id, "
"anon_1.a_id AS anon_1_a_id "
"FROM a JOIN "
"(SELECT b.id AS id, b.a_id AS a_id, row_number() "
"OVER (PARTITION BY b.a_id ORDER BY b.id) "
"AS index FROM b) AS anon_1 "
"ON anon_1.a_id = a.id AND anon_1.index < :index_1",
)
def test_selectinload_w_noload_after(self):
A, B, C = self.classes("A", "B", "C")
s = Session(testing.db)
def go():
for a1 in s.query(A).options(
noload("*"), selectinload(A.partitioned_bs)
):
for b in a1.partitioned_bs:
eq_(b.cs, [])
self.assert_sql_count(testing.db, go, 2)
def test_selectinload_w_joinedload_after(self):
A, B, C = self.classes("A", "B", "C")
s = Session(testing.db)
def go():
for a1 in s.query(A).options(
selectinload(A.partitioned_bs).joinedload("cs")
):
for b in a1.partitioned_bs:
eq_(len(b.cs), 2)
self.assert_sql_count(testing.db, go, 2)
class AltSelectableTest(
fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
__dialect__ = "default"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(ComparableEntity, Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
class B(ComparableEntity, Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
class C(ComparableEntity, Base):
__tablename__ = "c"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
class D(ComparableEntity, Base):
__tablename__ = "d"
id = Column(Integer, primary_key=True)
c_id = Column(ForeignKey("c.id"))
b_id = Column(ForeignKey("b.id"))
# 1. set up the join() as a variable, so we can refer
# to it in the mapping multiple times.
j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id)
# 2. Create an AliasedClass to B
B_viacd = aliased(B, j, flat=True)
A.b = relationship(B_viacd, primaryjoin=A.b_id == j.c.b_id)
@classmethod
def insert_data(cls, connection):
A, B, C, D = cls.classes("A", "B", "C", "D")
sess = Session(connection)
for obj in [
B(id=1),
A(id=1, b_id=1),
C(id=1, a_id=1),
D(id=1, c_id=1, b_id=1),
]:
sess.add(obj)
sess.flush()
sess.commit()
def test_lazyload(self):
A, B = self.classes("A", "B")
sess = fixture_session()
a1 = sess.query(A).first()
with self.sql_execution_asserter() as asserter:
# note this is many-to-one. use_get is unconditionally turned
# off for relationship to aliased class for now.
eq_(a1.b, B(id=1))
asserter.assert_(
CompiledSQL(
"SELECT b.id AS b_id FROM b JOIN d ON d.b_id = b.id "
"JOIN c ON c.id = d.c_id WHERE :param_1 = b.id",
[{"param_1": 1}],
)
)
def test_joinedload(self):
A, B = self.classes("A", "B")
sess = fixture_session()
with self.sql_execution_asserter() as asserter:
# note this is many-to-one. use_get is unconditionally turned
# off for relationship to aliased class for now.
a1 = sess.query(A).options(joinedload(A.b)).first()
eq_(a1.b, B(id=1))
asserter.assert_(
CompiledSQL(
"SELECT a.id AS a_id, a.b_id AS a_b_id, b_1.id AS b_1_id "
"FROM a LEFT OUTER JOIN (b AS b_1 "
"JOIN d AS d_1 ON d_1.b_id = b_1.id "
"JOIN c AS c_1 ON c_1.id = d_1.c_id) ON a.b_id = b_1.id "
"LIMIT :param_1",
[{"param_1": 1}],
)
)
def test_selectinload(self):
A, B = self.classes("A", "B")
sess = fixture_session()
with self.sql_execution_asserter() as asserter:
# note this is many-to-one. use_get is unconditionally turned
# off for relationship to aliased class for now.
a1 = sess.query(A).options(selectinload(A.b)).first()
eq_(a1.b, B(id=1))
asserter.assert_(
CompiledSQL(
"SELECT a.id AS a_id, a.b_id AS a_b_id "
"FROM a LIMIT :param_1",
[{"param_1": 1}],
),
CompiledSQL(
"SELECT a_1.id AS a_1_id, b.id AS b_id FROM a AS a_1 "
"JOIN (b JOIN d ON d.b_id = b.id JOIN c ON c.id = d.c_id) "
"ON a_1.b_id = b.id WHERE a_1.id "
"IN ([POSTCOMPILE_primary_keys])",
[{"primary_keys": [1]}],
),
)
def test_join(self):
A, B = self.classes("A", "B")
sess = fixture_session()
self.assert_compile(
sess.query(A).join(A.b),
"SELECT a.id AS a_id, a.b_id AS a_b_id "
"FROM a JOIN (b JOIN d ON d.b_id = b.id "
"JOIN c ON c.id = d.c_id) ON a.b_id = b.id",
)
|
|
#!/usr/bin/env python
###################################################################################
#
# Copyright (c) 2010-2016 Motsai
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################
from __future__ import print_function
import os
import cmd
import getopt
import signal
import sys
import time
import logging
from neblina import *
from neblinaAPI import NeblinaAPI
###################################################################################
class GracefulKiller:
isKilled = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit)
signal.signal(signal.SIGTERM, self.exit)
def exit(self, signum, frame):
#print("Signal received: {0}.".format(signum))
self.isKilled = True
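# Minimal sketch (illustrative): the streaming loops in StreamMenu below poll
# isKilled so that SIGINT (Ctrl+C) or SIGTERM ends the loop cleanly instead of
# killing the process mid-stream.
def _graceful_killer_sketch(read_one_packet):
    killer = GracefulKiller()
    while not killer.isKilled:
        read_one_packet()                        # hypothetical callback
    return killer.isKilled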
###################################################################################
class StreamMenu(cmd.Cmd):
"""docstring for StreamMenu"""
def __init__(self, address):
cmd.Cmd.__init__(self)
self.signalKiller = GracefulKiller()
self.bigLine = '-------------------------------------------------------------------\n'
self.prompt = '>>'
self.intro = "Welcome to the Neblina Streaming Menu!"
self.api = NeblinaAPI(Interface.UART)
print("Setting up the connection...") # initial delay needed for the device to synchronize its processors
time.sleep(1)
print('.')
time.sleep(1)
print('.')
time.sleep(1)
print('.')
self.api.open(address)
global initialmotionstate # the global variable that stores the initial motion engine state
initialmotionstate = self.api.getMotionStatus() # get the initial motion engine state
self.api.streamDisableAll() # disable all streaming options after storing the initial state
self.api.setDataPortState(Interface.BLE, False) # Close BLE streaming to prevent slowed streaming
self.api.setDataPortState(Interface.UART, True) # Open UART streaming
# If the user exits with Ctrl-C, try switching the interface back to BLE
def cmdloop(self, intro=None):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt as e:
self.api.setDataPortState(Interface.BLE, True)
## Command definitions ##
def do_hist(self, args):
"""Print a list of commands that have been entered"""
print(self._hist)
def do_exit(self, args):
"""Exits from the console"""
# Set the motion engine state back to its initial state by enabling the appropriate streaming features
print('Switching back to the BLE interface...')
self.api.setDataPortState(Interface.UART, False)
print('Setting the motion engine back to its initial state...')
if initialmotionstate.distance:
self.api.streamTrajectoryInfo(True)
if initialmotionstate.force:
self.api.streamExternalForce(True)
if initialmotionstate.euler:
self.api.streamEulerAngle(True)
if initialmotionstate.quaternion:
self.api.streamQuaternion(True)
if initialmotionstate.imuData:
self.api.streamIMU(True)
if initialmotionstate.motion:
self.api.streamMotionState(True)
if initialmotionstate.steps:
self.api.streamPedometer(True)
if initialmotionstate.magData:
self.api.streamMAG(True)
if initialmotionstate.sitStand:
self.api.streamSittingStanding(True)
# Make the module stream back towards its default interface (BLE)
self.api.setDataPortState(Interface.BLE, True)
return -1
## Command definitions to support Cmd object functionality ##
def do_EOF(self, args):
"""Exit on system end of file character"""
return self.do_exit(args)
def do_shell(self, args):
"""Pass command to a system shell when line begins with '!'"""
os.system(args)
def do_help(self, args):
"""Get help on commands
'help' or '?' with no arguments prints a list of commands for which help is available
'help <command>' or '? <command>' gives help on <command>
"""
## The only reason to define this method is for the help text in the doc string
cmd.Cmd.do_help(self, args)
def do_eepromWrite(self, args):
"""
Write 8-byte string to EEPROM
Usage: >>eepromWrite <pageNumber> <string>
:param pageNumber: EEPROM page number (Range: 0-255)
:param string: 8-byte string (Example: 01234567)
"""
arguments = args.split(' ')
if len(arguments) < 2:
print('eepromWrite <pageNumber> <8-byte string>')
return
if len(arguments[1]) > 8:
print('The data string must be at most 8 bytes')
return
arguments[1] = arguments[1].rjust(8) # Pad the string to 8 bytes
writeBytes = arguments[1].encode('utf-8')
writePageNumber = int(arguments[0])
if writePageNumber < 0 or writePageNumber > 255:
print('Page number must be between 0 and 255 inclusively')
return
self.api.eepromWrite(writePageNumber, writeBytes)
print('Write to page #{0} of dataBytes {1} was successful.'\
.format(writePageNumber, writeBytes))
def do_eepromRead(self, args):
"""
Read 8-byte string from EEPROM
Usage: >>eepromRead <pageNumber>
:param pageNumber: EEPROM page number (Range: 0-255)
"""
arguments = args.split(' ')
if (arguments[0]) == '' or len(arguments) != 1:
print('eepromRead <pageNumber>')
return
readPageNumber = int(arguments[0])
if readPageNumber < 0 or readPageNumber > 255:
print('Page number must be between 0 and 255 inclusively')
return
dataBytes = self.api.eepromRead(readPageNumber)
try:
print('Got \'{0}\' at page #{1}'.format(dataBytes.decode('utf-8'), readPageNumber))
except UnicodeDecodeError as ude:
print('Got {0} at page #{1}'.format(dataBytes, readPageNumber))
def do_getMotionStatus(self, args):
"""
Retrieve motion streaming state
Usage: >>getMotionStatus
"""
states = self.api.getMotionStatus()
print("Distance: {0}\nForce:{1}\nEuler:{2}\nQuaternion:{3}\nIMUData:{4}\nMotion:{5}\nSteps:{6}\nMAGData:{7}\nSitStand:{8}"\
.format(states.distance, states.force, states.euler, states.quaternion,\
states.imuData, states.motion, states.steps, states.magData, states.sitStand))
def do_getBatteryLevel(self, args):
"""
Retrieve battery level
Usage: >>getBatteryLevel
"""
batteryLevel = self.api.getBatteryLevel()
print('Battery Level: {0}%'.format(batteryLevel))
def do_getTemperature(self, args):
"""
Retrieve board temperature
Usage: >>getTemperature
"""
temp = self.api.getTemperature()
print('Board Temperature: {0} degrees (Celsius)'.format(temp))
def do_streamEulerAngle(self, args):
"""
Stream EulerAngle until stopped with Ctrl+C
Usage: >>streamEulerAngle
"""
self.api.streamEulerAngle(True)
while not self.signalKiller.isKilled:
print(self.api.getEulerAngle())
self.api.streamEulerAngle(False)
def do_streamIMU(self, args):
"""
Stream 6-axis IMU (Inertial Measurement Unit) until stopped with Ctrl+C
Usage: >>streamIMU
"""
self.api.streamIMU(True)
while not self.signalKiller.isKilled:
print(self.api.getIMU())
self.api.streamIMU(False)
def do_streamQuaternion(self, args):
"""
Stream Quaternion until stopped with Ctrl+C
Usage: >>streamQuaternion
"""
self.api.streamQuaternion(True)
while not self.signalKiller.isKilled:
print(self.api.getQuaternion())
self.api.streamQuaternion(False)
def do_streamMAG(self, args):
"""
Stream MAG (Magnetometer) until stopped with Ctrl+C
Usage: >>streamMAG
"""
self.api.streamMAG(True)
while not self.signalKiller.isKilled:
print(self.api.getMAG())
self.api.streamMAG(False)
def do_streamExternalForce(self, args):
"""
Stream External Force until stopped with Ctrl+C
Usage: >>streamExternalForce
"""
self.api.streamExternalForce(True)
while not self.signalKiller.isKilled:
print(self.api.getExternalForce())
self.api.streamExternalForce(False)
def do_streamRotationInfo(self, args):
"""
Stream RotationInfo until stopped with Ctrl+C
Usage: >>streamRotationInfo
"""
self.api.streamRotationInfo(True)
while not self.signalKiller.isKilled:
print(self.api.getRotationInfo())
self.api.streamRotationInfo(False)
def do_streamPedometer(self, args):
"""
Stream Pedometer until stopped with Ctrl+C
Usage: >>streamPedometer
"""
self.api.streamPedometer(True)
while not self.signalKiller.isKilled:
print(self.api.getPedometer())
self.api.streamPedometer(False)
def do_streamFingerGesture(self, args):
"""
Stream Finger Gesture until stopped with Ctrl+C
Usage: >>streamFingerGesture
"""
self.api.streamFingerGesture(True)
while not self.signalKiller.isKilled:
print(self.api.getFingerGesture())
self.api.streamFingerGesture(False)
def do_streamTrajectoryInfo(self, args):
"""
Stream TrajectoryInfo until stopped with Ctrl+C
Usage: >>streamTrajectoryInfo
"""
self.api.recordTrajectory(True)
self.api.streamTrajectoryInfo(True)
while not self.signalKiller.isKilled:
print(self.api.getTrajectoryInfo())
self.api.streamTrajectoryInfo(False)
self.api.recordTrajectory(False)
def do_streamDisableAll(self, args):
"""
Disable all streams
Usage: >>streamDisableAll
"""
self.api.streamDisableAll()
def do_resetTimestamp(self, args):
"""
Reset motion timestamp
Usage: >>resetTimestamp
"""
self.api.resetTimestamp()
def do_setDownsample(self, args):
"""
Set downsample rate of motion streaming
Usage: >>setDownsample <factor>
:param factor: Downsampling factor (Range: [20, 40, 60, ..., 980, 1000])
"""
if(len(args) <= 0):
print('The argument should be a multiple of 20, i.e., 20, 40, 60, etc.')
return
n = int(args)
if ((n % 20)!=0):
print('The argument should be a multiple of 20, i.e., 20, 40, 60, etc.')
return
self.api.setDownsample(n)
def do_setAccelerometerRange(self, args):
"""
Set accelerometer range
Usage: >>setAccelerometerRange <range>
:param range: Accelerometer range (Possible values: [2, 4, 8, 16])
"""
possibleFactors = [2,4,8,16]
if(len(args) <= 0):
print('The argument should be 2, 4, 8, or 16, representing the accelerometer range in g')
return
factor = int(args)
if(factor not in possibleFactors):
print('The argument should be 2, 4, 8, or 16, representing the accelerometer range in g')
return
self.api.setAccelerometerRange(factor)
def do_setLED(self, args):
"""
Change a LED state.
Usage: >>setLED <number> <value>
:param number: LED number (Range: [0, 1])
:param value: LED state (0: off, 1: on)
"""
arguments = args.split(' ')
if len(arguments) != 2:
print('setLED <ledNumber> <value>')
return
ledIndex = int(arguments[0])
ledValue = int(arguments[1])
if(ledIndex < 0 or ledIndex > 1):
print('Only led indices 0 or 1 are valid')
return
self.api.setLED(ledIndex, ledValue)
def do_getSessionCount(self, args):
"""
Retrieve the number of stored sessions
Usage: >>getSessionCount
"""
sessions = self.api.getSessionCount()
print('Num of sessions: {0}'.format(sessions))
def do_getSessionInfo(self, args):
"""
Retrieve information about a stored session
Usage: >>getSessionInfo <sessionId>
:param sessionId: Session identifier (Range: 0-65535)
"""
sessionID = 65535
if(len(args) <= 0):
sessionID = 65535
elif(len(args) > 0):
sessionID = int(args)
packet = self.api.getSessionInfo(sessionID)
if(packet == None):
print('Session {0} does not exist on the flash'\
.format(sessionID))
else:
print( "Session %d: %d packets (%d bytes)"\
%(packet.sessionID, packet.sessionLength, packet.sessionLengthBytes) )
def do_eraseStorage(self, args):
"""
Erase storage
Usage: >>eraseStorage <type>
:note This can take up to 3 minutes. Do not power down the device during erasing.
:param type: Type of erasing (0: Quick, 1: Full)
"""
self.api.eraseStorage(int(args))
print('Flash erase has completed successfully!')
def do_sessionRecordIMU(self, args):
"""
Record an IMU session for a number of samples
Usage: >>sessionRecordIMU <count>
:param count: Number of samples to record
"""
if(len(args) <= 0):
numSamples = 1000
else:
numSamples = int(args)
sessionId = self.api.sessionRecord(True)
print("Recording session {0}.".format(sessionId))
self.api.streamIMU(True)
sampleCount = 0
while not self.signalKiller.isKilled and sampleCount < numSamples:
self.api.getIMU()
sampleCount += 1
print('Received {0} packets'.format(sampleCount), end="\r", flush=True)
print("\r\n")
self.api.streamIMU(False)
self.api.sessionRecord(False)
def do_sessionRecordEuler(self, args):
"""
Record an EulerAngle session for a number of samples
Usage: >>sessionRecordEuler <count>
:param count: Number of samples to record
"""
if (len(args) <= 0):
numSamples = 1000
else:
numSamples = int(args)
sessionId = self.api.sessionRecord(True)
print("Recording session {0}.".format(sessionId))
self.api.streamEulerAngle(True)
sampleCount = 0
while not self.signalKiller.isKilled and sampleCount < numSamples:
self.api.getEulerAngle()
sampleCount += 1
print('Received {0} packets'.format(sampleCount), end="\r", flush=True)
print("\r\n")
self.api.streamEulerAngle(False)
self.api.sessionRecord(False)
def do_sessionRecordQuaternion(self, args):
"""
        Record a Quaternion session for a given number of samples
        Usage: >>sessionRecordQuaternion <count>
        :param count: Number of samples to record
"""
if (len(args) <= 0):
numSamples = 1000
else:
numSamples = int(args)
sessionId = self.api.sessionRecord(True)
print("Recording session {0}.".format(sessionId))
self.api.streamQuaternion(True)
sampleCount = 0
while not self.signalKiller.isKilled and sampleCount < numSamples:
self.api.getQuaternion()
sampleCount += 1
print('Received {0} packets'.format(sampleCount), end="\r", flush=True)
print("\r\n")
self.api.streamQuaternion(False)
self.api.sessionRecord(False)
def do_sessionPlayback(self, args):
"""
Playback a recorded session
Usage: >>sessionPlayback <sessionId> <dump>
:param sessionId: Session identifier (Range: 0-65535)
:param dump: Dump packet to file ? (True or False)
"""
arguments = args.split(' ')
dump = False
mySessionID = 65535
if(len(args) <= 0):
mySessionID = 65535
dump = False
elif(len(arguments) == 1):
mySessionID = int(arguments[0])
dump = False
elif(len(arguments) >= 2 ):
mySessionID = int(arguments[0])
if arguments[1] == 'True' or arguments[1] == '1':
dump = True
else:
dump = False
self.api.sessionPlayback(mySessionID, dump)
print("sessionPlayback completed")
def do_getFirmwareVersions(self, args):
"""
Retrieve firmware versions
Usage: >>getFirmwareVersions
"""
packet = self.api.getFirmwareVersions()
apiRelease = packet.apiRelease
mcuFWVersion = packet.mcuFWVersion
bleFWVersion = packet.bleFWVersion
deviceID = packet.deviceID
print(packet)
## Override methods in Cmd object ##
def preloop(self):
"""Initialization before prompting user for commands.
        Despite the claims in the Cmd documentation, Cmd.preloop() is not a stub.
"""
cmd.Cmd.preloop(self) ## sets up command completion
self._hist = [] ## No history yet
self._locals = {} ## Initialize execution namespace for user
self._globals = {}
def postloop(self):
"""Take care of any unfinished business.
        Despite the claims in the Cmd documentation, Cmd.postloop() is not a stub.
"""
self.api.close()
cmd.Cmd.postloop(self) ## Clean up command completion
print ("Exiting...")
def precmd(self, line):
""" This method is called after the line has been input but before
it has been interpreted. If you want to modify the input line
before execution (for example, variable substitution) do it here.
"""
# This is added to ensure that the pending bytes in the COM buffer are discarded for each new command.
# This is crucial to avoid missing Acknowledge packets in the beginning, if Neblina is already streaming.
self._hist += [ line.strip() ]
return line
def postcmd(self, stop, line):
"""If you want to stop the console, return something that evaluates to true.
If you want to do some post command processing, do it here.
"""
self.signalKiller.isKilled = False
return stop
def emptyline(self):
"""Do nothing on empty input line"""
pass
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
In that case we execute the line as Python code.
"""
try:
            exec(line, self._locals, self._globals)
except Exception as e:
print (e.__class__, ":", e)
###################################################################################
def printArguments():
print("Neblina stream menu")
print("Copyright Motsai 2010-2016")
print("")
print("Neblina commands:")
print(" -h --help : Display available commands.")
print(" -a --address: Device address to use (COM port)")
###################################################################################
if __name__ == '__main__':
#logging.basicConfig(level=logging.DEBUG, format='%(message)s')
try:
opts, args = getopt.getopt(sys.argv[1:], "ha:")
except getopt.GetoptError:
printArguments()
sys.exit()
for opt, arg in opts:
if opt in ("-h", "--help"):
printArguments()
sys.exit()
elif opt in ("-a", "--address"):
console = StreamMenu(arg)
console.cmdloop()
sys.exit()
print("No device address specified. Exiting.")
'''
mrtparse.py - MRT format data parser
Copyright (C) 2019 Tetsumune KISO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors:
Tetsumune KISO <[email protected]>
Yoshiyuki YAMAUCHI <[email protected]>
Nobuhiro ITOU <[email protected]>
'''
import sys
import struct
import socket
import gzip
import bz2
import collections
import signal
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
__version__ = '1.7'
# Magic Number
GZIP_MAGIC = b'\x1f\x8b'
BZ2_MAGIC = b'\x42\x5a\x68'
def reverse_defaultdict(d):
'''
Reverse the keys and values of dictionaries.
'''
for k in list(d.keys()):
d[d[k]] = k
d = collections.defaultdict(lambda: "Unknown", d)
return d
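# Illustration (not part of the original module) of what reverse_defaultdict()
# produces: the mapping becomes usable in both directions and unknown keys
# resolve to 'Unknown'.
#
#   >>> d = reverse_defaultdict({1: 'IPv4', 2: 'IPv6'})
#   >>> d[1], d['IPv6'], d[99]
#   ('IPv4', 2, 'Unknown')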
# Error codes for MrtFormatError exception
MRT_ERR_C = reverse_defaultdict({
1:'MRT Header Error',
2:'MRT Data Error',
})
# AFI Types
# Assigned by IANA
AFI_T = reverse_defaultdict({
1:'IPv4',
2:'IPv6',
25: 'L2VPN',
})
# SAFI Types
# Assigned by IANA
SAFI_T = reverse_defaultdict({
1:'UNICAST',
2:'MULTICAST',
65:'VPLS',
70:'EVPN',
128:'L3VPN_UNICAST',
129:'L3VPN_MULTICAST',
})
# MRT Message Types
# Defined in RFC6396
MRT_T = reverse_defaultdict({
0:'NULL', # Deprecated in RFC6396
1:'START', # Deprecated in RFC6396
2:'DIE', # Deprecated in RFC6396
3:'I_AM_DEAD', # Deprecated in RFC6396
4:'PEER_DOWN', # Deprecated in RFC6396
5:'BGP', # Deprecated in RFC6396
6:'RIP', # Deprecated in RFC6396
7:'IDRP', # Deprecated in RFC6396
8:'RIPNG', # Deprecated in RFC6396
9:'BGP4PLUS', # Deprecated in RFC6396
10:'BGP4PLUS_01', # Deprecated in RFC6396
11:'OSPFv2',
12:'TABLE_DUMP',
13:'TABLE_DUMP_V2',
16:'BGP4MP',
17:'BGP4MP_ET',
32:'ISIS',
33:'ISIS_ET',
48:'OSPFv3',
49:'OSPFv3_ET',
})
# BGP,BGP4PLUS,BGP4PLUS_01 Subtypes
# Deprecated in RFC6396
BGP_ST = reverse_defaultdict({
0:'BGP_NULL',
1:'BGP_UPDATE',
2:'BGP_PREF_UPDATE',
3:'BGP_STATE_CHANGE',
4:'BGP_SYNC',
5:'BGP_OPEN',
6:'BGP_NOTIFY',
7:'BGP_KEEPALIVE',
})
# TABLE_DUMP Subtypes
# Defined in RFC6396
TD_ST = reverse_defaultdict({
1:'AFI_IPv4',
2:'AFI_IPv6',
})
# TABLE_DUMP_V2 Subtypes
# Defined in RFC6396
TD_V2_ST = reverse_defaultdict({
1:'PEER_INDEX_TABLE',
2:'RIB_IPV4_UNICAST',
3:'RIB_IPV4_MULTICAST',
4:'RIB_IPV6_UNICAST',
5:'RIB_IPV6_MULTICAST',
6:'RIB_GENERIC',
7:'GEO_PEER_TABLE', # Defined in RFC6397
8:'RIB_IPV4_UNICAST_ADDPATH', # Defined in RFC8050
9:'RIB_IPV4_MULTICAST_ADDPATH', # Defined in RFC8050
10:'RIB_IPV6_UNICAST_ADDPATH', # Defined in RFC8050
11:'RIB_IPV6_MULTICAST_ADDPATH', # Defined in RFC8050
12:'RIB_GENERIC_ADDPATH', # Defined in RFC8050
})
# BGP4MP,BGP4MP_ET Subtypes
# Defined in RFC6396
BGP4MP_ST = reverse_defaultdict({
0:'BGP4MP_STATE_CHANGE',
1:'BGP4MP_MESSAGE',
2:'BGP4MP_ENTRY', # Deprecated in RFC6396
3:'BGP4MP_SNAPSHOT', # Deprecated in RFC6396
4:'BGP4MP_MESSAGE_AS4',
5:'BGP4MP_STATE_CHANGE_AS4',
6:'BGP4MP_MESSAGE_LOCAL',
7:'BGP4MP_MESSAGE_AS4_LOCAL',
8:'BGP4MP_MESSAGE_ADDPATH', # Defined in RFC8050
9:'BGP4MP_MESSAGE_AS4_ADDPATH', # Defined in RFC8050
10:'BGP4MP_MESSAGE_LOCAL_ADDPATH', # Defined in RFC8050
11:'BGP4MP_MESSAGE_AS4_LOCAL_ADDPATH', # Defined in RFC8050
})
# MRT Message Subtypes
# Defined in RFC6396
MRT_ST = collections.defaultdict(lambda: dict(), {
9:BGP_ST,
10:BGP_ST,
12:AFI_T,
13:TD_V2_ST,
16:BGP4MP_ST,
17:BGP4MP_ST,
})
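# Example lookups (illustrative, not in the original source): MRT_ST maps an
# MRT type code to its subtype dictionary, so a subtype name is resolved in
# two steps.
#
#   MRT_ST[MRT_T['BGP4MP']][4]   # -> 'BGP4MP_MESSAGE_AS4'
#   MRT_ST[MRT_T['OSPFv2']]      # -> {} (types without subtypes yield an empty dict)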
# BGP FSM States
# Defined in RFC4271
BGP_FSM = reverse_defaultdict({
1:'Idle',
2:'Connect',
3:'Active',
4:'OpenSent',
5:'OpenConfirm',
6:'Established',
7:'Clearing', # Used only in quagga?
8:'Deleted', # Used only in quagga?
})
# BGP Attribute Types
# Defined in RFC4271
BGP_ATTR_T = reverse_defaultdict({
1:'ORIGIN',
2:'AS_PATH',
3:'NEXT_HOP',
4:'MULTI_EXIT_DISC',
5:'LOCAL_PREF',
6:'ATOMIC_AGGREGATE',
7:'AGGREGATOR',
8:'COMMUNITY', # Defined in RFC1997
9:'ORIGINATOR_ID', # Defined in RFC4456
10:'CLUSTER_LIST', # Defined in RFC4456
11:'DPA', # Deprecated in RFC6938
12:'ADVERTISER', # Deprecated in RFC6938
13:'RCID_PATH/CLUSTER_ID', # Deprecated in RFC6938
14:'MP_REACH_NLRI', # Defined in RFC4760
15:'MP_UNREACH_NLRI', # Defined in RFC4760
16:'EXTENDED_COMMUNITIES', # Defined in RFC4360
17:'AS4_PATH', # Defined in RFC6793
18:'AS4_AGGREGATOR', # Defined in RFC6793
26:'AIGP', # Defined in RFC7311
32:'LARGE_COMMUNITY', # Defined in draft-ietf-idr-large-community
128:'ATTR_SET', # Defined in RFC6368
})
# BGP ORIGIN Types
# Defined in RFC4271
ORIGIN_T = reverse_defaultdict({
0:'IGP',
1:'EGP',
2:'INCOMPLETE',
})
# BGP AS_PATH Types
# Defined in RFC4271
AS_PATH_SEG_T = reverse_defaultdict({
1:'AS_SET',
2:'AS_SEQUENCE',
3:'AS_CONFED_SEQUENCE', # Defined in RFC5065
4:'AS_CONFED_SET', # Defined in RFC5065
})
# Reserved BGP COMMUNITY Types
# Defined in RFC1997
COMM_T = reverse_defaultdict({
0xffffff01:'NO_EXPORT',
0xffffff02:'NO_ADVERTISE',
0xffffff03:'NO_EXPORT_SCONFED',
0xffffff04:'NO_PEER', # Defined in RFC3765
})
# BGP Message Types
# Defined in RFC4271
BGP_MSG_T = reverse_defaultdict({
1:'OPEN',
2:'UPDATE',
3:'NOTIFICATION',
4:'KEEPALIVE',
5:'ROUTE-REFRESH', # Defined in RFC2918
})
# BGP Error Codes
# Defined in RFC4271
BGP_ERR_C = reverse_defaultdict({
1:'Message Header Error',
2:'OPEN Message Error',
3:'UPDATE Message Error',
4:'Hold Timer Expired',
5:'Finite State Machine Error',
6:'Cease',
7:'ROUTE-REFRESH Message Error', # Defined in RFC7313
})
# BGP Message Header Error Subcodes
# Defined in RFC4271
BGP_HDR_ERR_SC = reverse_defaultdict({
1:'Connection Not Synchronized',
2:'Bad Message Length',
3:'Bad Message Type',
})
# OPEN Message Error Subcodes
# Defined in RFC4271
BGP_OPEN_ERR_SC = reverse_defaultdict({
1:'Unsupported Version Number',
2:'Bad Peer AS',
3:'Bad BGP Identifier',
4:'Unsupported Optional Parameter',
5:'[Deprecated]',
6:'Unacceptable Hold Time',
7:'Unsupported Capability', # Defined in RFC5492
})
# UPDATE Message Error Subcodes
# Defined in RFC4271
BGP_UPDATE_ERR_SC = reverse_defaultdict({
1:'Malformed Attribute List',
2:'Unrecognized Well-known Attribute',
3:'Missing Well-known Attribute',
4:'Attribute Flags Error',
5:'Attribute Length Error',
6:'Invalid ORIGIN Attribute',
7:'[Deprecated]',
8:'Invalid NEXT_HOP Attribute',
9:'Optional Attribute Error',
10:'Invalid Network Field',
11:'Malformed AS_PATH',
})
# BGP Finite State Machine Error Subcodes
# Defined in RFC6608
BGP_FSM_ERR_SC = reverse_defaultdict({
0:'Unspecified Error',
1:'Receive Unexpected Message in OpenSent State',
2:'Receive Unexpected Message in OpenConfirm State',
3:'Receive Unexpected Message in Established State',
})
# BGP Cease NOTIFICATION Message Subcodes
# Defined in RFC4486
BGP_CEASE_ERR_SC = reverse_defaultdict({
1:'Maximum Number of Prefixes Reached',
2:'Administrative Shutdown',
3:'Peer De-configured',
4:'Administrative Reset',
5:'Connection Rejected',
6:'Other Configuration Change',
7:'Connection Collision Resolution',
8:'Out of Resources',
})
# BGP ROUTE-REFRESH Message Error subcodes
# Defined in RFC7313
BGP_ROUTE_REFRESH_ERR_SC = reverse_defaultdict({
1:'Invalid Message Length',
})
# BGP Error Subcodes
BGP_ERR_SC = collections.defaultdict(lambda: dict(), {
1:BGP_HDR_ERR_SC,
    2:BGP_OPEN_ERR_SC,
    3:BGP_UPDATE_ERR_SC,
4:BGP_UPDATE_ERR_SC,
5:BGP_FSM_ERR_SC,
6:BGP_CEASE_ERR_SC,
7:BGP_ROUTE_REFRESH_ERR_SC,
})
# BGP OPEN Optional Parameter Types
# Defined in RFC5492
BGP_OPT_PARAMS_T = reverse_defaultdict({
1:'Authentication', # Deprecated
2:'Capabilities',
})
# Capability Codes
# Defined in RFC5492
BGP_CAP_C = reverse_defaultdict({
1:'Multiprotocol Extensions for BGP-4', # Defined in RFC2858
2:'Route Refresh Capability for BGP-4', # Defined in RFC2918
3:'Outbound Route Filtering Capability', # Defined in RFC5291
4:'Multiple routes to a destination capability', # Defined in RFC3107
5:'Extended Next Hop Encoding', # Defined in RFC5549
64:'Graceful Restart Capability', # Defined in RFC4724
65:'Support for 4-octet AS number capability', # Defined in RFC6793
66:'[Deprecated]',
# draft-ietf-idr-dynamic-cap
67:'Support for Dynamic Capability (capability specific)',
# draft-ietf-idr-bgp-multisession
68:'Multisession BGP Capability',
# Defined in RFC7911
69:'ADD-PATH Capability',
# Defined in RFC7313
70:'Enhanced Route Refresh Capability',
# draft-uttaro-idr-bgp-persistence
71:'Long-Lived Graceful Restart (LLGR) Capability',
})
# Outbound Route Filtering Capability
# Defined in RFC5291
ORF_T = reverse_defaultdict({
64:'Address Prefix ORF', # Defined in RFC5292
65: 'CP-ORF', # Defined in RFC7543
})
ORF_SEND_RECV = reverse_defaultdict({
1:'Receive',
2:'Send',
3:'Both',
})
# ADD-PATH Capability
# Defined in RFC7911
ADD_PATH_SEND_RECV = reverse_defaultdict({
1:'Receive',
2:'Send',
3:'Both',
})
# AS Number Representation
AS_REPR = reverse_defaultdict({
1:'asplain',
2:'asdot+',
3:'asdot',
})
# MPLS Label
LBL_BOTTOM = 0x01 # Defined in RFC3032
LBL_WITHDRAWN = 0x800000 # Defined in RFC3107
def as_len(n=None):
'''
AS number length for AS_PATH attribute.
'''
if n is not None:
as_len.n = n
try:
return as_len.n
except AttributeError:
return 4
def as_repr(n=None):
'''
AS number representation.
Default is 'asplain'(defined in RFC5396).
'''
if n is not None:
as_repr.n = n
try:
return as_repr.n
except AttributeError:
return AS_REPR['asplain']
def af_num(afi=None, safi=None):
'''
    Store and return the values of AFI/SAFI.
'''
if afi is not None:
af_num.afi = afi
af_num.safi = safi
try:
return (af_num.afi, af_num.safi)
except AttributeError:
return (0, 0)
def is_add_path(f=None):
'''
Flag for add-path.
'''
if f is not None:
is_add_path.f = f
try:
return is_add_path.f
except AttributeError:
return False
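# The helpers above (as_len, as_repr, af_num, is_add_path) keep per-record
# decoder state as attributes on the functions themselves; Reader.unpack_hdr()
# resets them for every MRT record. A minimal illustration (not part of the
# original source):
#
#   as_len(2)         # remember 2-octet AS numbers for the current record
#   as_len()          # -> 2 on later calls with no argument
#   is_add_path()     # -> False until an ADD-PATH subtype enables it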
class MrtFormatError(Exception):
'''
Exception for invalid MRT formatted data.
'''
def __init__(self, msg=''):
Exception.__init__(self)
self.msg = msg
class Base:
'''
Super class for all other classes.
'''
__slots__ = ['buf', 'p']
def __init__(self):
for slot in self.__slots__:
setattr(self, slot, None)
self.p = 0
def chk_buf(self, n):
'''
        Check whether the buffer has at least n bytes remaining.
'''
if len(self.buf) - self.p < n:
raise MrtFormatError(
'Insufficient buffer %d < %d byte'
% (len(self.buf) - self.p, n))
def val_num(self, n):
'''
Convert buffers to integer.
'''
self.chk_buf(n)
val = 0
for i in self.buf[self.p:self.p+n]:
val <<= 8
# for Python3
if isinstance(i, int):
val += i
# for Python2
else:
val += struct.unpack('>B', i)[0]
self.p += n
return val
def val_bytes(self, n):
'''
Convert buffers to bytes.
'''
self.chk_buf(n)
val = self.buf[self.p:self.p+n]
self.p += n
return val
def val_str(self, n):
'''
Convert buffers to string.
'''
self.chk_buf(n)
val = self.buf[self.p:self.p+n]
self.p += n
# for Python2
if isinstance(val, str):
return val
# for Python3
else:
return val.decode('utf-8')
def val_addr(self, af, n=-1):
'''
Convert buffers to IP address.
'''
if af == AFI_T['IPv4']:
m = 4
_af = socket.AF_INET
elif af == AFI_T['IPv6']:
m = 16
_af = socket.AF_INET6
else:
raise MrtFormatError('Unsupported AFI %d(%s)' % (af, AFI_T[af]))
n = m if n < 0 else (n + 7) // 8
self.chk_buf(n)
addr = socket.inet_ntop(
_af, self.buf[self.p:self.p+n] + b'\x00'*(m - n))
self.p += n
return addr
def val_asn(self, n):
'''
Convert buffers to AS number.
'''
asn = self.val_num(n)
if as_repr() == AS_REPR['asplain'] \
or (as_repr() == AS_REPR['asdot'] and asn < 0x10000):
return str(asn)
else:
return str(asn >> 16) + '.' + str(asn & 0xffff)
def val_rd(self):
'''
Convert buffers to route distinguisher.
'''
rd = self.val_num(8)
return str(rd >> 32) + ':' + str(rd & 0xffffffff)
def val_nlri(self, n, af, saf=0):
'''
Convert buffers to NLRI.
'''
try:
if is_add_path():
raise MrtFormatError
p = self.p
l = []
while p < n:
nlri = Nlri(self.buf[p:])
p += nlri.unpack(af, saf)
nlri.is_valid()
nlri.is_dup(l)
l.append(nlri)
self.p = p
except MrtFormatError:
l = []
while self.p < n:
nlri = Nlri(self.buf[self.p:])
self.p += nlri.unpack(af, saf, add_path=1)
nlri.is_valid()
l.append(nlri)
return l
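# Illustration (not part of the original source): Base keeps the raw bytes in
# 'buf' and a read cursor in 'p'; every val_*() method consumes bytes at the
# cursor and advances it.
#
#   b = Base()
#   b.buf = b'\x00\x10\x7f\x00\x00\x01'
#   b.val_num(2)                 # -> 16, cursor now at 2
#   b.val_addr(AFI_T['IPv4'])    # -> '127.0.0.1', cursor now at 6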
class Reader(Base):
'''
Reader for MRT format data.
'''
__slots__ = ['buf', 'p', 'mrt', 'f']
def __init__(self, arg):
Base.__init__(self)
# for file instance
if hasattr(arg, 'read'):
self.f = arg
# for file path
elif isinstance(arg, str):
f = open(arg, 'rb')
hdr = f.read(max(len(BZ2_MAGIC), len(GZIP_MAGIC)))
f.close()
if hdr.startswith(BZ2_MAGIC):
self.f = bz2.BZ2File(arg, 'rb')
elif hdr.startswith(GZIP_MAGIC):
self.f = gzip.GzipFile(arg, 'rb')
else:
self.f = open(arg, 'rb')
else:
sys.stderr.write("Error: Unsupported instance type\n")
def close(self):
'''
Close file object and stop iteration.
'''
self.f.close()
raise StopIteration
def __iter__(self):
return self
def __next__(self):
try:
self.unpack_hdr()
except MrtFormatError as e:
self.mrt.err = MRT_ERR_C['MRT Header Error']
self.mrt.err_msg = e.msg
return self
try:
self.unpack_data()
except MrtFormatError as e:
self.mrt.err = MRT_ERR_C['MRT Data Error']
self.mrt.err_msg = e.msg
return self
return self
# for Python2 compatibility
next = __next__
def unpack_hdr(self):
'''
Decoder for MRT header.
'''
as_len(4)
af_num(0, 0)
is_add_path(False)
self.mrt = Mrt(self.f.read(12))
if len(self.mrt.buf) == 0:
self.close()
elif len(self.mrt.buf) < 12:
raise MrtFormatError(
'Invalid MRT header length %d < 12 byte'
% len(self.mrt.buf))
self.mrt.unpack()
def unpack_data(self):
'''
Decoder for MRT payload.
'''
data = self.f.read(self.mrt.len)
self.mrt.buf += data
if len(data) < self.mrt.len:
raise MrtFormatError(
'Invalid MRT data length %d < %d byte'
% (len(data), self.mrt.len))
if len(MRT_ST[self.mrt.type]) \
and MRT_ST[self.mrt.type][self.mrt.subtype] == 'Unknown':
raise MrtFormatError(
'Unsupported %s subtype %d(%s)'
                % (MRT_T[self.mrt.type], self.mrt.subtype,
MRT_ST[self.mrt.type][self.mrt.subtype]))
if self.mrt.type == MRT_T['TABLE_DUMP_V2']:
self.unpack_td_v2(data)
elif self.mrt.type == MRT_T['BGP4MP'] \
or self.mrt.type == MRT_T['BGP4MP_ET']:
if self.mrt.subtype == BGP4MP_ST['BGP4MP_ENTRY'] \
or self.mrt.subtype == BGP4MP_ST['BGP4MP_SNAPSHOT']:
self.p += self.mrt.len
raise MrtFormatError(
'Unsupported %s subtype %d(%s)'
% (MRT_T[self.mrt.type], self.mrt.subtype,
BGP4MP_ST[self.mrt.subtype]))
else:
if self.mrt.type == MRT_T['BGP4MP_ET']:
self.mrt.micro_ts = self.val_num(4)
self.mrt.bgp = Bgp4Mp(data)
self.mrt.bgp.unpack(self.mrt.subtype)
elif self.mrt.type == MRT_T['TABLE_DUMP']:
self.mrt.td = TableDump(data)
self.mrt.td.unpack(self.mrt.subtype)
else:
self.p += self.mrt.len
raise MrtFormatError(
'Unsupported MRT type %d(%s)'
% (self.mrt.type, MRT_T[self.mrt.type]))
return self.p
def unpack_td_v2(self, data):
'''
Decoder for Table_Dump_V2 format.
'''
if self.mrt.subtype == TD_V2_ST['RIB_IPV4_UNICAST_ADDPATH'] \
or self.mrt.subtype == TD_V2_ST['RIB_IPV4_MULTICAST_ADDPATH'] \
or self.mrt.subtype == TD_V2_ST['RIB_IPV6_UNICAST_ADDPATH'] \
or self.mrt.subtype == TD_V2_ST['RIB_IPV6_MULTICAST_ADDPATH']:
is_add_path(True)
if self.mrt.subtype == TD_V2_ST['RIB_IPV4_UNICAST'] \
or self.mrt.subtype == TD_V2_ST['RIB_IPV4_MULTICAST'] \
or self.mrt.subtype == TD_V2_ST['RIB_IPV4_UNICAST_ADDPATH'] \
or self.mrt.subtype == TD_V2_ST['RIB_IPV4_MULTICAST_ADDPATH']:
af_num.afi = AFI_T['IPv4']
self.mrt.rib = AfiSpecRib(data)
self.mrt.rib.unpack()
elif self.mrt.subtype == TD_V2_ST['RIB_IPV6_UNICAST'] \
or self.mrt.subtype == TD_V2_ST['RIB_IPV6_MULTICAST'] \
or self.mrt.subtype == TD_V2_ST['RIB_IPV6_UNICAST_ADDPATH'] \
or self.mrt.subtype == TD_V2_ST['RIB_IPV6_MULTICAST_ADDPATH']:
af_num.afi = AFI_T['IPv6']
self.mrt.rib = AfiSpecRib(data)
self.mrt.rib.unpack()
elif self.mrt.subtype == TD_V2_ST['PEER_INDEX_TABLE']:
self.mrt.peer = PeerIndexTable(data)
self.mrt.peer.unpack()
elif self.mrt.subtype == TD_V2_ST['RIB_GENERIC'] \
or self.mrt.subtype == TD_V2_ST['RIB_GENERIC_ADDPATH']:
self.mrt.rib = RibGeneric(data)
self.mrt.rib.unpack()
else:
self.p += self.mrt.len
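# Reader is an iterator: each step decodes one MRT record and returns the
# Reader itself, with the record available as '.mrt'. Parse problems do not
# stop the iteration; they are recorded in mrt.err / mrt.err_msg instead.
# Typical consumption pattern (sketch; the file name is a placeholder):
#
#   for entry in Reader('updates.mrt'):
#       if entry.mrt.err:
#           continue
#       print(MRT_T[entry.mrt.type], entry.mrt.subtype)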
class Mrt(Base):
'''
Class for MRT header.
'''
__slots__ = [
'buf', 'p', 'ts', 'type', 'subtype', 'len', 'micro_ts', 'bgp', 'peer',
'td', 'rib', 'err', 'err_msg'
]
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
def unpack(self):
'''
Decoder for MRT header.
'''
self.ts = self.val_num(4)
self.type = self.val_num(2)
self.subtype = self.val_num(2)
self.len = self.val_num(4)
return self.p
class TableDump(Base):
'''
Class for Table_Dump format.
'''
__slots__ = [
'buf', 'p', 'view', 'seq', 'prefix', 'plen', 'status', 'org_time',
'peer_ip', 'peer_as', 'attr_len', 'attr'
]
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
def unpack(self, subtype):
'''
Decoder for Table_Dump format.
'''
self.view = self.val_num(2)
self.seq = self.val_num(2)
self.prefix = self.val_addr(subtype)
self.plen = self.val_num(1)
self.status = self.val_num(1)
self.org_time = self.val_num(4)
# Considering the IPv4 peers advertising IPv6 Prefixes, first,
# the Peer IP Address field is decoded as an IPv4 address.
self.peer_ip = self.val_addr(AFI_T['IPv4'])
if subtype == AFI_T['IPv6'] and self.val_num(12):
self.p -= 16
self.peer_ip = self.val_addr(subtype)
self.peer_as = self.val_asn(as_len(2))
attr_len = self.attr_len = self.val_num(2)
self.attr = []
while attr_len > 0:
attr = BgpAttr(self.buf[self.p:])
self.p += attr.unpack()
self.attr.append(attr)
attr_len -= attr.p
return self.p
class PeerIndexTable(Base):
'''
Class for PEER_INDEX_TABLE format.
'''
__slots__ = ['buf', 'p', 'collector', 'view_len', 'view', 'count', 'entry']
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
def unpack(self):
'''
Decoder for PEER_INDEX_TABLE format.
'''
self.collector = self.val_addr(AFI_T['IPv4'])
self.view_len = self.val_num(2)
self.view = self.val_str(self.view_len)
self.count = self.val_num(2)
self.entry = []
for _ in range(self.count):
entry = PeerEntries(self.buf[self.p:])
self.p += entry.unpack()
self.entry.append(entry)
return self.p
class PeerEntries(Base):
'''
Class for Peer Entries.
'''
__slots__ = ['buf', 'p', 'type', 'bgp_id', 'ip', 'asn']
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
def unpack(self):
'''
Decoder for Peer Entries.
'''
self.type = self.val_num(1)
self.bgp_id = self.val_addr(AFI_T['IPv4'])
af_num.afi = AFI_T['IPv6'] if self.type & 0x01 else AFI_T['IPv4']
self.ip = self.val_addr(af_num.afi)
self.asn = self.val_asn(4 if self.type & (0x01 << 1) else 2)
return self.p
class RibGeneric(Base):
'''
Class for RIB_GENERIC format.
'''
__slots__ = ['buf', 'p', 'seq', 'afi', 'safi', 'nlri', 'count', 'entry']
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
def unpack(self):
'''
Decoder for RIB_GENERIC format.
'''
self.seq = self.val_num(4)
af_num.afi = self.afi = self.val_num(2)
af_num.safi = self.safi = self.val_num(1)
n = self.val_num(1)
self.p -= 1
self.nlri = self.val_nlri(self.p+(n+7)//8, self.afi, self.safi)
self.count = self.val_num(2)
self.entry = []
for _ in range(self.count):
entry = RibEntries(self.buf[self.p:])
self.p += entry.unpack()
self.entry.append(entry)
return self.p
class AfiSpecRib(Base):
'''
Class for AFI/SAFI-Specific RIB format.
'''
__slots__ = ['buf', 'p', 'seq', 'plen', 'prefix', 'count', 'entry']
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
def unpack(self):
'''
Decoder for AFI/SAFI-Specific RIB format.
'''
self.seq = self.val_num(4)
self.plen = self.val_num(1)
self.prefix = self.val_addr(af_num.afi, self.plen)
self.count = self.val_num(2)
self.entry = []
for _ in range(self.count):
entry = RibEntries(self.buf[self.p:])
self.p += entry.unpack()
self.entry.append(entry)
return self.p
class RibEntries(Base):
'''
Class for Rib Entries format.
'''
__slots__ = [
'buf', 'p', 'peer_index', 'org_time', 'path_id', 'attr_len', 'attr'
]
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
self.peer_index = None
self.org_time = None
self.path_id = None
self.attr_len = None
self.attr = None
def unpack(self):
'''
Decoder for Rib Entries format.
'''
self.peer_index = self.val_num(2)
self.org_time = self.val_num(4)
if is_add_path():
self.path_id = self.val_num(4)
attr_len = self.attr_len = self.val_num(2)
self.attr = []
while attr_len > 0:
attr = BgpAttr(self.buf[self.p:])
self.p += attr.unpack()
self.attr.append(attr)
attr_len -= attr.p
return self.p
class Bgp4Mp(Base):
'''
Class for BGP4MP format.
'''
__slots__ = [
'buf', 'p', 'peer_as', 'local_as', 'ifindex', 'af', 'peer_ip',
'local_ip', 'old_state', 'new_state', 'msg'
]
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
def unpack(self, subtype):
'''
Decoder for BGP4MP format.
'''
if subtype == BGP4MP_ST['BGP4MP_STATE_CHANGE'] \
or subtype == BGP4MP_ST['BGP4MP_MESSAGE'] \
or subtype == BGP4MP_ST['BGP4MP_MESSAGE_LOCAL'] \
or subtype == BGP4MP_ST['BGP4MP_MESSAGE_ADDPATH'] \
or subtype == BGP4MP_ST['BGP4MP_MESSAGE_LOCAL_ADDPATH']:
as_len(2)
if subtype == BGP4MP_ST['BGP4MP_MESSAGE_ADDPATH'] \
or subtype == BGP4MP_ST['BGP4MP_MESSAGE_AS4_ADDPATH'] \
or subtype == BGP4MP_ST['BGP4MP_MESSAGE_LOCAL_ADDPATH'] \
or subtype == BGP4MP_ST['BGP4MP_MESSAGE_AS4_LOCAL_ADDPATH']:
is_add_path(True)
self.peer_as = self.val_asn(as_len())
self.local_as = self.val_asn(as_len())
self.ifindex = self.val_num(2)
af_num.afi = self.af = self.val_num(2)
self.peer_ip = self.val_addr(self.af)
self.local_ip = self.val_addr(self.af)
if subtype == BGP4MP_ST['BGP4MP_STATE_CHANGE'] \
or subtype == BGP4MP_ST['BGP4MP_STATE_CHANGE_AS4']:
self.old_state = self.val_num(2)
self.new_state = self.val_num(2)
else:
self.msg = BgpMessage(self.buf[self.p:])
self.p += self.msg.unpack()
return self.p
class BgpMessage(Base):
'''
Class for BGP Message.
'''
__slots__ = [
'buf', 'p', 'marker', 'len', 'type', 'ver', 'my_as', 'holdtime',
'bgp_id', 'opt_len', 'opt_params', 'wd_len', 'withdrawn', 'attr_len',
'attr', 'nlri', 'err_code', 'err_subcode', 'data', 'afi', 'rsvd', 'safi'
]
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
def unpack(self):
'''
Decoder for BGP Message.
'''
self.marker = self.val_bytes(16)
self.len = self.val_num(2)
self.type = self.val_num(1)
if self.type == BGP_MSG_T['OPEN']:
self.unpack_open()
elif self.type == BGP_MSG_T['UPDATE']:
self.unpack_update()
elif self.type == BGP_MSG_T['NOTIFICATION']:
self.unpack_notification()
elif self.type == BGP_MSG_T['ROUTE-REFRESH']:
self.unpack_route_refresh()
self.p += self.len - self.p
return self.p
def unpack_open(self):
'''
Decoder for BGP OPEN Message.
'''
self.ver = self.val_num(1)
self.my_as = self.val_num(2)
self.holdtime = self.val_num(2)
self.bgp_id = self.val_addr(AFI_T['IPv4'])
opt_len = self.opt_len = self.val_num(1)
self.opt_params = []
while opt_len > 0:
opt_params = OptParams(self.buf[self.p:])
self.p += opt_params.unpack()
self.opt_params.append(opt_params)
opt_len -= opt_params.p
def unpack_update(self):
'''
Decoder for BGP UPDATE Message.
'''
self.wd_len = self.val_num(2)
self.withdrawn = self.val_nlri(self.p+self.wd_len, af_num.afi)
self.attr_len = self.val_num(2)
attr_len = self.p + self.attr_len
self.attr = []
while self.p < attr_len:
attr = BgpAttr(self.buf[self.p:])
self.p += attr.unpack()
self.attr.append(attr)
self.nlri = self.val_nlri(self.len, af_num.afi)
def unpack_notification(self):
'''
Decoder for BGP NOTIFICATION Message.
'''
self.err_code = self.val_num(1)
self.err_subcode = self.val_num(1)
self.data = self.val_bytes(self.len - self.p)
def unpack_route_refresh(self):
'''
Decoder for BGP ROUTE-REFRESH Message.
'''
self.afi = self.val_num(2)
self.rsvd = self.val_num(1)
self.safi = self.val_num(1)
class OptParams(Base):
'''
Class for BGP OPEN Optional Parameters.
'''
__slots__ = [
'buf', 'p', 'type', 'len', 'cap_type', 'cap_len', 'multi_ext', 'orf',
'graceful_restart', 'support_as4', 'add_path'
]
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
def unpack(self):
'''
Decoder for BGP OPEN Optional Parameters.
'''
self.type = self.val_num(1)
self.len = self.val_num(1)
if self.type == BGP_OPT_PARAMS_T['Capabilities']:
self.unpack_capabilities()
else:
self.p += self.len
return self.p
def unpack_capabilities(self):
'''
Decoder for BGP Capabilities.
'''
self.cap_type = self.val_num(1)
self.cap_len = self.val_num(1)
if self.cap_type == BGP_CAP_C['Multiprotocol Extensions for BGP-4']:
self.unpack_multi_ext()
elif self.cap_type == BGP_CAP_C['Route Refresh Capability for BGP-4']:
self.p += self.len - 2
elif self.cap_type == BGP_CAP_C['Outbound Route Filtering Capability']:
self.unpack_orf()
elif self.cap_type == BGP_CAP_C['Graceful Restart Capability']:
self.unpack_graceful_restart()
elif self.cap_type == BGP_CAP_C['Support for 4-octet AS number capability']:
self.unpack_support_as4()
elif self.cap_type == BGP_CAP_C['ADD-PATH Capability']:
self.unpack_add_path()
else:
self.p += self.len - 2
def unpack_multi_ext(self):
'''
Decoder for Multiprotocol Extensions for BGP-4.
'''
self.multi_ext = {}
self.multi_ext['afi'] = self.val_num(2)
self.multi_ext['rsvd'] = self.val_num(1)
self.multi_ext['safi'] = self.val_num(1)
def unpack_orf(self):
'''
Decoder for Outbound Route Filtering Capability.
'''
self.orf = {}
self.orf['afi'] = self.val_num(2)
self.orf['rsvd'] = self.val_num(1)
self.orf['safi'] = self.val_num(1)
self.orf['number'] = self.val_num(1)
self.orf['entry'] = []
for _ in range(self.orf['number']):
entry = {}
entry['type'] = self.val_num(1)
entry['send_recv'] = self.val_num(1)
self.orf['entry'].append(entry)
def unpack_graceful_restart(self):
'''
Decoder for Graceful Restart Capability.
'''
self.graceful_restart = {}
n = self.val_num(2)
self.graceful_restart['flag'] = n & 0xf000
self.graceful_restart['sec'] = n & 0x0fff
self.graceful_restart['entry'] = []
cap_len = self.cap_len
while cap_len > 2:
entry = {}
entry['afi'] = self.val_num(2)
entry['safi'] = self.val_num(1)
entry['flag'] = self.val_num(1)
self.graceful_restart['entry'].append(entry)
cap_len -= 4
def unpack_support_as4(self):
'''
Decoder for Support for 4-octet AS number capability.
'''
self.support_as4 = self.val_asn(4)
def unpack_add_path(self):
'''
Decoder for ADD-PATH Capability
'''
self.add_path = []
cap_len = self.cap_len
while cap_len > 2:
entry = {}
entry['afi'] = self.val_num(2)
entry['safi'] = self.val_num(1)
entry['send_recv'] = self.val_num(1)
self.add_path.append(entry)
cap_len -= 4
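# Note (explanatory, not in the original source): in BgpAttr.unpack() below,
# 'self.flag & 0x01 << 4' tests the 0x10 bit of the attribute flags octet, the
# Extended Length bit of RFC4271; when it is set, the attribute length field
# is two octets instead of one.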
class BgpAttr(Base):
'''
Class for BGP path attributes
'''
__slots__ = [
'buf', 'p', 'flag', 'type', 'len', 'origin', 'as_path', 'next_hop',
'med', 'local_pref', 'aggr', 'comm', 'org_id', 'cl_list', 'mp_reach',
'mp_unreach', 'ext_comm', 'as4_path', 'as4_aggr', 'aigp', 'attr_set',
'large_comm', 'val'
]
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
def unpack(self):
'''
Decoder for BGP path attributes
'''
self.flag = self.val_num(1)
self.type = self.val_num(1)
if self.flag & 0x01 << 4:
self.len = self.val_num(2)
else:
self.len = self.val_num(1)
if self.type == BGP_ATTR_T['ORIGIN']:
self.unpack_origin()
elif self.type == BGP_ATTR_T['AS_PATH']:
self.unpack_as_path()
elif self.type == BGP_ATTR_T['NEXT_HOP']:
self.unpack_next_hop()
elif self.type == BGP_ATTR_T['MULTI_EXIT_DISC']:
self.unpack_multi_exit_disc()
elif self.type == BGP_ATTR_T['LOCAL_PREF']:
self.unpack_local_pref()
elif self.type == BGP_ATTR_T['AGGREGATOR']:
self.unpack_aggregator()
elif self.type == BGP_ATTR_T['COMMUNITY']:
self.unpack_community()
elif self.type == BGP_ATTR_T['ORIGINATOR_ID']:
self.unpack_originator_id()
elif self.type == BGP_ATTR_T['CLUSTER_LIST']:
self.unpack_cluster_list()
elif self.type == BGP_ATTR_T['MP_REACH_NLRI']:
self.unpack_mp_reach_nlri()
elif self.type == BGP_ATTR_T['MP_UNREACH_NLRI']:
self.unpack_mp_unreach_nlri()
elif self.type == BGP_ATTR_T['EXTENDED_COMMUNITIES']:
self.unpack_extended_communities()
elif self.type == BGP_ATTR_T['AS4_PATH']:
self.unpack_as4_path()
elif self.type == BGP_ATTR_T['AS4_AGGREGATOR']:
self.unpack_as4_aggregator()
elif self.type == BGP_ATTR_T['AIGP']:
self.unpack_aigp()
elif self.type == BGP_ATTR_T['ATTR_SET']:
self.unpack_attr_set()
elif self.type == BGP_ATTR_T['LARGE_COMMUNITY']:
self.unpack_large_community()
else:
self.val = self.val_bytes(self.len)
return self.p
def unpack_origin(self):
'''
Decoder for ORIGIN attribute
'''
self.origin = self.val_num(1)
def unpack_as_path(self):
'''
Decoder for AS_PATH attribute
'''
attr_len = self.p + self.len
self.as_path = []
while self.p < attr_len:
path_seg = {}
path_seg['type'] = self.val_num(1)
path_seg['len'] = self.val_num(1)
path_seg['val'] = []
for _ in range(path_seg['len']):
path_seg['val'].append(self.val_asn(as_len()))
self.as_path.append(path_seg)
def unpack_next_hop(self):
'''
Decoder for NEXT_HOP attribute
'''
if self.len == 4:
self.next_hop = self.val_addr(AFI_T['IPv4'])
elif self.len == 16:
self.next_hop = self.val_addr(AFI_T['IPv6'])
else:
self.p += self.len
self.next_hop = None
def unpack_multi_exit_disc(self):
'''
Decoder for MULTI_EXIT_DISC attribute
'''
self.med = self.val_num(4)
def unpack_local_pref(self):
'''
Decoder for LOCAL_PREF attribute
'''
self.local_pref = self.val_num(4)
def unpack_aggregator(self):
'''
Decoder for AGGREGATOR attribute
'''
self.aggr = {}
n = 2 if self.len < 8 else 4
self.aggr['asn'] = self.val_asn(n)
self.aggr['id'] = self.val_addr(AFI_T['IPv4'])
def unpack_community(self):
'''
Decoder for COMMUNITY attribute
'''
attr_len = self.p + self.len
self.comm = []
while self.p < attr_len:
val = self.val_num(4)
self.comm.append(
'%d:%d' %
((val & 0xffff0000) >> 16, val & 0x0000ffff))
def unpack_originator_id(self):
'''
Decoder for ORIGINATOR_ID attribute
'''
self.org_id = self.val_addr(AFI_T['IPv4'])
def unpack_cluster_list(self):
'''
Decoder for CLUSTER_LIST attribute
'''
attr_len = self.p + self.len
self.cl_list = []
while self.p < attr_len:
self.cl_list.append(self.val_addr(AFI_T['IPv4']))
def unpack_mp_reach_nlri(self):
'''
Decoder for MP_REACH_NLRI attribute
'''
attr_len = self.p + self.len
self.mp_reach = {}
self.mp_reach['afi'] = self.val_num(2)
if AFI_T[self.mp_reach['afi']] != 'Unknown':
af_num.afi = self.mp_reach['afi']
af_num.safi = self.mp_reach['safi'] = self.val_num(1)
self.mp_reach['nlen'] = self.val_num(1)
if af_num.afi != AFI_T['IPv4'] and af_num.afi != AFI_T['IPv6']:
self.p = attr_len
return
if af_num.safi != SAFI_T['UNICAST'] \
and af_num.safi != SAFI_T['MULTICAST'] \
and af_num.safi != SAFI_T['L3VPN_UNICAST'] \
and af_num.safi != SAFI_T['L3VPN_MULTICAST']:
self.p = attr_len
return
if af_num.safi == SAFI_T['L3VPN_UNICAST'] \
or af_num.safi == SAFI_T['L3VPN_MULTICAST']:
self.mp_reach['rd'] = self.val_rd()
else:
self.p -= 2
self.mp_reach = {}
self.mp_reach['nlen'] = self.val_num(1)
self.mp_reach['next_hop'] = []
self.mp_reach['next_hop'].append(self.val_addr(af_num.afi))
if self.mp_reach['nlen'] == 32 and af_num.afi == AFI_T['IPv6']:
self.mp_reach['next_hop'].append(self.val_addr(af_num.afi))
if 'afi' in self.mp_reach:
self.mp_reach['rsvd'] = self.val_num(1)
self.mp_reach['nlri'] = self.val_nlri(
attr_len, af_num.afi, af_num.safi)
def unpack_mp_unreach_nlri(self):
'''
Decoder for MP_UNREACH_NLRI attribute
'''
attr_len = self.p + self.len
self.mp_unreach = {}
self.mp_unreach['afi'] = self.val_num(2)
self.mp_unreach['safi'] = self.val_num(1)
if self.mp_unreach['afi'] != AFI_T['IPv4'] \
and self.mp_unreach['afi'] != AFI_T['IPv6']:
self.p = attr_len
return
if self.mp_unreach['safi'] != SAFI_T['UNICAST'] \
and self.mp_unreach['safi'] != SAFI_T['MULTICAST'] \
and self.mp_unreach['safi'] != SAFI_T['L3VPN_UNICAST'] \
and self.mp_unreach['safi'] != SAFI_T['L3VPN_MULTICAST']:
self.p = attr_len
return
self.mp_unreach['withdrawn'] = self.val_nlri(
attr_len, self.mp_unreach['afi'], self.mp_unreach['safi'])
def unpack_extended_communities(self):
'''
Decoder for EXT_COMMUNITIES attribute
'''
attr_len = self.p + self.len
self.ext_comm = []
while self.p < attr_len:
ext_comm = self.val_num(8)
self.ext_comm.append(ext_comm)
def unpack_as4_path(self):
'''
Decoder for AS4_PATH attribute
'''
attr_len = self.p + self.len
self.as4_path = []
while self.p < attr_len:
path_seg = {}
path_seg['type'] = self.val_num(1)
path_seg['len'] = self.val_num(1)
path_seg['val'] = []
for _ in range(path_seg['len']):
path_seg['val'].append(self.val_asn(4))
self.as4_path.append(path_seg)
def unpack_as4_aggregator(self):
'''
Decoder for AS4_AGGREGATOR attribute
'''
self.as4_aggr = {}
self.as4_aggr['asn'] = self.val_asn(4)
self.as4_aggr['id'] = self.val_addr(AFI_T['IPv4'])
def unpack_aigp(self):
'''
Decoder for AIGP attribute
'''
attr_len = self.p + self.len
self.aigp = []
while self.p < attr_len:
aigp = {}
aigp['type'] = self.val_num(1)
aigp['len'] = self.val_num(2)
aigp['val'] = self.val_num(aigp['len'] - 3)
self.aigp.append(aigp)
def unpack_attr_set(self):
'''
Decoder for ATTR_SET attribute
'''
attr_len = self.p + self.len
self.attr_set = {}
self.attr_set['origin_as'] = self.val_asn(4)
attr_len -= 4
self.attr_set['attr'] = []
while self.p < attr_len:
attr = BgpAttr(self.buf[self.p:])
self.p += attr.unpack()
self.attr_set['attr'].append(attr)
def unpack_large_community(self):
'''
Decoder for LARGE_COMMUNITY attribute
'''
attr_len = self.p + self.len
self.large_comm = []
while self.p < attr_len:
global_admin = self.val_num(4)
local_data_part_1 = self.val_num(4)
local_data_part_2 = self.val_num(4)
self.large_comm.append(
'%d:%d:%d' %
(global_admin, local_data_part_1, local_data_part_2))
class Nlri(Base):
'''
Class for NLRI.
'''
__slots__ = ['buf', 'p', 'path_id', 'label', 'rd', 'plen', 'prefix']
def __init__(self, buf):
Base.__init__(self)
self.buf = buf
def unpack(self, af, saf=0, add_path=0):
'''
Decoder for NLRI.
'''
if add_path:
self.path_id = self.val_num(4)
self.plen = plen = self.val_num(1)
if saf == SAFI_T['L3VPN_UNICAST'] \
or saf == SAFI_T['L3VPN_MULTICAST']:
plen = self.unpack_l3vpn(plen)
if af == AFI_T['IPv4'] and plen > 32 \
or af == AFI_T['IPv6'] and plen > 128:
raise MrtFormatError(
'Invalid prefix length %d (%s)'
% (self.plen, AFI_T[af]))
self.prefix = self.val_addr(af, plen)
return self.p
def unpack_l3vpn(self, plen):
'''
Decoder for L3VPN NLRI.
'''
self.label = []
while True:
label = self.val_num(3)
self.label.append(label)
if label & LBL_BOTTOM or label == LBL_WITHDRAWN:
break
self.rd = self.val_rd()
plen -= (3 * len(self.label) + 8) * 8
return plen
def is_dup(self, l):
'''
        Check whether there are duplicate routes in the NLRI.
'''
for e in l:
if self.plen == e.plen and self.prefix == e.prefix \
and self.label == e.label and self.rd == e.rd:
raise MrtFormatError(
'Duplicate prefix %s/%d'
% (self.prefix, self.plen))
def is_valid(self):
'''
Check whether route is valid.
'''
if self.label is not None:
plen = self.plen - (len(self.label) * 3 + 8) * 8
else:
plen = self.plen
if ':' in self.prefix:
b = socket.inet_pton(socket.AF_INET6, self.prefix)
t = struct.unpack("!QQ", b)
n = t[0] << 64 | t[1]
plen_max = 128
else:
b = socket.inet_pton(socket.AF_INET, self.prefix)
n = struct.unpack("!L", b)[0]
plen_max = 32
if n & ~(-1 << (plen_max - plen)):
raise MrtFormatError(
'Invalid prefix %s/%d'
% (self.prefix, self.plen))
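# ---------------------------------------------------------------------------
# Usage sketch (not part of the original mrtparse distribution; the name and
# logic below are illustrative only): extract the prefixes announced in
# BGP4MP/BGP4MP_ET UPDATE messages of an MRT dump. Reader() sniffs the magic
# bytes, so plain, gzip, or bzip2 files all work.
def _example_announced_prefixes(path):
    '''
    Yield announced prefixes as 'prefix/len' strings.
    '''
    for entry in Reader(path):
        m = entry.mrt
        if m.err is not None:
            continue
        if m.type != MRT_T['BGP4MP'] and m.type != MRT_T['BGP4MP_ET']:
            continue
        if m.bgp is None or m.bgp.msg is None:
            continue
        if m.bgp.msg.type != BGP_MSG_T['UPDATE']:
            continue
        for nlri in m.bgp.msg.nlri:
            yield '%s/%d' % (nlri.prefix, nlri.plen)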