# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkTapsOperations:
"""VirtualNetworkTapsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
tap_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
tap_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified virtual network tap.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the virtual network tap.
:type tap_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
tap_name=tap_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
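# Usage sketch (not part of the generated module, shown for orientation): how a
# caller might drive this long-running delete. Assumes azure-identity is
# installed, that the versioned async client is importable as
# azure.mgmt.network.v2018_08_01.aio.NetworkManagementClient, and that it
# exposes this operation group as ``virtual_network_taps``; the resource group
# and tap names are placeholders.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.v2018_08_01.aio import NetworkManagementClient
#
#     async def delete_tap(subscription_id: str) -> None:
#         async with DefaultAzureCredential() as credential:
#             async with NetworkManagementClient(credential, subscription_id) as client:
#                 poller = await client.virtual_network_taps.begin_delete(
#                     resource_group_name="example-rg",  # placeholder
#                     tap_name="example-tap",            # placeholder
#                 )
#                 await poller.result()  # returns None once the service finishes the delete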
async def get(
self,
resource_group_name: str,
tap_name: str,
**kwargs: Any
) -> "_models.VirtualNetworkTap":
"""Gets information about the specified virtual network tap.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the virtual network tap.
:type tap_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkTap, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.VirtualNetworkTap
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
tap_name: str,
parameters: "_models.VirtualNetworkTap",
**kwargs: Any
) -> "_models.VirtualNetworkTap":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkTap')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
tap_name: str,
parameters: "_models.VirtualNetworkTap",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualNetworkTap"]:
"""Creates or updates a Virtual Network Tap.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the virtual network tap.
:type tap_name: str
:param parameters: Parameters supplied to the create or update virtual network tap operation.
:type parameters: ~azure.mgmt.network.v2018_08_01.models.VirtualNetworkTap
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetworkTap or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_08_01.models.VirtualNetworkTap]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
tap_name=tap_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
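# Usage sketch (not part of the generated module): begin_create_or_update
# returns an AsyncLROPoller whose result() is deserialized into a
# VirtualNetworkTap. ``tap_model`` is a placeholder for a
# _models.VirtualNetworkTap built by the caller (its required destination
# properties are deployment-specific and omitted here), and
# ``provisioning_state`` is assumed to follow the usual ARM resource shape.
#
#     poller = await client.virtual_network_taps.begin_create_or_update(
#         resource_group_name="example-rg",  # placeholder
#         tap_name="example-tap",            # placeholder
#         parameters=tap_model,
#     )
#     tap = await poller.result()
#     print(tap.provisioning_state)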
async def _update_tags_initial(
self,
resource_group_name: str,
tap_name: str,
tap_parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.VirtualNetworkTap":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(tap_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
tap_name: str,
tap_parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualNetworkTap"]:
"""Updates an VirtualNetworkTap tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the tap.
:type tap_name: str
:param tap_parameters: Parameters supplied to update VirtualNetworkTap tags.
:type tap_parameters: ~azure.mgmt.network.v2018_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetworkTap or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_08_01.models.VirtualNetworkTap]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
tap_name=tap_name,
tap_parameters=tap_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
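# Usage sketch (not part of the generated module): tags are replaced through a
# TagsObject body; the tag values below are placeholders.
#
#     poller = await client.virtual_network_taps.begin_update_tags(
#         resource_group_name="example-rg",  # placeholder
#         tap_name="example-tap",            # placeholder
#         tap_parameters=_models.TagsObject(tags={"env": "test"}),
#     )
#     tap = await poller.result()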
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.VirtualNetworkTapListResult"]:
"""Gets all the VirtualNetworkTaps in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkTapListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_08_01.models.VirtualNetworkTapListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTapListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkTapListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworkTaps'} # type: ignore
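# Usage sketch (not part of the generated module): list_all returns an
# AsyncItemPaged, so it is not awaited directly; items are consumed with
# ``async for``, which fetches additional pages transparently.
#
#     async for tap in client.virtual_network_taps.list_all():
#         print(tap.name)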
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualNetworkTapListResult"]:
"""Gets all the VirtualNetworkTaps in a subscription.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkTapListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_08_01.models.VirtualNetworkTapListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTapListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkTapListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps'} # type: ignore
import unittest
import itertools
import warnings
from collections import OrderedDict, namedtuple
import numpy as np
import numpy.testing as np_test
from pgmpy.factors import Factor
from pgmpy.factors import JointProbabilityDistribution as JPD
from pgmpy.factors import factor_product
from pgmpy.factors import factor_divide
from pgmpy.factors.CPD import TabularCPD
from pgmpy import exceptions
from pgmpy.extern.six.moves import range
from pgmpy.independencies import Independencies
class TestFactorInit(unittest.TestCase):
def test_class_init(self):
phi = Factor(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
self.assertEqual(phi.variables, ['x1', 'x2', 'x3'])
np_test.assert_array_equal(phi.cardinality, np.array([2, 2, 2]))
np_test.assert_array_equal(phi.values, np.ones(8).reshape(2, 2, 2))
def test_class_init1(self):
phi = Factor([1, 2, 3], [2, 3, 2], np.arange(12))
self.assertEqual(phi.variables, [1, 2, 3])
np_test.assert_array_equal(phi.cardinality, np.array([2, 3, 2]))
np_test.assert_array_equal(phi.values, np.arange(12).reshape(2, 3, 2))
def test_class_init_sizeerror(self):
self.assertRaises(ValueError, Factor, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(9))
def test_class_init_typeerror(self):
self.assertRaises(TypeError, Factor, ['x1', 'x2', 'x3'], [2, 1, 1], ['val1', 'val2'])
self.assertRaises(TypeError, Factor, ['x1', 'x2', 'x3'], [2, 1, 1], [1, 'val1'])
self.assertRaises(TypeError, Factor, ['x1', 'x2', 'x3'], [2, 1, 1], ['val1', 1])
self.assertRaises(TypeError, Factor, ['x1', 'x2', 'x3'], [2, 1, 1], [0.1, 'val1'])
self.assertRaises(TypeError, Factor, ['x1', 'x2', 'x3'], [2, 1, 1], ['val1', 0.1])
self.assertRaises(TypeError, Factor, 'x1', [3], [1, 2, 3])
self.assertRaises(ValueError, Factor, ['x1', 'x1', 'x3'], [2, 3, 2], range(12))
def test_init_size_var_card_not_equal(self):
self.assertRaises(ValueError, Factor, ['x1', 'x2'], [2], np.ones(2))
class TestFactorMethods(unittest.TestCase):
def setUp(self):
self.phi = Factor(['x1', 'x2', 'x3'], [2, 2, 2], np.random.uniform(5, 10, size=8))
self.phi1 = Factor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
self.phi2 = Factor([('x1', 0), ('x2', 0), ('x3', 0)], [2, 3, 2], range(12))
# This larger factor (phi3) caused a bug in reduce
card3 = [3, 3, 3, 2, 2, 2, 2, 2, 2]
self.phi3 = Factor(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],
card3, np.arange(np.prod(card3), dtype=float))
def test_scope(self):
self.assertListEqual(self.phi.scope(), ['x1', 'x2', 'x3'])
self.assertListEqual(self.phi1.scope(), ['x1', 'x2', 'x3'])
def test_assignment(self):
self.assertListEqual(self.phi.assignment([0]), [[('x1', 0), ('x2', 0), ('x3', 0)]])
self.assertListEqual(self.phi.assignment([4, 5, 6]), [[('x1', 1), ('x2', 0), ('x3', 0)],
[('x1', 1), ('x2', 0), ('x3', 1)],
[('x1', 1), ('x2', 1), ('x3', 0)]])
self.assertListEqual(self.phi1.assignment(np.array([4, 5, 6])),
[[('x1', 0), ('x2', 2), ('x3', 0)],
[('x1', 0), ('x2', 2), ('x3', 1)],
[('x1', 1), ('x2', 0), ('x3', 0)]])
def test_assignment_indexerror(self):
self.assertRaises(IndexError, self.phi.assignment, [10])
self.assertRaises(IndexError, self.phi.assignment, [1, 3, 10, 5])
self.assertRaises(IndexError, self.phi.assignment, np.array([1, 3, 10, 5]))
def test_get_cardinality(self):
self.assertEqual(self.phi.get_cardinality(['x1']), {'x1': 2})
self.assertEqual(self.phi.get_cardinality(['x2']), {'x2': 2})
self.assertEqual(self.phi.get_cardinality(['x3']), {'x3': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x2']), {'x1': 2, 'x2': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x3']), {'x1': 2, 'x3': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x2', 'x3']), {'x1': 2, 'x2': 2, 'x3': 2})
def test_get_cardinality_scopeerror(self):
self.assertRaises(ValueError, self.phi.get_cardinality, ['x4'])
def test_get_cardinality_typeerror(self):
self.assertRaises(TypeError, self.phi.get_cardinality, 'x1')
def test_marginalize(self):
self.phi1.marginalize(['x1'])
np_test.assert_array_equal(self.phi1.values, np.array([[6, 8],
[10, 12],
[14, 16]]))
self.phi1.marginalize(['x2'])
np_test.assert_array_equal(self.phi1.values, np.array([30, 36]))
self.phi1.marginalize(['x3'])
np_test.assert_array_equal(self.phi1.values, np.array(66))
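# Worked check of the expectations above: phi1 holds np.arange(12) shaped
# (2, 3, 2) over (x1, x2, x3). Summing out x1 adds values[0] and values[1]
# elementwise: [[0+6, 1+7], [2+8, 3+9], [4+10, 5+11]] = [[6, 8], [10, 12], [14, 16]].
# Summing out x2 then gives [6+10+14, 8+12+16] = [30, 36], and summing out x3
# leaves the scalar 66.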
def test_marginalize_scopeerror(self):
self.assertRaises(ValueError, self.phi.marginalize, ['x4'])
self.assertRaises(ValueError, self.phi.marginalize, ['x4'])
self.phi.marginalize(['x1'])
self.assertRaises(ValueError, self.phi.marginalize, ['x1'])
def test_marginalize_typeerror(self):
self.assertRaises(TypeError, self.phi.marginalize, 'x1')
def test_marginalize_shape(self):
values = ['A', 'D', 'F', 'H']
phi3_max = self.phi3.marginalize(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_max.values.shape, phi3_max.cardinality)
def test_normalize(self):
self.phi1.normalize()
np_test.assert_almost_equal(self.phi1.values,
np.array([[[0, 0.01515152],
[0.03030303, 0.04545455],
[0.06060606, 0.07575758]],
[[0.09090909, 0.10606061],
[0.12121212, 0.13636364],
[0.15151515, 0.16666667]]]))
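# The expectation above is np.arange(12) / 66 reshaped to (2, 3, 2): the
# factor's values sum to 66, so normalize() scales each entry by 1/66
# (e.g. 1/66 ≈ 0.01515152).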
def test_reduce(self):
self.phi1.reduce([('x1', 0), ('x2', 0)])
np_test.assert_array_equal(self.phi1.values, np.array([0, 1]))
def test_reduce1(self):
self.phi1.reduce([('x2', 0), ('x1', 0)])
np_test.assert_array_equal(self.phi1.values, np.array([0, 1]))
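# Both reductions above fix x1=0 and x2=0 (order does not matter), leaving the
# x3 slice values[0, 0, :] of np.arange(12).reshape(2, 3, 2), i.e. [0, 1].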
def test_reduce_shape(self):
values = [('A', 0), ('D', 0), ('F', 0), ('H', 1)]
phi3_reduced = self.phi3.reduce(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_reduced.values.shape, phi3_reduced.cardinality)
@unittest.skip
def test_complete_reduce(self):
self.phi1.reduce([('x1', 0), ('x2', 0), ('x3', 1)])
np_test.assert_array_equal(self.phi1.values, np.array([1]))
np_test.assert_array_equal(self.phi1.cardinality, np.array([]))
np_test.assert_array_equal(self.phi1.variables, OrderedDict())
def test_reduce_typeerror(self):
self.assertRaises(TypeError, self.phi1.reduce, 'x10')
self.assertRaises(TypeError, self.phi1.reduce, ['x10'])
self.assertRaises(TypeError, self.phi1.reduce, [('x1', 'x2')])
self.assertRaises(TypeError, self.phi1.reduce, [(0, 'x1')])
self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 'x1')])
self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 0.1)])
self.assertRaises(TypeError, self.phi1.reduce, [('x1', 0.1)])
def test_reduce_scopeerror(self):
self.assertRaises(ValueError, self.phi1.reduce, [('x4', 1)])
def test_reduce_sizeerror(self):
self.assertRaises(IndexError, self.phi1.reduce, [('x3', 5)])
def test_identity_factor(self):
identity_factor = self.phi.identity_factor()
self.assertEqual(list(identity_factor.variables), ['x1', 'x2', 'x3'])
np_test.assert_array_equal(identity_factor.cardinality, [2, 2, 2])
np_test.assert_array_equal(identity_factor.values, np.ones(8).reshape(2, 2, 2))
def test_factor_product(self):
phi = Factor(['x1', 'x2'], [2, 2], range(4))
phi1 = Factor(['x3', 'x4'], [2, 2], range(4))
prod = factor_product(phi, phi1)
expected_factor = Factor(['x1', 'x2', 'x3', 'x4'], [2, 2, 2, 2], [0, 0, 0, 0, 0, 1,
2, 3, 0, 2, 4, 6,
0, 3, 6, 9])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3', 'x4'])
phi = Factor(['x1', 'x2'], [3, 2], range(6))
phi1 = Factor(['x2', 'x3'], [2, 2], range(4))
prod = factor_product(phi, phi1)
expected_factor = Factor(['x1', 'x2', 'x3'], [3, 2, 2], [0, 0, 2, 3, 0, 2,
6, 9, 0, 4, 10, 15])
np_test.assert_almost_equal(prod.values,
np.array([0, 0, 2, 3, 0, 2,
6, 9, 0, 4, 10, 15]).reshape(3, 2, 2))
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3'])
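# Worked check of the expectations above: with disjoint scopes the product is
# an outer product, value[i, j, k, l] = phi[i, j] * phi1[k, l], which flattens
# to [0,0,0,0, 0,1,2,3, 0,2,4,6, 0,3,6,9]. When x2 is shared, the factors are
# multiplied with broadcasting over the union scope (x1, x2, x3),
# value[i, j, k] = phi[i, j] * phi1[j, k]; e.g. the x1=0 block is
# [0*0, 0*1, 1*2, 1*3] = [0, 0, 2, 3].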
def test_factor_product2(self):
from pgmpy import factors
phi = factors.Factor(['x1', 'x2'], [2, 2], range(4))
phi1 = factors.Factor(['x3', 'x4'], [2, 2], range(4))
prod = phi.product(phi1, inplace=False)
expected_factor = Factor(['x1', 'x2', 'x3', 'x4'], [2, 2, 2, 2],
[0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3', 'x4'])
phi = Factor(['x1', 'x2'], [3, 2], range(6))
phi1 = Factor(['x2', 'x3'], [2, 2], range(4))
prod = phi.product(phi1, inplace=False)
expected_factor = Factor(['x1', 'x2', 'x3'], [3, 2, 2],
[0, 0, 2, 3, 0, 2, 6, 9, 0, 4, 10, 15])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3'])
def test_factor_product_non_factor_arg(self):
self.assertRaises(TypeError, factor_product, 1, 2)
def test_factor_mul(self):
phi = Factor(['x1', 'x2'], [2, 2], range(4))
phi1 = Factor(['x3', 'x4'], [2, 2], range(4))
prod = phi * phi1
sorted_vars = ['x1', 'x2', 'x3', 'x4']
for axis in range(prod.values.ndim):
exchange_index = prod.variables.index(sorted_vars[axis])
prod.variables[axis], prod.variables[exchange_index] = prod.variables[exchange_index], prod.variables[axis]
prod.values = prod.values.swapaxes(axis, exchange_index)
np_test.assert_almost_equal(prod.values.ravel(),
np.array([0, 0, 0, 0, 0, 1,
2, 3, 0, 2, 4, 6,
0, 3, 6, 9]))
self.assertEqual(prod.variables, ['x1', 'x2', 'x3', 'x4'])
def test_factor_divide(self):
phi1 = Factor(['x1', 'x2'], [2, 2], [1, 2, 2, 4])
phi2 = Factor(['x1'], [2], [1, 2])
div = phi1.divide(phi2, inplace=False)
phi3 = Factor(['x1', 'x2'], [2, 2], [1, 2, 1, 2])
self.assertEqual(phi3, div)
def test_factor_divide_truediv(self):
phi1 = Factor(['x1', 'x2'], [2, 2], [1, 2, 2, 4])
phi2 = Factor(['x1'], [2], [1, 2])
div = phi1 / phi2
phi3 = Factor(['x1', 'x2'], [2, 2], [1, 2, 1, 2])
self.assertEqual(phi3, div)
def test_factor_divide_invalid(self):
phi1 = Factor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi2 = Factor(['x1'], [2], [0, 2])
div = phi1.divide(phi2, inplace=False)
np_test.assert_array_equal(div.values.ravel(), np.array([np.inf, np.inf, 1.5, 2]))
def test_factor_divide_no_common_scope(self):
phi1 = Factor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi2 = Factor(['x3'], [2], [0, 2])
self.assertRaises(ValueError, factor_divide, phi1, phi2)
def test_factor_divide_non_factor_arg(self):
self.assertRaises(TypeError, factor_divide, 1, 1)
def test_eq(self):
self.assertFalse(self.phi == self.phi1)
self.assertTrue(self.phi == self.phi)
self.assertTrue(self.phi1 == self.phi1)
def test_eq1(self):
phi1 = Factor(['x1', 'x2', 'x3'], [2, 4, 3], range(24))
phi2 = Factor(['x2', 'x1', 'x3'], [4, 2, 3], [0, 1, 2, 12, 13, 14, 3,
4, 5, 15, 16, 17, 6, 7,
8, 18, 19, 20, 9, 10, 11,
21, 22, 23])
self.assertTrue(phi1 == phi2)
self.assertEqual(phi2.variables, ['x2', 'x1', 'x3'])
def test_hash(self):
phi = Factor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi1 = Factor(['x2', 'x1'], [2, 2], [1, 3, 2, 4])
self.assertEqual(hash(phi), hash(phi1))
phi = Factor(['x1', 'x2', 'x3'], [2, 2, 2], range(8))
phi1 = Factor(['x3', 'x1', 'x2'], [2, 2, 2], [0, 2, 4, 6, 1, 3, 5, 7])
self.assertEqual(hash(phi), hash(phi1))
def test_maximize1(self):
self.phi1.maximize(['x1'])
self.assertEqual(self.phi1, Factor(['x2', 'x3'], [3, 2], [6, 7, 8, 9, 10, 11]))
self.phi1.maximize(['x2'])
self.assertEqual(self.phi1, Factor(['x3'], [2], [10, 11]))
def test_maximize2(self):
self.phi1.maximize(['x1', 'x2'])
self.assertEqual(self.phi1, Factor(['x3'], [2], [10, 11]))
def test_maximize3(self):
self.phi2 = Factor(['x1', 'x2', 'x3'], [3, 2, 2], [0.25, 0.35, 0.08, 0.16, 0.05, 0.07,
0.00, 0.00, 0.15, 0.21, 0.08, 0.18])
self.phi2.maximize(['x2'])
self.assertEqual(self.phi2, Factor(['x1', 'x3'], [3, 2], [0.25, 0.35, 0.05,
0.07, 0.15, 0.21]))
def test_maximize_shape(self):
values = ['A', 'D', 'F', 'H']
phi3_max = self.phi3.maximize(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_max.values.shape, phi3_max.cardinality)
def test_maximize_scopeerror(self):
self.assertRaises(ValueError, self.phi.maximize, ['x10'])
def test_maximize_typeerror(self):
self.assertRaises(TypeError, self.phi.maximize, 'x1')
def tearDown(self):
del self.phi
del self.phi1
class TestTabularCPDInit(unittest.TestCase):
def test_cpd_init(self):
cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1]])
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
self.assertEqual(list(cpd.variables), ['grade'])
np_test.assert_array_equal(cpd.cardinality, np.array([3]))
np_test.assert_array_almost_equal(cpd.values, np.array([0.1, 0.1, 0.1]))
cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['intel', 'diff'], evidence_card=[3, 2])
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
np_test.assert_array_equal(cpd.cardinality, np.array([3, 3, 2]))
self.assertListEqual(list(cpd.variables), ['grade', 'intel', 'diff'])
np_test.assert_array_equal(cpd.values, np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.8, 0.8, 0.8, 0.8, 0.8, 0.8]).reshape(3, 3, 2))
cpd = TabularCPD('grade', 3, [[0.1, 0.1],
[0.1, 0.1],
[0.8, 0.8]],
evidence='evi1', evidence_card=2)
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
np_test.assert_array_equal(cpd.cardinality, np.array([3, 2]))
self.assertListEqual(list(cpd.variables), ['grade', 'evi1'])
np_test.assert_array_equal(cpd.values, np.array([0.1, 0.1,
0.1, 0.1,
0.8, 0.8]).reshape(3, 2))
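# Layout note for the 2-D tables passed to TabularCPD above: each row is one
# state of the CPD's variable and each column is one joint assignment of the
# evidence variables in the order given, with the last evidence variable
# varying fastest. That is why a 3x6 table with evidence_card [3, 2] is stored
# internally with shape (3, 3, 2), as the reshape in the assertion shows.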
def test_cpd_init_event_card_not_int(self):
self.assertRaises(TypeError, TabularCPD, 'event', '2', [[0.1, 0.9]])
def test_cpd_init_cardinality_not_specified(self):
self.assertRaises(exceptions.CardinalityError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1', 'evi2'], [5])
self.assertRaises(exceptions.CardinalityError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1', 'evi2'], 5)
self.assertRaises(exceptions.CardinalityError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1'], [5, 6])
self.assertRaises(exceptions.CardinalityError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
'evi1', [5, 6])
def test_cpd_init_value_not_2d(self):
self.assertRaises(TypeError, TabularCPD, 'event', 3, [[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]],
['evi1', 'evi2'], [5, 6])
class TestTabularCPDMethods(unittest.TestCase):
def setUp(self):
self.cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['intel', 'diff'], evidence_card=[3, 2])
self.cpd2 = TabularCPD('J', 2, [[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]],
evidence=['A', 'B', 'C'], evidence_card=[2, 2, 2])
def test_marginalize_1(self):
self.cpd.marginalize(['diff'])
self.assertEqual(self.cpd.variable, 'grade')
self.assertEqual(self.cpd.variable_card, 3)
self.assertListEqual(list(self.cpd.variables), ['grade', 'intel'])
np_test.assert_array_equal(self.cpd.cardinality, np.array([3, 3]))
np_test.assert_array_equal(self.cpd.values.ravel(), np.array([0.1, 0.1, 0.1,
0.1, 0.1, 0.1,
0.8, 0.8, 0.8]))
def test_marginalize_2(self):
self.assertRaises(ValueError, self.cpd.marginalize, ['grade'])
def test_marginalize_3(self):
copy_cpd = self.cpd.copy()
copy_cpd.marginalize(['intel', 'diff'])
self.cpd.marginalize(['intel'])
self.cpd.marginalize(['diff'])
np_test.assert_array_almost_equal(self.cpd.values, copy_cpd.values)
def test_normalize(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
cpd_un_normalized.normalize()
np_test.assert_array_almost_equal(cpd_un_normalized.values, np.array([[[0.63636364, 0.33333333],
[0.6, 0.2]],
[[0.36363636, 0.66666667],
[0.4, 0.8]]]))
def test_normalize_not_in_place(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
np_test.assert_array_almost_equal(cpd_un_normalized.normalize(inplace=False).values,
np.array([[[0.63636364, 0.33333333],
[0.6, 0.2]],
[[0.36363636, 0.66666667],
[0.4, 0.8]]]))
def test_normalize_original_safe(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
cpd_un_normalized.normalize(inplace=False)
np_test.assert_array_almost_equal(cpd_un_normalized.values, np.array([[[0.7, 0.2], [0.6, 0.2]],
[[0.4, 0.4], [0.4, 0.8]]]))
def test__repr__(self):
grade_cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['intel', 'diff'], evidence_card=[3, 2])
intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
diff_cpd = TabularCPD('grade', 3, [[0.1, 0.1], [0.1, 0.1], [0.8, 0.8]], evidence=['diff'], evidence_card=[2])
self.assertEqual(repr(grade_cpd), '<TabularCPD representing P(grade:3 | intel:3, diff:2) at {address}>'
.format(address=hex(id(grade_cpd))))
self.assertEqual(repr(intel_cpd), '<TabularCPD representing P(intel:3) at {address}>'
.format(address=hex(id(intel_cpd))))
self.assertEqual(repr(diff_cpd), '<TabularCPD representing P(grade:3 | diff:2) at {address}>'
.format(address=hex(id(diff_cpd))))
def test_copy(self):
copy_cpd = self.cpd.copy()
np_test.assert_array_equal(self.cpd.get_cpd(), copy_cpd.get_cpd())
def test_copy_original_safe(self):
copy_cpd = self.cpd.copy()
copy_cpd.reorder_parents(['diff', 'intel'])
np_test.assert_array_equal(self.cpd.get_cpd(), np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]))
def test_reduce_1(self):
self.cpd.reduce([('diff', 0)])
np_test.assert_array_equal(self.cpd.get_cpd(), np.array([[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.8, 0.8, 0.8]]))
def test_reduce_2(self):
self.cpd.reduce([('intel', 0)])
np_test.assert_array_equal(self.cpd.get_cpd(), np.array([[0.1, 0.1],
[0.1, 0.1],
[0.8, 0.8]]))
def test_reduce_3(self):
self.cpd.reduce([('intel', 0), ('diff', 0)])
np_test.assert_array_equal(self.cpd.get_cpd(), np.array([[0.1],
[0.1],
[0.8]]))
def test_reduce_4(self):
self.assertRaises(ValueError, self.cpd.reduce, [('grade', 0)])
def test_reduce_5(self):
copy_cpd = self.cpd.copy()
copy_cpd.reduce([('intel', 2), ('diff', 1)])
self.cpd.reduce([('intel', 2)])
self.cpd.reduce([('diff', 1)])
np_test.assert_array_almost_equal(self.cpd.values, copy_cpd.values)
def test_get_cpd(self):
np_test.assert_array_equal(self.cpd.get_cpd(), np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]))
def test_reorder_parents_inplace(self):
new_vals = self.cpd2.reorder_parents(['B', 'A', 'C'])
np_test.assert_array_equal(new_vals, np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
np_test.assert_array_equal(self.cpd2.get_cpd(), np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
def test_reorder_parents(self):
new_vals = self.cpd2.reorder_parents(['B', 'A', 'C'])
np_test.assert_array_equal(new_vals, np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
def test_reorder_parents_no_effect(self):
self.cpd2.reorder_parents(['C', 'A', 'B'], inplace=False)
np_test.assert_array_equal(self.cpd2.get_cpd(), np.array([[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]]))
def test_reorder_parents_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.cpd2.reorder_parents(['A', 'B', 'C'], inplace=False)
assert("Same ordering provided as current" in str(w[-1].message))
np_test.assert_array_equal(self.cpd2.get_cpd(), np.array([[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]]))
def tearDown(self):
del self.cpd
class TestJointProbabilityDistributionInit(unittest.TestCase):
def test_jpd_init(self):
jpd = JPD(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12) / 12)
np_test.assert_array_equal(jpd.cardinality, np.array([2, 3, 2]))
np_test.assert_array_equal(jpd.values, np.ones(12).reshape(2, 3, 2) / 12)
self.assertEqual(jpd.get_cardinality(['x1', 'x2', 'x3']), {'x1': 2, 'x2': 3, 'x3': 2})
def test_jpd_init_exception(self):
self.assertRaises(ValueError, JPD, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
class TestJointProbabilityDistributionMethods(unittest.TestCase):
def setUp(self):
self.jpd = JPD(['x1', 'x2', 'x3'], [2, 3, 2], values=np.ones(12) / 12)
self.jpd1 = JPD(['x1', 'x2', 'x3'], [2, 3, 2], values=np.ones(12) / 12)
self.jpd2 = JPD(['x1', 'x2', 'x3'], [2, 2, 3],
[0.126, 0.168, 0.126, 0.009, 0.045, 0.126, 0.252, 0.0224, 0.0056, 0.06, 0.036, 0.024])
self.jpd3 = JPD(['x1', 'x2', 'x3'], [2, 2, 2],
[5.0e-04, 5.225e-04, 0.00, 8.9775e-03, 9.9e-03, 5.39055e-02, 0.00, 9.261945e-01])
def test_jpd_marginal_distribution_list(self):
self.jpd.marginal_distribution(['x1', 'x2'])
np_test.assert_array_almost_equal(self.jpd.values, np.array([[0.16666667, 0.16666667, 0.16666667],
[0.16666667, 0.16666667, 0.16666667]]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2, 3]))
dic = {'x1': 2, 'x2': 3}
self.assertEqual(self.jpd.get_cardinality(['x1', 'x2']), dic)
self.assertEqual(self.jpd.scope(), ['x1', 'x2'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.marginal_distribution(['x1', 'x2'], inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(new_jpd == self.jpd)
def test_marginal_distribution_str(self):
self.jpd.marginal_distribution('x1')
np_test.assert_array_almost_equal(self.jpd.values, np.array([0.5, 0.5]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2]))
self.assertEqual(self.jpd.scope(), ['x1'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.marginal_distribution('x1', inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(self.jpd == new_jpd)
def test_conditional_distribution_list(self):
self.jpd = self.jpd1.copy()
self.jpd.conditional_distribution([('x1', 1), ('x2', 0)])
np_test.assert_array_almost_equal(self.jpd.values, np.array([0.5, 0.5]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2]))
self.assertEqual(self.jpd.scope(), ['x3'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.conditional_distribution([('x1', 1), ('x2', 0)], inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(self.jpd == new_jpd)
def test_check_independence(self):
self.assertTrue(self.jpd2.check_independence(['x1'], ['x2']))
self.assertRaises(TypeError, self.jpd2.check_independence, 'x1', ['x2'])
self.assertRaises(TypeError, self.jpd2.check_independence, ['x1'], 'x2')
self.assertRaises(TypeError, self.jpd2.check_independence, ['x1'], ['x2'], 'x3')
self.assertFalse(self.jpd2.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
self.assertFalse(self.jpd2.check_independence(['x1'], ['x2'], [('x3', 0)]))
self.assertTrue(self.jpd1.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
self.assertTrue(self.jpd1.check_independence(['x1'], ['x2'], [('x3', 1)]))
self.assertTrue(self.jpd3.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
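# These checks exercise marginal and conditional independence of the joint
# distribution: x1 and x2 are independent when the (x1, x2) marginal equals
# the product of the individual marginals, and conditionally independent
# given x3 when that factorization holds within every slice x3 = k.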
def test_get_independencies(self):
independencies = Independencies(['x1', 'x2'], ['x2', 'x3'], ['x3', 'x1'])
independencies1 = Independencies(['x1', 'x2'])
self.assertEqual(self.jpd1.get_independencies(), independencies)
self.assertEqual(self.jpd2.get_independencies(), independencies1)
self.assertEqual(self.jpd1.get_independencies([('x3', 0)]), independencies1)
self.assertEqual(self.jpd2.get_independencies([('x3', 0)]), Independencies())
def test_minimal_imap(self):
bm = self.jpd1.minimal_imap(order=['x1', 'x2', 'x3'])
self.assertEqual(sorted(bm.edges()), sorted([('x1', 'x3'), ('x2', 'x3')]))
bm = self.jpd1.minimal_imap(order=['x2', 'x3', 'x1'])
self.assertEqual(sorted(bm.edges()), sorted([('x2', 'x1'), ('x3', 'x1')]))
bm = self.jpd2.minimal_imap(order=['x1', 'x2', 'x3'])
self.assertEqual(bm.edges(), [])
bm = self.jpd2.minimal_imap(order=['x1', 'x2'])
self.assertEqual(bm.edges(), [])
def test_repr(self):
self.assertEqual(repr(self.jpd1), '<Joint Distribution representing P(x1:2, x2:3, x3:2) at {address}>'.format(
address=hex(id(self.jpd1))))
def test_is_imap(self):
from pgmpy.models import BayesianModel
from pgmpy.models import MarkovModel
G1 = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
grade_cpd = TabularCPD('grade', 3,
[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['diff', 'intel'],
evidence_card=[2, 3])
G1.add_cpds(diff_cpd, intel_cpd, grade_cpd)
val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
jpd = JPD(['diff', 'intel', 'grade'], [2, 3, 3], val)
self.assertTrue(jpd.is_imap(G1))
self.assertRaises(TypeError, jpd.is_imap, MarkovModel())
def tearDown(self):
del self.jpd
del self.jpd1
del self.jpd2
del self.jpd3
#
# class TestTreeCPDInit(unittest.TestCase):
# def test_init_single_variable_nodes(self):
# tree = TreeCPD([('B', Factor(['A'], [2], [0.8, 0.2]), 0),
# ('B', 'C', 1),
# ('C', Factor(['A'], [2], [0.1, 0.9]), 0),
# ('C', 'D', 1),
# ('D', Factor(['A'], [2], [0.9, 0.1]), 0),
# ('D', Factor(['A'], [2], [0.4, 0.6]), 1)])
#
# self.assertTrue('B' in tree.nodes())
# self.assertTrue('C' in tree.nodes())
# self.assertTrue('D' in tree.nodes())
# self.assertTrue(Factor(['A'], [2], [0.8, 0.2]) in tree.nodes())
# self.assertTrue(Factor(['A'], [2], [0.1, 0.9]) in tree.nodes())
# self.assertTrue(Factor(['A'], [2], [0.9, 0.1]) in tree.nodes())
# self.assertTrue(Factor(['A'], [2], [0.4, 0.6]) in tree.nodes())
#
# self.assertTrue(('B', Factor(['A'], [2], [0.8, 0.2]) in tree.edges()))
# self.assertTrue(('B', Factor(['A'], [2], [0.1, 0.9]) in tree.edges()))
# self.assertTrue(('B', Factor(['A'], [2], [0.9, 0.1]) in tree.edges()))
# self.assertTrue(('B', Factor(['A'], [2], [0.4, 0.6]) in tree.edges()))
# self.assertTrue(('C', 'D') in tree.edges())
# self.assertTrue(('B', 'C') in tree.edges())
#
# self.assertEqual(tree['B'][Factor(['A'], [2], [0.8, 0.2])]['label'], 0)
# self.assertEqual(tree['B']['C']['label'], 1)
# self.assertEqual(tree['C'][Factor(['A'], [2], [0.1, 0.9])]['label'], 0)
# self.assertEqual(tree['C']['D']['label'], 1)
# self.assertEqual(tree['D'][Factor(['A'], [2], [0.9, 0.1])]['label'], 0)
# self.assertEqual(tree['D'][Factor(['A'], [2], [0.4, 0.6])]['label'], 1)
#
# self.assertRaises(ValueError, tree.add_edges_from, [('F', 'G')])
#
# def test_init_self_loop(self):
# self.assertRaises(ValueError, TreeCPD, [('B', 'B', 0)])
#
# def test_init_cycle(self):
# self.assertRaises(ValueError, TreeCPD, [('A', 'B', 0), ('B', 'C', 1), ('C', 'A', 0)])
#
# def test_init_multi_variable_nodes(self):
# tree = TreeCPD([(('B', 'C'), Factor(['A'], [2], [0.8, 0.2]), (0, 0)),
# (('B', 'C'), 'D', (0, 1)),
# (('B', 'C'), Factor(['A'], [2], [0.1, 0.9]), (1, 0)),
# (('B', 'C'), 'E', (1, 1)),
# ('D', Factor(['A'], [2], [0.9, 0.1]), 0),
# ('D', Factor(['A'], [2], [0.4, 0.6]), 1),
# ('E', Factor(['A'], [2], [0.3, 0.7]), 0),
# ('E', Factor(['A'], [2], [0.8, 0.2]), 1)
# ])
#
# self.assertTrue(('B', 'C') in tree.nodes())
# self.assertTrue('D' in tree.nodes())
# self.assertTrue('E' in tree.nodes())
# self.assertTrue(Factor(['A'], [2], [0.8, 0.2]) in tree.nodes())
# self.assertTrue(Factor(['A'], [2], [0.9, 0.1]) in tree.nodes())
#
# self.assertTrue((('B', 'C'), Factor(['A'], [2], [0.8, 0.2]) in tree.edges()))
# self.assertTrue((('B', 'C'), 'E') in tree.edges())
# self.assertTrue(('D', Factor(['A'], [2], [0.4, 0.6])) in tree.edges())
# self.assertTrue(('E', Factor(['A'], [2], [0.8, 0.2])) in tree.edges())
#
# self.assertEqual(tree[('B', 'C')][Factor(['A'], [2], [0.8, 0.2])]['label'], (0, 0))
# self.assertEqual(tree[('B', 'C')]['D']['label'], (0, 1))
# self.assertEqual(tree['D'][Factor(['A'], [2], [0.9, 0.1])]['label'], 0)
# self.assertEqual(tree['E'][Factor(['A'], [2], [0.3, 0.7])]['label'], 0)
#
#
# class TestTreeCPD(unittest.TestCase):
# def setUp(self):
# self.tree1 = TreeCPD([('B', Factor(['A'], [2], [0.8, 0.2]), '0'),
# ('B', 'C', '1'),
# ('C', Factor(['A'], [2], [0.1, 0.9]), '0'),
# ('C', 'D', '1'),
# ('D', Factor(['A'], [2], [0.9, 0.1]), '0'),
# ('D', Factor(['A'], [2], [0.4, 0.6]), '1')])
#
# self.tree2 = TreeCPD([('C','A','0'),('C','B','1'),
# ('A', Factor(['J'], [2], [0.9, 0.1]), '0'),
# ('A', Factor(['J'], [2], [0.3, 0.7]), '1'),
# ('B', Factor(['J'], [2], [0.8, 0.2]), '0'),
# ('B', Factor(['J'], [2], [0.4, 0.6]), '1')])
#
# def test_add_edge(self):
# self.tree1.add_edge('yolo', 'yo', 0)
# self.assertTrue('yolo' in self.tree1.nodes() and 'yo' in self.tree1.nodes())
# self.assertTrue(('yolo', 'yo') in self.tree1.edges())
# self.assertEqual(self.tree1['yolo']['yo']['label'], 0)
#
# def test_add_edges_from(self):
# self.tree1.add_edges_from([('yolo', 'yo', 0), ('hello', 'world', 1)])
# self.assertTrue('yolo' in self.tree1.nodes() and 'yo' in self.tree1.nodes() and
# 'hello' in self.tree1.nodes() and 'world' in self.tree1.nodes())
# self.assertTrue(('yolo', 'yo') in self.tree1.edges())
# self.assertTrue(('hello', 'world') in self.tree1.edges())
# self.assertEqual(self.tree1['yolo']['yo']['label'], 0)
# self.assertEqual(self.tree1['hello']['world']['label'], 1)
#
# def test_to_tabular_cpd(self):
# tabular_cpd = self.tree1.to_tabular_cpd()
# self.assertEqual(tabular_cpd.evidence, ['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['A', 'B', 'C', 'D'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([0.8, 0.8, 0.8, 0.8, 0.1, 0.1, 0.9, 0.4,
# 0.2, 0.2, 0.2, 0.2, 0.9, 0.9, 0.1, 0.6]))
#
# tabular_cpd = self.tree2.to_tabular_cpd()
# self.assertEqual(tabular_cpd.evidence, ['A', 'B', 'C'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['J', 'C', 'B', 'A'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([ 0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4,
# 0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]))
#
# @unittest.skip('Not implemented yet')
# def test_to_tabular_cpd_parent_order(self):
# tabular_cpd = self.tree1.to_tabular_cpd('A', parents_order=['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence, ['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['A', 'D', 'C', 'B'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([0.8, 0.1, 0.8, 0.9, 0.8, 0.1, 0.8, 0.4,
# 0.2, 0.9, 0.2, 0.1, 0.2, 0.9, 0.2, 0.6]))
#
# tabular_cpd = self.tree2.to_tabular_cpd('A', parents_order=['E', 'D', 'C', 'B'])
#
# @unittest.skip('Not implemented yet')
# def test_to_rule_cpd(self):
# rule_cpd = self.tree1.to_rule_cpd()
# self.assertEqual(rule_cpd.cardinality(), {'A': 2, 'B': 2, 'C': 2, 'D': 2})
# self.assertEqual(rule_cpd.scope(), {'A', 'B', 'C', 'D'})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.1,
# ('A_0', 'B_1', 'C_1', 'D_0'): 0.9,
# ('A_1', 'B_1', 'C_1', 'D_0'): 0.1,
# ('A_0', 'B_1', 'C_1', 'D_1'): 0.4,
# ('A_1', 'B_!', 'C_1', 'D_1'): 0.6})
#
# rule_cpd = self.tree2.to_rule_cpd()
# self.assertEqual(rule_cpd.cardinality(), {'A': 2, 'B': 2, 'C': 2, 'D': 2, 'E': 2})
# self.assertEqual(rule_cpd.scope(), {'A', 'B', 'C', 'D', 'E'})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0', 'C_0'): 0.8,
# ('A_1', 'B_0', 'C_0'): 0.2,
# ('A_0', 'B_0', 'C_1', 'D_0'): 0.9,
# ('A_1', 'B_0', 'C_1', 'D_0'): 0.1,
# ('A_0', 'B_0', 'C_1', 'D_1'): 0.4,
# ('A_1', 'B_0', 'C_1', 'D_1'): 0.6,
# ('A_0', 'B_1', 'C_0'): 0.1,
# ('A_1', 'B_1', 'C_0'): 0.9,
# ('A_0', 'B_1', 'C_1', 'E_0'): 0.3,
# ('A_1', 'B_1', 'C_1', 'E_0'): 0.7,
# ('A_0', 'B_1', 'C_1', 'E_1'): 0.8,
# ('A_1', 'B_1', 'C_1', 'E_1'): 0.2})
#
#
# class TestRuleCPDInit(unittest.TestCase):
# def test_init_without_errors_rules_none(self):
# rule_cpd = RuleCPD('A')
# self.assertEqual(rule_cpd.variable, 'A')
#
# def test_init_without_errors_rules_not_none(self):
# rule_cpd = RuleCPD('A', {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
#
# def test_init_with_errors(self):
# self.assertRaises(ValueError, RuleCPD, 'A', {('A_0',): 0.5,
# ('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
#
#
# class TestRuleCPDMethods(unittest.TestCase):
# def setUp(self):
# self.rule_cpd_with_rules = RuleCPD('A', {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6})
# self.rule_cpd_without_rules = RuleCPD('A')
#
# def test_add_rules_single(self):
# self.rule_cpd_with_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_with_rules.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_with_rules.variable, 'A')
# self.rule_cpd_without_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_without_rules.rules, {('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_without_rules.variable, 'A')
#
# def test_add_rules_multiple(self):
# self.rule_cpd_with_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_with_rules.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_with_rules.variable, 'A')
# self.rule_cpd_without_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_without_rules.rules, {('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_without_rules.variable, 'A')
#
# def test_add_rules_error(self):
# self.assertRaises(ValueError, self.rule_cpd_with_rules.add_rules, {('A_0',): 0.8})
#
# def test_scope(self):
# self.assertEqual(self.rule_cpd_with_rules.scope(), {'A', 'B', 'C'})
# self.assertEqual(self.rule_cpd_without_rules.scope(), set())
#
# def test_cardinality(self):
# self.assertEqual(self.rule_cpd_with_rules.cardinality(), {'A': 2, 'B': 2, 'C': 1})
# self.assertEqual(self.rule_cpd_without_rules.cardinality(), {})
#
# def tearDown(self):
# del self.rule_cpd_without_rules
#
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write MPEG-4 audio files with iTunes metadata.
This module will read MPEG-4 audio information and metadata,
as found in Apple's MP4 (aka M4A, M4B, M4P) files.
There is no official specification for this format. The source code
for TagLib, FAAD, and various MPEG specifications at
* http://developer.apple.com/documentation/QuickTime/QTFF/
* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt
* http://standards.iso.org/ittf/PubliclyAvailableStandards/\
c041828_ISO_IEC_14496-12_2005(E).zip
* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime
were all consulted.
"""
import struct
import sys
from io import BytesIO
from collections.abc import Sequence
from datetime import timedelta
from mutagen import FileType, Tags, StreamInfo, PaddingInfo
from mutagen._constants import GENRES
from mutagen._util import cdata, insert_bytes, DictProxy, MutagenError, \
hashable, enum, get_size, resize_bytes, loadfile, convert_error, bchr, \
reraise
from ._atom import Atoms, Atom, AtomError
from ._util import parse_full_atom
from ._as_entry import AudioSampleEntry, ASEntryError
class error(MutagenError):
pass
class MP4MetadataError(error):
pass
class MP4StreamInfoError(error):
pass
class MP4NoTrackError(MP4StreamInfoError):
pass
class MP4MetadataValueError(ValueError, MP4MetadataError):
pass
__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType']
@enum
class AtomDataType(object):
"""Enum for ``dataformat`` attribute of MP4FreeForm.
.. versionadded:: 1.25
"""
IMPLICIT = 0
"""for use with tags for which no type needs to be indicated because
only one type is allowed"""
UTF8 = 1
"""without any count or null terminator"""
UTF16 = 2
"""also known as UTF-16BE"""
SJIS = 3
"""deprecated unless it is needed for special Japanese characters"""
HTML = 6
"""the HTML file header specifies which HTML version"""
XML = 7
"""the XML header must identify the DTD or schemas"""
UUID = 8
"""also known as GUID; stored as 16 bytes in binary (valid as an ID)"""
ISRC = 9
"""stored as UTF-8 text (valid as an ID)"""
MI3P = 10
"""stored as UTF-8 text (valid as an ID)"""
GIF = 12
"""(deprecated) a GIF image"""
JPEG = 13
"""a JPEG image"""
PNG = 14
"""PNG image"""
URL = 15
"""absolute, in UTF-8 characters"""
DURATION = 16
"""in milliseconds, 32-bit integer"""
DATETIME = 17
"""in UTC, counting seconds since midnight, January 1, 1904;
32 or 64-bits"""
GENRES = 18
"""a list of enumerated values"""
INTEGER = 21
"""a signed big-endian integer with length one of { 1,2,3,4,8 } bytes"""
RIAA_PA = 24
"""RIAA parental advisory; { -1=no, 1=yes, 0=unspecified },
8-bit integer"""
UPC = 25
"""Universal Product Code, in text UTF-8 format (valid as an ID)"""
BMP = 27
"""Windows bitmap image"""
@hashable
class MP4Cover(bytes):
"""A cover artwork.
Attributes:
imageformat (`AtomDataType`): format of the image
(either FORMAT_JPEG or FORMAT_PNG)
"""
FORMAT_JPEG = AtomDataType.JPEG
FORMAT_PNG = AtomDataType.PNG
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, imageformat=FORMAT_JPEG):
self.imageformat = imageformat
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4Cover):
return bytes(self) == other
return (bytes(self) == bytes(other) and
self.imageformat == other.imageformat)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.imageformat))
@hashable
class MP4FreeForm(bytes):
"""A freeform value.
Attributes:
dataformat (`AtomDataType`): format of the data (see AtomDataType)
"""
FORMAT_DATA = AtomDataType.IMPLICIT # deprecated
FORMAT_TEXT = AtomDataType.UTF8 # deprecated
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, dataformat=AtomDataType.UTF8, version=0):
self.dataformat = dataformat
self.version = version
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4FreeForm):
return bytes(self) == other
return (bytes(self) == bytes(other) and
self.dataformat == other.dataformat and
self.version == other.version)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.dataformat))
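# Illustrative sketch of how MP4Cover and MP4FreeForm values are typically
# attached to a tag dictionary (the paths "song.m4a"/"cover.png" and the
# freeform tag name are hypothetical, not defined by this module):
#
#   audio = MP4("song.m4a")
#   with open("cover.png", "rb") as f:
#       audio["covr"] = [MP4Cover(f.read(), imageformat=MP4Cover.FORMAT_PNG)]
#   audio["----:com.apple.iTunes:MYTAG"] = [MP4FreeForm(b"some value")]
#   audio.save()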
def _name2key(name):
return name.decode("latin-1")
def _key2name(key):
return key.encode("latin-1")
def _find_padding(atom_path):
# Check for padding "free" atom
# XXX: we only use them if they are adjacent to ilst, and only one.
# and there also is a top level free atom which we could use maybe..?
meta, ilst = atom_path[-2:]
assert meta.name == b"meta" and ilst.name == b"ilst"
index = meta.children.index(ilst)
try:
prev = meta.children[index - 1]
if prev.name == b"free":
return prev
except IndexError:
pass
try:
next_ = meta.children[index + 1]
if next_.name == b"free":
return next_
except IndexError:
pass
def _item_sort_key(key, value):
# iTunes always writes the tags in order of "relevance", try
# to copy it as closely as possible.
order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb",
"\xa9gen", "gnre", "trkn", "disk",
"\xa9day", "cpil", "pgap", "pcst", "tmpo",
"\xa9too", "----", "covr", "\xa9lyr"]
order = dict(zip(order, range(len(order))))
last = len(order)
# If there's no key-based way to distinguish, order by length.
# If there's still no way, go by string comparison on the
# values, so we at least have something deterministic.
return (order.get(key[:4], last), len(repr(value)), repr(value))
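# For example, sorted(tags.items(), key=lambda kv: _item_sort_key(*kv))
# places "\xa9nam" (title) before "covr" (artwork), and any key whose first
# four characters are not in the list above sorts after all known keys
# (ties are then broken by value length and repr).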
class MP4Tags(DictProxy, Tags):
r"""MP4Tags()
Dictionary containing Apple iTunes metadata list key/values.
Keys are four byte identifiers, except for freeform ('----')
keys. Values are usually unicode strings, but some atoms have a
special structure:
Text values (multiple values per key are supported):
* '\\xa9nam' -- track title
* '\\xa9alb' -- album
* '\\xa9ART' -- artist
* 'aART' -- album artist
* '\\xa9wrt' -- composer
* '\\xa9day' -- year
* '\\xa9cmt' -- comment
* 'desc' -- description (usually used in podcasts)
* 'purd' -- purchase date
* '\\xa9grp' -- grouping
* '\\xa9gen' -- genre
* '\\xa9lyr' -- lyrics
* 'purl' -- podcast URL
* 'egid' -- podcast episode GUID
* 'catg' -- podcast category
* 'keyw' -- podcast keywords
* '\\xa9too' -- encoded by
* 'cprt' -- copyright
* 'soal' -- album sort order
* 'soaa' -- album artist sort order
* 'soar' -- artist sort order
* 'sonm' -- title sort order
* 'soco' -- composer sort order
* 'sosn' -- show sort order
* 'tvsh' -- show name
* '\\xa9wrk' -- work
* '\\xa9mvn' -- movement
Boolean values:
* 'cpil' -- part of a compilation
* 'pgap' -- part of a gapless album
* 'pcst' -- podcast (iTunes reads this only on import)
Tuples of ints (multiple values per key are supported):
* 'trkn' -- track number, total tracks
* 'disk' -- disc number, total discs
Integer values:
* 'tmpo' -- tempo/BPM
* '\\xa9mvc' -- Movement Count
* '\\xa9mvi' -- Movement Index
* 'shwm' -- work/movement
* 'stik' -- Media Kind
* 'hdvd' -- HD Video
* 'rtng' -- Content Rating
* 'tves' -- TV Episode
* 'tvsn' -- TV Season
* 'plID', 'cnID', 'geID', 'atID', 'sfID', 'cmID', 'akID' -- Various iTunes
Internal IDs
Others:
* 'covr' -- cover artwork, list of MP4Cover objects (which are
tagged strs)
* 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead.
The freeform '----' frames use a key in the format '----:mean:name'
where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique
identifier for this frame. The value is a str, but is probably
text that can be decoded as UTF-8. Multiple values per key are
supported.
MP4 tag data cannot exist outside of the structure of an MP4 file,
so this class should not be manually instantiated.
Unknown non-text tags and tags that failed to parse will be written
back as is.
"""
def __init__(self, *args, **kwargs):
self._failed_atoms = {}
super(MP4Tags, self).__init__()
if args or kwargs:
self.load(*args, **kwargs)
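# A hedged usage sketch of the key/value conventions documented in the class
# docstring above (the path "song.m4a" is hypothetical; tags are normally
# reached through an MP4 instance rather than by constructing MP4Tags
# directly):
#
#   audio = MP4("song.m4a")
#   audio["\xa9nam"] = ["Track title"]   # text values are lists of str
#   audio["trkn"] = [(3, 12)]            # (track number, total tracks)
#   audio["cpil"] = True                 # boolean flag
#   audio["tmpo"] = [120]                # integer values are lists of ints
#   audio.save()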
def load(self, atoms, fileobj):
try:
path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
except KeyError as key:
raise MP4MetadataError(key)
free = _find_padding(path)
self._padding = free.datalength if free is not None else 0
ilst = path[-1]
for atom in ilst.children:
ok, data = atom.read(fileobj)
if not ok:
raise MP4MetadataError("Not enough data")
try:
if atom.name in self.__atoms:
info = self.__atoms[atom.name]
info[0](self, atom, data)
else:
# unknown atom, try as text
self.__parse_text(atom, data, implicit=False)
except MP4MetadataError:
# parsing failed, save them so we can write them back
key = _name2key(atom.name)
self._failed_atoms.setdefault(key, []).append(data)
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("key has to be str")
self._render(key, value)
super(MP4Tags, self).__setitem__(key, value)
@classmethod
def _can_load(cls, atoms):
return b"moov.udta.meta.ilst" in atoms
def _render(self, key, value):
atom_name = _key2name(key)[:4]
if atom_name in self.__atoms:
render_func = self.__atoms[atom_name][1]
render_args = self.__atoms[atom_name][2:]
else:
render_func = type(self).__render_text
render_args = []
return render_func(self, key, value, *render_args)
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething=None, padding=None):
values = []
items = sorted(self.items(), key=lambda kv: _item_sort_key(*kv))
for key, value in items:
try:
values.append(self._render(key, value))
except (TypeError, ValueError) as s:
reraise(MP4MetadataValueError, s, sys.exc_info()[2])
for key, failed in self._failed_atoms.items():
# don't write atoms back if we have added a new one with
# the same name, this excludes freeform which can have
# multiple atoms with the same key (most parsers seem to be able
# to handle that)
if key in self:
assert _key2name(key) != b"----"
continue
for data in failed:
values.append(Atom.render(_key2name(key), data))
data = Atom.render(b"ilst", b"".join(values))
# Find the old atoms.
try:
atoms = Atoms(filething.fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
self.__save(filething.fileobj, atoms, data, padding)
def __save(self, fileobj, atoms, data, padding):
try:
path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
except KeyError:
self.__save_new(fileobj, atoms, data, padding)
else:
self.__save_existing(fileobj, atoms, path, data, padding)
def __save_new(self, fileobj, atoms, ilst_data, padding_func):
hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9)
meta_data = b"\x00\x00\x00\x00" + hdlr + ilst_data
try:
path = atoms.path(b"moov", b"udta")
except KeyError:
path = atoms.path(b"moov")
offset = path[-1]._dataoffset
# ignoring some atom overhead... but we don't have padding left anyway
# and padding_size is guaranteed to be less than zero
content_size = get_size(fileobj) - offset
padding_size = -len(meta_data)
assert padding_size < 0
info = PaddingInfo(padding_size, content_size)
new_padding = info._get_padding(padding_func)
new_padding = min(0xFFFFFFFF, new_padding)
free = Atom.render(b"free", b"\x00" * new_padding)
meta = Atom.render(b"meta", meta_data + free)
if path[-1].name != b"udta":
# moov.udta not found -- create one
data = Atom.render(b"udta", meta)
else:
data = meta
insert_bytes(fileobj, len(data), offset)
fileobj.seek(offset)
fileobj.write(data)
self.__update_parents(fileobj, path, len(data))
self.__update_offsets(fileobj, atoms, len(data), offset)
def __save_existing(self, fileobj, atoms, path, ilst_data, padding_func):
# Replace the old ilst atom.
ilst = path[-1]
offset = ilst.offset
length = ilst.length
# Use adjacent free atom if there is one
free = _find_padding(path)
if free is not None:
offset = min(offset, free.offset)
length += free.length
# Always add a padding atom to make things easier
padding_overhead = len(Atom.render(b"free", b""))
content_size = get_size(fileobj) - (offset + length)
padding_size = length - (len(ilst_data) + padding_overhead)
info = PaddingInfo(padding_size, content_size)
new_padding = info._get_padding(padding_func)
# Limit padding size so we can be sure the free atom overhead is as we
# calculated above (see Atom.render)
new_padding = min(0xFFFFFFFF, new_padding)
ilst_data += Atom.render(b"free", b"\x00" * new_padding)
resize_bytes(fileobj, length, len(ilst_data), offset)
delta = len(ilst_data) - length
fileobj.seek(offset)
fileobj.write(ilst_data)
self.__update_parents(fileobj, path[:-1], delta)
self.__update_offsets(fileobj, atoms, delta, offset)
def __update_parents(self, fileobj, path, delta):
"""Update all parent atoms with the new size."""
if delta == 0:
return
for atom in path:
fileobj.seek(atom.offset)
size = cdata.uint_be(fileobj.read(4))
if size == 1: # 64bit
# skip name (4B) and read size (8B)
size = cdata.ulonglong_be(fileobj.read(12)[4:])
fileobj.seek(atom.offset + 8)
fileobj.write(cdata.to_ulonglong_be(size + delta))
else: # 32bit
fileobj.seek(atom.offset)
fileobj.write(cdata.to_uint_be(size + delta))
def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
"""Update offset table in the specified atom."""
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 12)
data = fileobj.read(atom.length - 12)
fmt = fmt % cdata.uint_be(data[:4])
offsets = struct.unpack(fmt, data[4:])
offsets = [o + (0, delta)[offset < o] for o in offsets]
fileobj.seek(atom.offset + 16)
fileobj.write(struct.pack(fmt, *offsets))
def __update_tfhd(self, fileobj, atom, delta, offset):
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 9)
data = fileobj.read(atom.length - 9)
flags = cdata.uint_be(b"\x00" + data[:3])
if flags & 1:
o = cdata.ulonglong_be(data[7:15])
if o > offset:
o += delta
fileobj.seek(atom.offset + 16)
fileobj.write(cdata.to_ulonglong_be(o))
def __update_offsets(self, fileobj, atoms, delta, offset):
"""Update offset tables in all 'stco' and 'co64' atoms."""
if delta == 0:
return
moov = atoms[b"moov"]
for atom in moov.findall(b'stco', True):
self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
for atom in moov.findall(b'co64', True):
self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
try:
for atom in atoms[b"moof"].findall(b'tfhd', True):
self.__update_tfhd(fileobj, atom, delta, offset)
except KeyError:
pass
def __parse_data(self, atom, data):
pos = 0
while pos < atom.length - 8:
head = data[pos:pos + 12]
if len(head) != 12:
raise MP4MetadataError("truncated atom %r" % atom.name)
length, name = struct.unpack(">I4s", head[:8])
if length < 1:
raise MP4MetadataError(
"atom %r has a length of zero" % atom.name)
version = ord(head[8:9])
flags = struct.unpack(">I", b"\x00" + head[9:12])[0]
if name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (name, atom.name))
chunk = data[pos + 16:pos + length]
if len(chunk) != length - 16:
raise MP4MetadataError("truncated atom %r" % atom.name)
yield version, flags, chunk
pos += length
def __add(self, key, value, single=False):
assert isinstance(key, str)
if single:
self[key] = value
else:
self.setdefault(key, []).extend(value)
def __render_data(self, key, version, flags, value):
return Atom.render(_key2name(key), b"".join([
Atom.render(
b"data", struct.pack(">2I", version << 24 | flags, 0) + data)
for data in value]))
def __parse_freeform(self, atom, data):
length = cdata.uint_be(data[:4])
mean = data[12:length]
pos = length
length = cdata.uint_be(data[pos:pos + 4])
name = data[pos + 12:pos + length]
pos += length
value = []
while pos < atom.length - 8:
length, atom_name = struct.unpack(">I4s", data[pos:pos + 8])
if atom_name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (atom_name, atom.name))
if length < 1:
raise MP4MetadataError(
"atom %r has a length of zero" % atom.name)
version = ord(data[pos + 8:pos + 8 + 1])
flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0]
value.append(MP4FreeForm(data[pos + 16:pos + length],
dataformat=flags, version=version))
pos += length
key = _name2key(atom.name + b":" + mean + b":" + name)
self.__add(key, value)
def __render_freeform(self, key, value):
if isinstance(value, bytes):
value = [value]
dummy, mean, name = _key2name(key).split(b":", 2)
mean = struct.pack(">I4sI", len(mean) + 12, b"mean", 0) + mean
name = struct.pack(">I4sI", len(name) + 12, b"name", 0) + name
data = b""
for v in value:
flags = AtomDataType.UTF8
version = 0
if isinstance(v, MP4FreeForm):
flags = v.dataformat
version = v.version
data += struct.pack(
">I4s2I", len(v) + 16, b"data", version << 24 | flags, 0)
data += v
return Atom.render(b"----", mean + name + data)
def __parse_pair(self, atom, data):
key = _name2key(atom.name)
values = [struct.unpack(">2H", d[2:6]) for
version, flags, d in self.__parse_data(atom, data)]
self.__add(key, values)
def __render_pair(self, key, value):
data = []
for v in value:
try:
track, total = v
except TypeError:
raise ValueError
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">4H", 0, track, total, 0))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __render_pair_no_trailing(self, key, value):
data = []
for (track, total) in value:
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">3H", 0, track, total))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __parse_genre(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
# version = 0, flags = 0
if len(data) != 2:
raise MP4MetadataValueError("invalid genre")
genre = cdata.short_be(data)
# Translate to a freeform genre.
try:
genre = GENRES[genre - 1]
except IndexError:
# this will make us write it back at least
raise MP4MetadataValueError("unknown genre")
values.append(genre)
key = _name2key(b"\xa9gen")
self.__add(key, values)
def __parse_integer(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
if version != 0:
raise MP4MetadataValueError("unsupported version")
if flags not in (AtomDataType.IMPLICIT, AtomDataType.INTEGER):
raise MP4MetadataValueError("unsupported type")
if len(data) == 1:
value = cdata.int8(data)
elif len(data) == 2:
value = cdata.int16_be(data)
elif len(data) == 3:
value = cdata.int32_be(data + b"\x00") >> 8
elif len(data) == 4:
value = cdata.int32_be(data)
elif len(data) == 8:
value = cdata.int64_be(data)
else:
raise MP4MetadataValueError(
"invalid value size %d" % len(data))
values.append(value)
key = _name2key(atom.name)
self.__add(key, values)
def __render_integer(self, key, value, min_bytes):
assert min_bytes in (1, 2, 4, 8)
data_list = []
try:
for v in value:
# We default to the int size of the usual values written
# by itunes for compatibility.
if cdata.int8_min <= v <= cdata.int8_max and min_bytes <= 1:
data = cdata.to_int8(v)
elif cdata.int16_min <= v <= cdata.int16_max and \
min_bytes <= 2:
data = cdata.to_int16_be(v)
elif cdata.int32_min <= v <= cdata.int32_max and \
min_bytes <= 4:
data = cdata.to_int32_be(v)
elif cdata.int64_min <= v <= cdata.int64_max and \
min_bytes <= 8:
data = cdata.to_int64_be(v)
else:
raise MP4MetadataValueError(
"value out of range: %r" % value)
data_list.append(data)
except (TypeError, ValueError, cdata.error) as e:
raise MP4MetadataValueError(e)
return self.__render_data(key, 0, AtomDataType.INTEGER, data_list)
def __parse_bool(self, atom, data):
for version, flags, data in self.__parse_data(atom, data):
if len(data) != 1:
raise MP4MetadataValueError("invalid bool")
value = bool(ord(data))
key = _name2key(atom.name)
self.__add(key, value, single=True)
def __render_bool(self, key, value):
return self.__render_data(
key, 0, AtomDataType.INTEGER, [bchr(bool(value))])
def __parse_cover(self, atom, data):
values = []
pos = 0
while pos < atom.length - 8:
length, name, imageformat = struct.unpack(">I4sI",
data[pos:pos + 12])
if name != b"data":
if name == b"name":
pos += length
continue
raise MP4MetadataError(
"unexpected atom %r inside 'covr'" % name)
if length < 1:
raise MP4MetadataError(
"atom %r has a length of zero" % atom.name)
if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG):
# Sometimes AtomDataType.IMPLICIT or simply wrong.
# In all cases it was jpeg, so default to it
imageformat = MP4Cover.FORMAT_JPEG
cover = MP4Cover(data[pos + 16:pos + length], imageformat)
values.append(cover)
pos += length
key = _name2key(atom.name)
self.__add(key, values)
def __render_cover(self, key, value):
atom_data = []
for cover in value:
try:
imageformat = cover.imageformat
except AttributeError:
imageformat = MP4Cover.FORMAT_JPEG
atom_data.append(Atom.render(
b"data", struct.pack(">2I", imageformat, 0) + cover))
return Atom.render(_key2name(key), b"".join(atom_data))
def __parse_text(self, atom, data, implicit=True):
# implicit = False, for parsing unknown atoms only take utf8 ones.
# For known ones we can assume the implicit are utf8 too.
values = []
for version, flags, atom_data in self.__parse_data(atom, data):
if implicit:
if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8):
raise MP4MetadataError(
"Unknown atom type %r for %r" % (flags, atom.name))
else:
if flags != AtomDataType.UTF8:
raise MP4MetadataError(
"%r is not text, ignore" % atom.name)
try:
text = atom_data.decode("utf-8")
except UnicodeDecodeError as e:
raise MP4MetadataError("%s: %s" % (_name2key(atom.name), e))
values.append(text)
key = _name2key(atom.name)
self.__add(key, values)
def __render_text(self, key, value, flags=AtomDataType.UTF8):
if isinstance(value, str):
value = [value]
encoded = []
for v in value:
if not isinstance(v, str):
raise TypeError("%r not str" % v)
encoded.append(v.encode("utf-8"))
return self.__render_data(key, 0, flags, encoded)
def delete(self, filename):
"""Remove the metadata from the given filename."""
self._failed_atoms.clear()
self.clear()
self.save(filename, padding=lambda x: 0)
__atoms = {
b"----": (__parse_freeform, __render_freeform),
b"trkn": (__parse_pair, __render_pair),
b"disk": (__parse_pair, __render_pair_no_trailing),
b"gnre": (__parse_genre, None),
b"plID": (__parse_integer, __render_integer, 8),
b"cnID": (__parse_integer, __render_integer, 4),
b"geID": (__parse_integer, __render_integer, 4),
b"atID": (__parse_integer, __render_integer, 4),
b"sfID": (__parse_integer, __render_integer, 4),
b"cmID": (__parse_integer, __render_integer, 4),
b"akID": (__parse_integer, __render_integer, 1),
b"tvsn": (__parse_integer, __render_integer, 4),
b"tves": (__parse_integer, __render_integer, 4),
b"tmpo": (__parse_integer, __render_integer, 2),
b"\xa9mvi": (__parse_integer, __render_integer, 2),
b"\xa9mvc": (__parse_integer, __render_integer, 2),
b"cpil": (__parse_bool, __render_bool),
b"pgap": (__parse_bool, __render_bool),
b"pcst": (__parse_bool, __render_bool),
b"shwm": (__parse_integer, __render_integer, 1),
b"stik": (__parse_integer, __render_integer, 1),
b"hdvd": (__parse_integer, __render_integer, 1),
b"rtng": (__parse_integer, __render_integer, 1),
b"covr": (__parse_cover, __render_cover),
b"purl": (__parse_text, __render_text),
b"egid": (__parse_text, __render_text),
}
# these allow implicit flags and parse as text
for name in [b"\xa9nam", b"\xa9alb", b"\xa9ART", b"aART", b"\xa9wrt",
b"\xa9day", b"\xa9cmt", b"desc", b"purd", b"\xa9grp",
b"\xa9gen", b"\xa9lyr", b"catg", b"keyw", b"\xa9too",
b"cprt", b"soal", b"soaa", b"soar", b"sonm", b"soco",
b"sosn", b"tvsh"]:
__atoms[name] = (__parse_text, __render_text)
def pprint(self):
def to_line(key, value):
assert isinstance(key, str)
if isinstance(value, str):
return u"%s=%s" % (key, value)
return u"%s=%r" % (key, value)
values = []
for key, value in sorted(self.items()):
if not isinstance(key, str):
key = key.decode("latin-1")
if key == "covr":
values.append(u"%s=%s" % (key, u", ".join(
[u"[%d bytes of data]" % len(data) for data in value])))
elif isinstance(value, list):
for v in value:
values.append(to_line(key, v))
else:
values.append(to_line(key, value))
return u"\n".join(values)
class Chapter(object):
"""Chapter()
Chapter information container
"""
def __init__(self, start, title):
self.start = start
self.title = title
class MP4Chapters(Sequence):
"""MP4Chapters()
MPEG-4 Chapter information.
Supports the 'moov.udta.chpl' box.
A sequence of Chapter objects with the following members:
start (`float`): position from the start of the file in seconds
title (`str`): title of the chapter
"""
def __init__(self, *args, **kwargs):
self._timescale = None
self._duration = None
self._chapters = []
super(MP4Chapters, self).__init__()
if args or kwargs:
self.load(*args, **kwargs)
def __len__(self):
return self._chapters.__len__()
def __getitem__(self, key):
return self._chapters.__getitem__(key)
def load(self, atoms, fileobj):
try:
mvhd = atoms.path(b"moov", b"mvhd")[-1]
except KeyError as key:
return MP4MetadataError(key)
self._parse_mvhd(mvhd, fileobj)
if not self._timescale:
raise MP4MetadataError("Unable to get timescale")
try:
chpl = atoms.path(b"moov", b"udta", b"chpl")[-1]
except KeyError as key:
return MP4MetadataError(key)
self._parse_chpl(chpl, fileobj)
@classmethod
def _can_load(cls, atoms):
return b"moov.udta.chpl" in atoms and b"moov.mvhd" in atoms
def _parse_mvhd(self, atom, fileobj):
assert atom.name == b"mvhd"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid mvhd")
version = data[0]
pos = 4
if version == 0:
pos += 8 # created, modified
self._timescale = struct.unpack(">l", data[pos:pos + 4])[0]
pos += 4
self._duration = struct.unpack(">l", data[pos:pos + 4])[0]
pos += 4
elif version == 1:
pos += 16 # created, modified
self._timescale = struct.unpack(">l", data[pos:pos + 4])[0]
pos += 4
self._duration = struct.unpack(">q", data[pos:pos + 8])[0]
pos += 8
def _parse_chpl(self, atom, fileobj):
assert atom.name == b"chpl"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid atom")
chapters = data[8]
pos = 9
for i in range(chapters):
start = struct.unpack(">Q", data[pos:pos + 8])[0] / 10000
pos += 8
title_len = data[pos]
pos += 1
try:
title = data[pos:pos + title_len].decode()
except UnicodeDecodeError as e:
raise MP4MetadataError("chapter %d title: %s" % (i, e))
pos += title_len
self._chapters.append(Chapter(start / self._timescale, title))
def pprint(self):
chapters = ["%s %s" % (timedelta(seconds=chapter.start), chapter.title)
for chapter in self._chapters]
return "chapters=%s" % '\n '.join(chapters)
class MP4Info(StreamInfo):
"""MP4Info()
MPEG-4 stream information.
Attributes:
bitrate (`int`): bitrate in bits per second, as an int
length (`float`): file length in seconds, as a float
channels (`int`): number of audio channels
sample_rate (`int`): audio sampling rate in Hz
bits_per_sample (`int`): bits per sample
codec (`mutagen.text`):
* if starting with ``"mp4a"`` uses an mp4a audio codec
(see the codec parameter in rfc6381 for details e.g.
``"mp4a.40.2"``)
* for everything else see a list of possible values at
http://www.mp4ra.org/codecs.html
e.g. ``"mp4a"``, ``"alac"``, ``"mp4a.40.2"``, ``"ac-3"`` etc.
codec_description (`mutagen.text`):
Name of the codec used (ALAC, AAC LC, AC-3...). Values might
change in the future, use for display purposes only.
"""
bitrate = 0
length = 0.0
channels = 0
sample_rate = 0
bits_per_sample = 0
codec = u""
codec_description = u""
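# A minimal sketch of reading these attributes (the path is hypothetical):
#
#   info = MP4("song.m4a").info
#   print(info.codec, info.sample_rate, info.channels)
#   print("%.2f seconds @ %d bps" % (info.length, info.bitrate))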
def __init__(self, *args, **kwargs):
if args or kwargs:
self.load(*args, **kwargs)
@convert_error(IOError, MP4StreamInfoError)
def load(self, atoms, fileobj):
try:
moov = atoms[b"moov"]
except KeyError:
raise MP4StreamInfoError("not a MP4 file")
for trak in moov.findall(b"trak"):
hdlr = trak[b"mdia", b"hdlr"]
ok, data = hdlr.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
if data[8:12] == b"soun":
break
else:
raise MP4NoTrackError("track has no audio data")
mdhd = trak[b"mdia", b"mdhd"]
ok, data = mdhd.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version == 0:
offset = 8
fmt = ">2I"
elif version == 1:
offset = 16
fmt = ">IQ"
else:
raise MP4StreamInfoError("Unknown mdhd version %d" % version)
end = offset + struct.calcsize(fmt)
unit, length = struct.unpack(fmt, data[offset:end])
try:
self.length = float(length) / unit
except ZeroDivisionError:
self.length = 0
try:
atom = trak[b"mdia", b"minf", b"stbl", b"stsd"]
except KeyError:
pass
else:
self._parse_stsd(atom, fileobj)
def _parse_stsd(self, atom, fileobj):
"""Sets channels, bits_per_sample, sample_rate and optionally bitrate.
Can raise MP4StreamInfoError.
"""
assert atom.name == b"stsd"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid stsd")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version != 0:
raise MP4StreamInfoError("Unsupported stsd version")
try:
num_entries, offset = cdata.uint32_be_from(data, 0)
except cdata.error as e:
raise MP4StreamInfoError(e)
if num_entries == 0:
return
# look at the first entry if there is one
entry_fileobj = BytesIO(data[offset:])
try:
entry_atom = Atom(entry_fileobj)
except AtomError as e:
raise MP4StreamInfoError(e)
try:
entry = AudioSampleEntry(entry_atom, entry_fileobj)
except ASEntryError as e:
raise MP4StreamInfoError(e)
else:
self.channels = entry.channels
self.bits_per_sample = entry.sample_size
self.sample_rate = entry.sample_rate
self.bitrate = entry.bitrate
self.codec = entry.codec
self.codec_description = entry.codec_description
def pprint(self):
return "MPEG-4 audio (%s), %.2f seconds, %d bps" % (
self.codec_description, self.length, self.bitrate)
class MP4(FileType):
"""MP4(filething)
An MPEG-4 audio file, probably containing AAC.
If more than one track is present in the file, the first is used.
Only audio ('soun') tracks will be read.
Arguments:
filething (filething)
Attributes:
info (`MP4Info`)
tags (`MP4Tags`)
"""
MP4Tags = MP4Tags
MP4Chapters = MP4Chapters
_mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]
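# A hedged end-to-end sketch (the path "podcast.m4a" is hypothetical):
#
#   audio = MP4("podcast.m4a")
#   if audio.tags is None:
#       audio.add_tags()
#   audio["\xa9nam"] = ["Episode 1"]
#   audio.save()
#   print(audio.pprint())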
@loadfile()
def load(self, filething):
fileobj = filething.fileobj
try:
atoms = Atoms(fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
self.info = MP4Info()
try:
self.info.load(atoms, fileobj)
except MP4NoTrackError:
pass
except error:
raise
except Exception as err:
reraise(MP4StreamInfoError, err, sys.exc_info()[2])
if not MP4Tags._can_load(atoms):
self.tags = None
else:
try:
self.tags = self.MP4Tags(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4MetadataError, err, sys.exc_info()[2])
if not MP4Chapters._can_load(atoms):
self.chapters = None
else:
try:
self.chapters = self.MP4Chapters(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4MetadataError, err, sys.exc_info()[2])
@property
def _padding(self):
if self.tags is None:
return 0
else:
return self.tags._padding
def save(self, *args, **kwargs):
"""save(filething=None, padding=None)"""
super(MP4, self).save(*args, **kwargs)
def pprint(self):
"""
Returns:
text: stream information, comment key=value pairs and chapters.
"""
stream = "%s (%s)" % (self.info.pprint(), self.mime[0])
try:
tags = self.tags.pprint()
except AttributeError:
pass
else:
stream += ((tags and "\n" + tags) or "")
try:
chapters = self.chapters.pprint()
except AttributeError:
pass
else:
stream += "\n" + chapters
return stream
def add_tags(self):
if self.tags is None:
self.tags = self.MP4Tags()
else:
raise error("an MP4 tag already exists")
@staticmethod
def score(filename, fileobj, header_data):
return (b"ftyp" in header_data) + (b"mp4" in header_data)
Open = MP4
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
""" delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
"""
t = MP4(filething)
filething.fileobj.seek(0)
t.delete(filething)
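# Usage sketch for the module-level delete() helper ("song.m4a" is a
# hypothetical path):
#
#   delete("song.m4a")   # removes the iTunes metadata, keeps the audio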
|
|
import binascii
import errno
import hashlib
import logging
import multiprocessing
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
import uuid
from inspect import signature
import numpy as np
import psutil
import ray
import ray.gcs_utils
import ray.ray_constants as ray_constants
pwd = None
if sys.platform != "win32":
import pwd
logger = logging.getLogger(__name__)
# Linux can bind child processes' lifetimes to that of their parents via prctl.
# prctl support is detected dynamically once, and assumed thereafter.
linux_prctl = None
# Windows can bind processes' lifetimes to that of kernel-level "job objects".
# We keep a global job object to tie its lifetime to that of our own process.
win32_job = None
win32_AssignProcessToJobObject = None
def get_user_temp_dir():
if sys.platform.startswith("darwin") or sys.platform.startswith("linux"):
# Ideally we wouldn't need this fallback, but keep it for now
# for compatibility.
tempdir = os.path.join(os.sep, "tmp")
else:
tempdir = tempfile.gettempdir()
return tempdir
def get_ray_temp_dir():
return os.path.join(get_user_temp_dir(), "ray")
def _random_string():
id_hash = hashlib.sha1()
id_hash.update(uuid.uuid4().bytes)
id_bytes = id_hash.digest()
assert len(id_bytes) == ray_constants.ID_SIZE
return id_bytes
def format_error_message(exception_message, task_exception=False):
"""Improve the formatting of an exception thrown by a remote function.
This method takes a traceback from an exception and makes it nicer by
removing a few uninformative lines and adding some space to indent the
remaining lines nicely.
Args:
exception_message (str): A message generated by traceback.format_exc().
task_exception (bool): Whether the error occurred inside a Ray task.
Returns:
A string of the formatted exception message.
"""
lines = exception_message.split("\n")
if task_exception:
# For errors that occur inside of tasks, remove lines 1 and 2 which are
# always the same, they just contain information about the worker code.
lines = lines[0:1] + lines[3:]
return "\n".join(lines)
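# Illustrative behaviour (the input string is made up): with
# task_exception=True the second and third traceback lines are dropped, e.g.
#
#   msg = ("Traceback (most recent call last):\n"
#          "  worker frame 1\n  worker frame 2\n  real frame")
#   format_error_message(msg, task_exception=True)
#   # -> "Traceback (most recent call last):\n  real frame"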
def push_error_to_driver(worker, error_type, message, job_id=None):
"""Push an error message to the driver to be printed in the background.
Args:
worker: The worker to use.
error_type (str): The type of the error.
message (str): The message that will be printed in the background
on the driver.
job_id: The ID of the driver to push the error message to. If this
is None, then the message will be pushed to all drivers.
"""
if job_id is None:
job_id = ray.JobID.nil()
assert isinstance(job_id, ray.JobID)
worker.core_worker.push_error(job_id, error_type, message, time.time())
def push_error_to_driver_through_redis(redis_client,
error_type,
message,
job_id=None):
"""Push an error message to the driver to be printed in the background.
Normally the push_error_to_driver function should be used. However, in some
instances, the raylet client is not available, e.g., because the
error happens in Python before the driver or worker has connected to the
backend processes.
Args:
redis_client: The redis client to use.
error_type (str): The type of the error.
message (str): The message that will be printed in the background
on the driver.
job_id: The ID of the driver to push the error message to. If this
is None, then the message will be pushed to all drivers.
"""
if job_id is None:
job_id = ray.JobID.nil()
assert isinstance(job_id, ray.JobID)
# Do everything in Python and through the Python Redis client instead
# of through the raylet.
error_data = ray.gcs_utils.construct_error_message(job_id, error_type,
message, time.time())
pubsub_msg = ray.gcs_utils.PubSubMessage()
pubsub_msg.id = job_id.binary()
pubsub_msg.data = error_data
redis_client.publish("ERROR_INFO:" + job_id.hex(),
pubsub_msg.SerializeToString())
def random_string():
"""Generate a random string to use as an ID.
Note that users may seed numpy, which could cause this function to generate
duplicate IDs. Therefore, we need to seed numpy ourselves, but we can't
interfere with the state of the user's random number generator, so we
extract the state of the random number generator and reset it after we are
done.
TODO(rkn): If we want to later guarantee that these are generated in a
deterministic manner, then we will need to make some changes here.
Returns:
A random byte string of length ray_constants.ID_SIZE.
"""
# Get the state of the numpy random number generator.
numpy_state = np.random.get_state()
# Try to use true randomness.
np.random.seed(None)
# Generate the random ID.
random_id = np.random.bytes(ray_constants.ID_SIZE)
# Reset the state of the numpy random number generator.
np.random.set_state(numpy_state)
return random_id
def decode(byte_str, allow_none=False):
"""Make this unicode in Python 3, otherwise leave it as bytes.
Args:
byte_str: The byte string to decode.
allow_none: If true, then we will allow byte_str to be None in which
case we will return an empty string. TODO(rkn): Remove this flag.
This is only here to simplify upgrading to flatbuffers 1.10.0.
Returns:
A byte string in Python 2 and a unicode string in Python 3.
"""
if byte_str is None and allow_none:
return ""
if not isinstance(byte_str, bytes):
raise ValueError(f"The argument {byte_str} must be a bytes object.")
if sys.version_info >= (3, 0):
return byte_str.decode("ascii")
else:
return byte_str
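# Quick examples (Python 3 behaviour):
#
#   decode(b"raylet")              # -> "raylet"
#   decode(None, allow_none=True)  # -> ""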
def ensure_str(s, encoding="utf-8", errors="strict"):
"""Coerce *s* to `str`.
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, str):
return s
else:
assert isinstance(s, bytes)
return s.decode(encoding, errors)
def binary_to_object_ref(binary_object_ref):
return ray.ObjectRef(binary_object_ref)
def binary_to_task_id(binary_task_id):
return ray.TaskID(binary_task_id)
def binary_to_hex(identifier):
hex_identifier = binascii.hexlify(identifier)
if sys.version_info >= (3, 0):
hex_identifier = hex_identifier.decode()
return hex_identifier
def hex_to_binary(hex_identifier):
return binascii.unhexlify(hex_identifier)
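# Round-trip example:
#
#   binary_to_hex(b"\x01\xff")   # -> "01ff"
#   hex_to_binary("01ff")        # -> b"\x01\xff"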
# TODO(qwang): Remove these helper functions
# once we separate `WorkerID` from `UniqueID`.
def compute_job_id_from_driver(driver_id):
assert isinstance(driver_id, ray.WorkerID)
return ray.JobID(driver_id.binary()[0:ray.JobID.size()])
def compute_driver_id_from_job(job_id):
assert isinstance(job_id, ray.JobID)
rest_length = ray_constants.ID_SIZE - job_id.size()
driver_id_str = job_id.binary() + (rest_length * b"\xff")
return ray.WorkerID(driver_id_str)
def get_cuda_visible_devices():
"""Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.
Returns:
devices (List[str]): If CUDA_VISIBLE_DEVICES is set, returns a
list of strings representing the IDs of the visible GPUs.
If it is not set, returns None. If it is set to an empty string or
to NoDevFiles, returns an empty list.
"""
gpu_ids_str = os.environ.get("CUDA_VISIBLE_DEVICES", None)
if gpu_ids_str is None:
return None
if gpu_ids_str == "":
return []
if gpu_ids_str == "NoDevFiles":
return []
# GPU identifiers are given as strings representing integers or UUIDs.
return list(gpu_ids_str.split(","))
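# Example (assuming the environment variable was set by the user or by Ray):
#
#   os.environ["CUDA_VISIBLE_DEVICES"] = "0,2"
#   get_cuda_visible_devices()   # -> ["0", "2"]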
last_set_gpu_ids = None
def set_cuda_visible_devices(gpu_ids):
"""Set the CUDA_VISIBLE_DEVICES environment variable.
Args:
gpu_ids (List[str]): List of strings representing GPU IDs.
"""
global last_set_gpu_ids
if last_set_gpu_ids == gpu_ids:
return # optimization: already set
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in gpu_ids])
last_set_gpu_ids = gpu_ids
def resources_from_resource_arguments(
default_num_cpus, default_num_gpus, default_memory,
default_object_store_memory, default_resources,
default_accelerator_type, runtime_num_cpus, runtime_num_gpus,
runtime_memory, runtime_object_store_memory, runtime_resources,
runtime_accelerator_type):
"""Determine a task's resource requirements.
Args:
default_num_cpus: The default number of CPUs required by this function
or actor method.
default_num_gpus: The default number of GPUs required by this function
or actor method.
default_memory: The default heap memory required by this function
or actor method.
default_object_store_memory: The default object store memory required
by this function or actor method.
default_resources: The default custom resources required by this
function or actor method.
runtime_num_cpus: The number of CPUs requested when the task was
invoked.
runtime_num_gpus: The number of GPUs requested when the task was
invoked.
runtime_memory: The heap memory requested when the task was invoked.
runtime_object_store_memory: The object store memory requested when
the task was invoked.
runtime_resources: The custom resources requested when the task was
invoked.
Returns:
A dictionary of the resource requirements for the task.
"""
if runtime_resources is not None:
resources = runtime_resources.copy()
elif default_resources is not None:
resources = default_resources.copy()
else:
resources = {}
if "CPU" in resources or "GPU" in resources:
raise ValueError("The resources dictionary must not "
"contain the key 'CPU' or 'GPU'")
elif "memory" in resources or "object_store_memory" in resources:
raise ValueError("The resources dictionary must not "
"contain the key 'memory' or 'object_store_memory'")
assert default_num_cpus is not None
resources["CPU"] = (default_num_cpus
if runtime_num_cpus is None else runtime_num_cpus)
if runtime_num_gpus is not None:
resources["GPU"] = runtime_num_gpus
elif default_num_gpus is not None:
resources["GPU"] = default_num_gpus
# Order of arguments matter for short circuiting.
memory = runtime_memory or default_memory
object_store_memory = (runtime_object_store_memory
or default_object_store_memory)
if memory is not None:
resources["memory"] = ray_constants.to_memory_units(
memory, round_up=True)
if object_store_memory is not None:
resources["object_store_memory"] = ray_constants.to_memory_units(
object_store_memory, round_up=True)
if runtime_accelerator_type is not None:
resources[f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}"
f"{runtime_accelerator_type}"] = 0.001
elif default_accelerator_type is not None:
resources[f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}"
f"{default_accelerator_type}"] = 0.001
return resources
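# A hedged example of the override behaviour (all values are made up):
#
#   resources_from_resource_arguments(
#       default_num_cpus=1, default_num_gpus=None, default_memory=None,
#       default_object_store_memory=None, default_resources=None,
#       default_accelerator_type=None, runtime_num_cpus=2,
#       runtime_num_gpus=1, runtime_memory=None,
#       runtime_object_store_memory=None, runtime_resources=None,
#       runtime_accelerator_type=None)
#   # -> {"CPU": 2, "GPU": 1}   (runtime values take precedence over defaults)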
class Unbuffered(object):
"""There's no "built-in" solution to programmatically disabling buffering of
text files. Ray expects stdout/err to be text files, so creating an
unbuffered binary file is unacceptable.
See
https://mail.python.org/pipermail/tutor/2003-November/026645.html.
https://docs.python.org/3/library/functions.html#open
"""
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def open_log(path, unbuffered=False, **kwargs):
"""
Opens the log file at `path`, with the provided kwargs being given to
`open`.
"""
# Disable buffering, see test_advanced_3.py::test_logging_to_driver
kwargs.setdefault("buffering", 1)
kwargs.setdefault("mode", "a")
kwargs.setdefault("encoding", "utf-8")
stream = open(path, **kwargs)
if unbuffered:
return Unbuffered(stream)
else:
return stream
def get_system_memory():
"""Return the total amount of system memory in bytes.
Returns:
The total amount of system memory in bytes.
"""
# Try to accurately figure out the memory limit if we are in a docker
# container. Note that this file is not specific to Docker and its value is
# often much larger than the actual amount of memory.
docker_limit = None
memory_limit_filename = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
if os.path.exists(memory_limit_filename):
with open(memory_limit_filename, "r") as f:
docker_limit = int(f.read())
# Use psutil if it is available.
psutil_memory_in_bytes = psutil.virtual_memory().total
if docker_limit is not None:
# We take the min because the cgroup limit is very large if we aren't
# in Docker.
return min(docker_limit, psutil_memory_in_bytes)
return psutil_memory_in_bytes
def _get_docker_cpus(
cpu_quota_file_name="/sys/fs/cgroup/cpu/cpu.cfs_quota_us",
cpu_share_file_name="/sys/fs/cgroup/cpu/cpu.cfs_period_us",
cpuset_file_name="/sys/fs/cgroup/cpuset/cpuset.cpus"):
# TODO (Alex): Don't implement this logic ourselves.
# Docker has 2 underlying ways of implementing CPU limits:
# https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler
# 1. --cpuset-cpus 2. --cpus or --cpu-quota/--cpu-period (--cpu-shares is a
# soft limit so we don't worry about it). For Ray's purposes, if we use
# docker, the number of vCPUs on a machine is whichever is set (ties broken
# by smaller value).
cpu_quota = None
# See: https://bugs.openjdk.java.net/browse/JDK-8146115
if os.path.exists(cpu_quota_file_name) and os.path.exists(
cpu_share_file_name):
try:
with open(cpu_quota_file_name, "r") as quota_file, open(
cpu_share_file_name, "r") as period_file:
cpu_quota = float(quota_file.read()) / float(
period_file.read())
except Exception:
logger.exception("Unexpected error calculating docker cpu quota.")
if cpu_quota is not None and cpu_quota < 0:
cpu_quota = None
cpuset_num = None
if os.path.exists(cpuset_file_name):
try:
with open(cpuset_file_name) as cpuset_file:
ranges_as_string = cpuset_file.read()
ranges = ranges_as_string.split(",")
cpu_ids = []
for num_or_range in ranges:
if "-" in num_or_range:
start, end = num_or_range.split("-")
cpu_ids.extend(list(range(int(start), int(end) + 1)))
else:
cpu_ids.append(int(num_or_range))
cpuset_num = len(cpu_ids)
except Exception:
logger.exception("Unexpected error calculating docker cpuset ids.")
if cpu_quota and cpuset_num:
return min(cpu_quota, cpuset_num)
else:
return cpu_quota or cpuset_num
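# Worked example of the quota calculation (hypothetical cgroup values):
# cpu.cfs_quota_us = 200000 and cpu.cfs_period_us = 100000 gives
# 200000 / 100000 = 2.0 CPUs; if cpuset.cpus = "0-3" (4 ids) is also set,
# the smaller of the two values (2.0) is returned.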
def get_num_cpus():
cpu_count = multiprocessing.cpu_count()
if os.environ.get("RAY_USE_MULTIPROCESSING_CPU_COUNT"):
logger.info(
"Detected RAY_USE_MULTIPROCESSING_CPU_COUNT=1: Using "
"multiprocessing.cpu_count() to detect the number of CPUs. "
"This may be inconsistent when used inside docker. "
"To correctly detect CPUs, unset the env var: "
"`RAY_USE_MULTIPROCESSING_CPU_COUNT`.")
return cpu_count
try:
# Not easy to get cpu count in docker, see:
# https://bugs.python.org/issue36054
docker_count = _get_docker_cpus()
if docker_count is not None and docker_count != cpu_count:
if "RAY_DISABLE_DOCKER_CPU_WARNING" not in os.environ:
logger.warning(
"Detecting docker specified CPUs. In "
"previous versions of Ray, CPU detection in containers "
"was incorrect. Please ensure that Ray has enough CPUs "
"allocated. As a temporary workaround to revert to the "
"prior behavior, set "
"`RAY_USE_MULTIPROCESSING_CPU_COUNT=1` as an env var "
"before starting Ray. Set the env var: "
"`RAY_DISABLE_DOCKER_CPU_WARNING=1` to mute this warning.")
# TODO (Alex): We should probably add support for fractional cpus.
if int(docker_count) != float(docker_count):
logger.warning(
f"Ray currently does not support initializing Ray "
f"with fractional cpus. Your num_cpus will be "
f"truncated from {docker_count} to "
f"{int(docker_count)}.")
docker_count = int(docker_count)
cpu_count = docker_count
except Exception:
# `nproc` and cgroups are Linux-only. Docker effectively only runs on
# Linux (it uses a Linux VM on other platforms), so this is fine.
pass
return cpu_count
def get_used_memory():
"""Return the currently used system memory in bytes
Returns:
The total amount of used memory
"""
# Try to accurately figure out the memory usage if we are in a docker
# container.
docker_usage = None
memory_usage_filename = "/sys/fs/cgroup/memory/memory.usage_in_bytes"
if os.path.exists(memory_usage_filename):
with open(memory_usage_filename, "r") as f:
docker_usage = int(f.read())
# Use psutil if it is available.
psutil_memory_in_bytes = psutil.virtual_memory().used
if docker_usage is not None:
# We take the min because the cgroup limit is very large if we aren't
# in Docker.
return min(docker_usage, psutil_memory_in_bytes)
return psutil_memory_in_bytes
def estimate_available_memory():
"""Return the currently available amount of system memory in bytes.
Returns:
The total amount of available memory in bytes. Based on the used
and total memory.
"""
return get_system_memory() - get_used_memory()
def get_shared_memory_bytes():
"""Get the size of the shared memory file system.
Returns:
The size of the shared memory file system in bytes.
"""
# Make sure this is only called on Linux.
assert sys.platform == "linux" or sys.platform == "linux2"
shm_fd = os.open("/dev/shm", os.O_RDONLY)
try:
shm_fs_stats = os.fstatvfs(shm_fd)
# The value shm_fs_stats.f_bsize is the block size and the
# value shm_fs_stats.f_bavail is the number of available
# blocks.
shm_avail = shm_fs_stats.f_bsize * shm_fs_stats.f_bavail
finally:
os.close(shm_fd)
return shm_avail
def check_oversized_pickle(pickled, name, obj_type, worker):
"""Send a warning message if the pickled object is too large.
Args:
pickled: the pickled object.
name: name of the pickled object.
obj_type: type of the pickled object, can be 'function',
'remote function', 'actor', or 'object'.
worker: the worker used to send warning message.
"""
length = len(pickled)
if length <= ray_constants.PICKLE_OBJECT_WARNING_SIZE:
return
warning_message = (
"Warning: The {} {} has size {} when pickled. "
"It will be stored in Redis, which could cause memory issues. "
"This may mean that its definition uses a large array or other object."
).format(obj_type, name, length)
push_error_to_driver(
worker,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,
warning_message,
job_id=worker.current_job_id)
def is_main_thread():
return threading.current_thread().getName() == "MainThread"
def detect_fate_sharing_support_win32():
global win32_job, win32_AssignProcessToJobObject
if win32_job is None and sys.platform == "win32":
import ctypes
try:
from ctypes.wintypes import BOOL, DWORD, HANDLE, LPVOID, LPCWSTR
kernel32 = ctypes.WinDLL("kernel32")
kernel32.CreateJobObjectW.argtypes = (LPVOID, LPCWSTR)
kernel32.CreateJobObjectW.restype = HANDLE
sijo_argtypes = (HANDLE, ctypes.c_int, LPVOID, DWORD)
kernel32.SetInformationJobObject.argtypes = sijo_argtypes
kernel32.SetInformationJobObject.restype = BOOL
kernel32.AssignProcessToJobObject.argtypes = (HANDLE, HANDLE)
kernel32.AssignProcessToJobObject.restype = BOOL
kernel32.IsDebuggerPresent.argtypes = ()
kernel32.IsDebuggerPresent.restype = BOOL
except (AttributeError, TypeError, ImportError):
kernel32 = None
job = kernel32.CreateJobObjectW(None, None) if kernel32 else None
job = subprocess.Handle(job) if job else job
if job:
from ctypes.wintypes import DWORD, LARGE_INTEGER, ULARGE_INTEGER
class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
("PerProcessUserTimeLimit", LARGE_INTEGER),
("PerJobUserTimeLimit", LARGE_INTEGER),
("LimitFlags", DWORD),
("MinimumWorkingSetSize", ctypes.c_size_t),
("MaximumWorkingSetSize", ctypes.c_size_t),
("ActiveProcessLimit", DWORD),
("Affinity", ctypes.c_size_t),
("PriorityClass", DWORD),
("SchedulingClass", DWORD),
]
class IO_COUNTERS(ctypes.Structure):
_fields_ = [
("ReadOperationCount", ULARGE_INTEGER),
("WriteOperationCount", ULARGE_INTEGER),
("OtherOperationCount", ULARGE_INTEGER),
("ReadTransferCount", ULARGE_INTEGER),
("WriteTransferCount", ULARGE_INTEGER),
("OtherTransferCount", ULARGE_INTEGER),
]
class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
("BasicLimitInformation",
JOBOBJECT_BASIC_LIMIT_INFORMATION),
("IoInfo", IO_COUNTERS),
("ProcessMemoryLimit", ctypes.c_size_t),
("JobMemoryLimit", ctypes.c_size_t),
("PeakProcessMemoryUsed", ctypes.c_size_t),
("PeakJobMemoryUsed", ctypes.c_size_t),
]
debug = kernel32.IsDebuggerPresent()
# Defined in <WinNT.h>; also available here:
# https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-setinformationjobobject
JobObjectExtendedLimitInformation = 9
JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400
JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000
buf = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
buf.BasicLimitInformation.LimitFlags = (
(0 if debug else JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE)
| JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION
| JOB_OBJECT_LIMIT_BREAKAWAY_OK)
infoclass = JobObjectExtendedLimitInformation
if not kernel32.SetInformationJobObject(
job, infoclass, ctypes.byref(buf), ctypes.sizeof(buf)):
job = None
win32_AssignProcessToJobObject = (kernel32.AssignProcessToJobObject
if kernel32 is not None else False)
win32_job = job if job else False
return bool(win32_job)
def detect_fate_sharing_support_linux():
global linux_prctl
if linux_prctl is None and sys.platform.startswith("linux"):
try:
from ctypes import c_int, c_ulong, CDLL
prctl = CDLL(None).prctl
prctl.restype = c_int
prctl.argtypes = [c_int, c_ulong, c_ulong, c_ulong, c_ulong]
except (AttributeError, TypeError):
prctl = None
linux_prctl = prctl if prctl else False
return bool(linux_prctl)
def detect_fate_sharing_support():
result = None
if sys.platform == "win32":
result = detect_fate_sharing_support_win32()
elif sys.platform.startswith("linux"):
result = detect_fate_sharing_support_linux()
return result
def set_kill_on_parent_death_linux():
"""Ensures this process dies if its parent dies (fate-sharing).
Linux-only. Must be called in preexec_fn (i.e. by the child).
"""
if detect_fate_sharing_support_linux():
import signal
PR_SET_PDEATHSIG = 1
if linux_prctl(PR_SET_PDEATHSIG, signal.SIGKILL, 0, 0, 0) != 0:
import ctypes
raise OSError(ctypes.get_errno(), "prctl(PR_SET_PDEATHSIG) failed")
else:
assert False, "PR_SET_PDEATHSIG used despite being unavailable"
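# Usage sketch (POSIX only; the command is hypothetical): pass the function
# as preexec_fn so the child is SIGKILLed if this process dies first.
#
#   proc = subprocess.Popen(["sleep", "60"],
#                           preexec_fn=set_kill_on_parent_death_linux)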
def set_kill_child_on_death_win32(child_proc):
"""Ensures the child process dies if this process dies (fate-sharing).
Windows-only. Must be called by the parent, after spawning the child.
Args:
child_proc: The subprocess.Popen or subprocess.Handle object.
"""
if isinstance(child_proc, subprocess.Popen):
child_proc = child_proc._handle
assert isinstance(child_proc, subprocess.Handle)
if detect_fate_sharing_support_win32():
if not win32_AssignProcessToJobObject(win32_job, int(child_proc)):
import ctypes
raise OSError(ctypes.get_last_error(),
"AssignProcessToJobObject() failed")
else:
assert False, "AssignProcessToJobObject used despite being unavailable"
def set_sigterm_handler(sigterm_handler):
"""Registers a handler for SIGTERM in a platform-compatible manner."""
if sys.platform == "win32":
# Note that these signal handlers only work for console applications.
# TODO(mehrdadn): implement graceful process termination mechanism
# SIGINT is Ctrl+C, SIGBREAK is Ctrl+Break.
signal.signal(signal.SIGBREAK, sigterm_handler)
else:
signal.signal(signal.SIGTERM, sigterm_handler)
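# Illustrative only: a minimal sketch of installing a termination handler via
# the helper above; the handler body and exit code are hypothetical.
def _example_install_sigterm_handler():
    """Hypothetical sketch: exit on SIGTERM (or SIGBREAK on Windows)."""
    import sys

    def _handler(signum, frame):
        sys.exit(1)

    set_sigterm_handler(_handler)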
def try_make_directory_shared(directory_path):
try:
os.chmod(directory_path, 0o0777)
except OSError as e:
# Silently suppress the PermissionError that is thrown by the chmod.
# This is done because the user attempting to change the permissions
# on a directory may not own it. The chmod is attempted whether the
# directory is new or not to avoid race conditions.
# ray-project/ray/#3591
if e.errno in [errno.EACCES, errno.EPERM]:
pass
else:
raise
def try_to_create_directory(directory_path):
"""Attempt to create a directory that is globally readable/writable.
Args:
directory_path: The path of the directory to create.
"""
directory_path = os.path.expanduser(directory_path)
os.makedirs(directory_path, exist_ok=True)
# Change the log directory permissions so others can use it. This is
# important when multiple people are using the same machine.
try_make_directory_shared(directory_path)
def try_to_symlink(symlink_path, target_path):
"""Attempt to create a symlink.
    If a non-symlink file already exists at the symlink path, the symlink
    will not be created. If a symlink already exists there, an attempt is
    made to remove and replace it.
Args:
symlink_path: The path at which to create the symlink.
target_path: The path the symlink should point to.
"""
symlink_path = os.path.expanduser(symlink_path)
target_path = os.path.expanduser(target_path)
if os.path.exists(symlink_path):
if os.path.islink(symlink_path):
# Try to remove existing symlink.
try:
os.remove(symlink_path)
except OSError:
return
else:
# There's an existing non-symlink file, don't overwrite it.
return
try:
os.symlink(target_path, symlink_path)
except OSError:
return
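# Illustrative only: a minimal sketch combining the directory and symlink
# helpers above; the paths are hypothetical.
def _example_prepare_session_dir(session_dir="/tmp/example_session",
                                 latest_link="/tmp/example_session_latest"):
    """Hypothetical sketch: create a shared directory and a 'latest' symlink."""
    try_to_create_directory(session_dir)
    try_to_symlink(latest_link, session_dir)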
def get_user():
if pwd is None:
return ""
try:
return pwd.getpwuid(os.getuid()).pw_name
except Exception:
return ""
def get_function_args(callable):
all_parameters = frozenset(signature(callable).parameters)
return list(all_parameters)
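# Illustrative only: get_function_args() above returns the parameter names of
# a callable; because the names pass through a frozenset, their order is not
# guaranteed.
def _example_get_function_args():
    """Hypothetical sketch showing what get_function_args() returns."""
    def f(a, b, c=1):
        return a + b + c

    names = get_function_args(f)
    assert sorted(names) == ["a", "b", "c"]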
|
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo.config import cfg
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import expression as expr
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context as n_context
from neutron.db import agents_db
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import portbindings_db as p_binding
from neutron.extensions import providernet as pr_net
from neutron.i18n import _LE, _LI
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.cisco.common import cisco_constants as c_const
from neutron.plugins.cisco.db.l3 import l3_models
from neutron.plugins.cisco.l3.rpc import l3_router_rpc_joint_agent_api
LOG = logging.getLogger(__name__)
ROUTER_APPLIANCE_OPTS = [
cfg.IntOpt('backlog_processing_interval',
default=10,
help=_('Time in seconds between renewed scheduling attempts of '
'non-scheduled routers.')),
]
cfg.CONF.register_opts(ROUTER_APPLIANCE_OPTS, "general")
class RouterCreateInternalError(n_exc.NeutronException):
message = _("Router could not be created due to internal error.")
class RouterInternalError(n_exc.NeutronException):
message = _("Internal error during router processing.")
class RouterBindingInfoError(n_exc.NeutronException):
message = _("Could not get binding information for router %(router_id)s.")
class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
"""Mixin class implementing Neutron's routing service using appliances."""
    # Dictionary of routers for which new scheduling attempts should be made,
    # plus the backlog refresh flag and the heartbeat loop that drives them.
_backlogged_routers = {}
_refresh_router_backlog = True
_heartbeat = None
@property
def l3_cfg_rpc_notifier(self):
if not hasattr(self, '_l3_cfg_rpc_notifier'):
self._l3_cfg_rpc_notifier = (l3_router_rpc_joint_agent_api.
L3RouterJointAgentNotifyAPI(self))
return self._l3_cfg_rpc_notifier
@l3_cfg_rpc_notifier.setter
def l3_cfg_rpc_notifier(self, value):
self._l3_cfg_rpc_notifier = value
def create_router(self, context, router):
with context.session.begin(subtransactions=True):
if self.mgmt_nw_id() is None:
raise RouterCreateInternalError()
router_created = (super(L3RouterApplianceDBMixin, self).
create_router(context, router))
r_hd_b_db = l3_models.RouterHostingDeviceBinding(
router_id=router_created['id'],
auto_schedule=True,
hosting_device_id=None)
context.session.add(r_hd_b_db)
# backlog so this new router gets scheduled asynchronously
self.backlog_router(r_hd_b_db['router'])
return router_created
def update_router(self, context, id, router):
r = router['router']
# Check if external gateway has changed so we may have to
# update trunking
o_r_db = self._get_router(context, id)
old_ext_gw = (o_r_db.gw_port or {}).get('network_id')
new_ext_gw = (r.get('external_gateway_info', {}) or {}).get(
'network_id')
with context.session.begin(subtransactions=True):
e_context = context.elevated()
if old_ext_gw is not None and old_ext_gw != new_ext_gw:
o_r = self._make_router_dict(o_r_db, process_extensions=False)
# no need to schedule now since we're only doing this to
# tear-down connectivity and there won't be any if not
# already scheduled.
self._add_type_and_hosting_device_info(e_context, o_r,
schedule=False)
p_drv = self.get_hosting_device_plugging_driver()
if p_drv is not None:
p_drv.teardown_logical_port_connectivity(e_context,
o_r_db.gw_port)
router_updated = (
super(L3RouterApplianceDBMixin, self).update_router(
context, id, router))
routers = [copy.deepcopy(router_updated)]
self._add_type_and_hosting_device_info(e_context, routers[0])
self.l3_cfg_rpc_notifier.routers_updated(context, routers)
return router_updated
def delete_router(self, context, id):
router_db = self._get_router(context, id)
router = self._make_router_dict(router_db)
with context.session.begin(subtransactions=True):
e_context = context.elevated()
r_hd_binding = self._get_router_binding_info(e_context, id)
self._add_type_and_hosting_device_info(
e_context, router, binding_info=r_hd_binding, schedule=False)
if router_db.gw_port is not None:
p_drv = self.get_hosting_device_plugging_driver()
if p_drv is not None:
p_drv.teardown_logical_port_connectivity(e_context,
router_db.gw_port)
# conditionally remove router from backlog just to be sure
self.remove_router_from_backlog(id)
if router['hosting_device'] is not None:
self.unschedule_router_from_hosting_device(context,
r_hd_binding)
super(L3RouterApplianceDBMixin, self).delete_router(context, id)
self.l3_cfg_rpc_notifier.router_deleted(context, router)
def notify_router_interface_action(
self, context, router_interface_info, routers, action):
l3_method = '%s_router_interface' % action
self.l3_cfg_rpc_notifier.routers_updated(context, routers, l3_method)
mapping = {'add': 'create', 'remove': 'delete'}
notifier = n_rpc.get_notifier('network')
router_event = 'router.interface.%s' % mapping[action]
notifier.info(context, router_event,
{'router_interface': router_interface_info})
def add_router_interface(self, context, router_id, interface_info):
with context.session.begin(subtransactions=True):
info = (super(L3RouterApplianceDBMixin, self).
add_router_interface(context, router_id, interface_info))
routers = [self.get_router(context, router_id)]
self._add_type_and_hosting_device_info(context.elevated(),
routers[0])
self.notify_router_interface_action(context, info, routers, 'add')
return info
def remove_router_interface(self, context, router_id, interface_info):
if 'port_id' in (interface_info or {}):
port_db = self._core_plugin._get_port(
context, interface_info['port_id'])
elif 'subnet_id' in (interface_info or {}):
subnet_db = self._core_plugin._get_subnet(
context, interface_info['subnet_id'])
port_db = self._get_router_port_db_on_subnet(
context, router_id, subnet_db)
else:
msg = _("Either subnet_id or port_id must be specified")
raise n_exc.BadRequest(resource='router', msg=msg)
routers = [self.get_router(context, router_id)]
with context.session.begin(subtransactions=True):
e_context = context.elevated()
self._add_type_and_hosting_device_info(e_context, routers[0])
p_drv = self.get_hosting_device_plugging_driver()
if p_drv is not None:
p_drv.teardown_logical_port_connectivity(e_context, port_db)
info = (super(L3RouterApplianceDBMixin, self).
remove_router_interface(context, router_id,
interface_info))
self.notify_router_interface_action(context, info, routers, 'remove')
return info
def create_floatingip(
self, context, floatingip,
initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
with context.session.begin(subtransactions=True):
info = super(L3RouterApplianceDBMixin, self).create_floatingip(
context, floatingip)
if info['router_id']:
routers = [self.get_router(context, info['router_id'])]
self._add_type_and_hosting_device_info(context.elevated(),
routers[0])
self.l3_cfg_rpc_notifier.routers_updated(context, routers,
'create_floatingip')
return info
def update_floatingip(self, context, id, floatingip):
orig_fl_ip = super(L3RouterApplianceDBMixin, self).get_floatingip(
context, id)
before_router_id = orig_fl_ip['router_id']
with context.session.begin(subtransactions=True):
info = super(L3RouterApplianceDBMixin, self).update_floatingip(
context, id, floatingip)
router_ids = []
if before_router_id:
router_ids.append(before_router_id)
router_id = info['router_id']
if router_id and router_id != before_router_id:
router_ids.append(router_id)
routers = []
for router_id in router_ids:
router = self.get_router(context, router_id)
self._add_type_and_hosting_device_info(context.elevated(),
router)
routers.append(router)
self.l3_cfg_rpc_notifier.routers_updated(context, routers,
'update_floatingip')
return info
def delete_floatingip(self, context, id):
floatingip_db = self._get_floatingip(context, id)
router_id = floatingip_db['router_id']
with context.session.begin(subtransactions=True):
super(L3RouterApplianceDBMixin, self).delete_floatingip(
context, id)
if router_id:
routers = [self.get_router(context, router_id)]
self._add_type_and_hosting_device_info(context.elevated(),
routers[0])
self.l3_cfg_rpc_notifier.routers_updated(context, routers,
'delete_floatingip')
def disassociate_floatingips(self, context, port_id, do_notify=True):
with context.session.begin(subtransactions=True):
router_ids = super(L3RouterApplianceDBMixin,
self).disassociate_floatingips(context, port_id)
if router_ids and do_notify:
routers = []
for router_id in router_ids:
router = self.get_router(context, router_id)
self._add_type_and_hosting_device_info(context.elevated(),
router)
routers.append(router)
self.l3_cfg_rpc_notifier.routers_updated(
context, routers, 'disassociate_floatingips')
# since caller assumes that we handled notifications on its
# behalf, return nothing
return
return router_ids
@lockutils.synchronized('routerbacklog', 'neutron-')
def _handle_non_responding_hosting_devices(self, context, hosting_devices,
affected_resources):
"""Handle hosting devices determined to be "dead".
This function is called by the hosting device manager.
        Service plugins are supposed to extend the 'affected_resources'
        dictionary. Hence, we add the ids of the Neutron routers that are
        hosted in <hosting_devices>.
param: hosting_devices - list of dead hosting devices
param: affected_resources - dict with list of affected logical
resources per hosting device:
{'hd_id1': {'routers': [id1, id2, ...],
'fw': [id1, ...],
...},
'hd_id2': {'routers': [id3, id4, ...],
'fw': [id1, ...],
...},
...}
"""
LOG.debug('Processing affected routers in dead hosting devices')
with context.session.begin(subtransactions=True):
for hd in hosting_devices:
hd_bindings = self._get_hosting_device_bindings(context,
hd['id'])
router_ids = []
for binding in hd_bindings:
router_ids.append(binding['router_id'])
if binding['auto_schedule']:
self.backlog_router(binding['router'])
try:
affected_resources[hd['id']].update(
{'routers': router_ids})
except KeyError:
affected_resources[hd['id']] = {'routers': router_ids}
def get_sync_data_ext(self, context, router_ids=None, active=None):
"""Query routers and their related floating_ips, interfaces.
Adds information about hosting device as well as trunking.
"""
with context.session.begin(subtransactions=True):
sync_data = (super(L3RouterApplianceDBMixin, self).
get_sync_data(context, router_ids, active))
for router in sync_data:
self._add_type_and_hosting_device_info(context, router)
plg_drv = self.get_hosting_device_plugging_driver()
if plg_drv and router['hosting_device']:
self._add_hosting_port_info(context, router, plg_drv)
return sync_data
def schedule_router_on_hosting_device(self, context, r_hd_binding):
LOG.info(_LI('Attempting to schedule router %s.'),
r_hd_binding['router']['id'])
result = self._create_csr1kv_vm_hosting_device(context.elevated())
if result is None:
# CSR1kv hosting device creation was unsuccessful so backlog
# it for another scheduling attempt later.
self.backlog_router(r_hd_binding['router'])
return False
with context.session.begin(subtransactions=True):
router = r_hd_binding['router']
r_hd_binding.hosting_device = result
self.remove_router_from_backlog(router['id'])
LOG.info(_LI('Successfully scheduled router %(r_id)s to '
'hosting device %(d_id)s'),
{'r_id': r_hd_binding['router']['id'],
'd_id': result['id']})
return True
def unschedule_router_from_hosting_device(self, context, r_hd_binding):
LOG.info(_LI('Un-schedule router %s.'),
r_hd_binding['router']['id'])
hosting_device = r_hd_binding['hosting_device']
if r_hd_binding['hosting_device'] is None:
return False
self._delete_service_vm_hosting_device(context.elevated(),
hosting_device)
@lockutils.synchronized('routers', 'neutron-')
def backlog_router(self, router):
if ((router or {}).get('id') is None or
router['id'] in self._backlogged_routers):
return
LOG.info(_LI('Backlogging router %s for renewed scheduling attempt '
'later'), router['id'])
self._backlogged_routers[router['id']] = router
@lockutils.synchronized('routers', 'neutron-')
def remove_router_from_backlog(self, id):
self._backlogged_routers.pop(id, None)
LOG.info(_LI('Router %s removed from backlog'), id)
@lockutils.synchronized('routerbacklog', 'neutron-')
def _process_backlogged_routers(self):
if self._refresh_router_backlog:
self._sync_router_backlog()
if not self._backlogged_routers:
return
context = n_context.get_admin_context()
scheduled_routers = []
LOG.info(_LI('Processing router (scheduling) backlog'))
# try to reschedule
for r_id, router in self._backlogged_routers.items():
self._add_type_and_hosting_device_info(context, router)
if router.get('hosting_device'):
# scheduling attempt succeeded
scheduled_routers.append(router)
self._backlogged_routers.pop(r_id, None)
# notify cfg agents so the scheduled routers are instantiated
if scheduled_routers:
self.l3_cfg_rpc_notifier.routers_updated(context,
scheduled_routers)
def _setup_backlog_handling(self):
self._heartbeat = loopingcall.FixedIntervalLoopingCall(
self._process_backlogged_routers)
self._heartbeat.start(
interval=cfg.CONF.general.backlog_processing_interval)
def _sync_router_backlog(self):
LOG.info(_LI('Synchronizing router (scheduling) backlog'))
context = n_context.get_admin_context()
query = context.session.query(l3_models.RouterHostingDeviceBinding)
query = query.options(joinedload('router'))
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
expr.null())
for binding in query:
router = self._make_router_dict(binding.router,
process_extensions=False)
self._backlogged_routers[binding.router_id] = router
self._refresh_router_backlog = False
def _get_router_binding_info(self, context, id, load_hd_info=True):
query = context.session.query(l3_models.RouterHostingDeviceBinding)
if load_hd_info:
query = query.options(joinedload('hosting_device'))
query = query.filter(l3_models.RouterHostingDeviceBinding.router_id ==
id)
try:
return query.one()
except exc.NoResultFound:
# This should not happen
LOG.error(_LE('DB inconsistency: No type and hosting info '
'associated with router %s'), id)
raise RouterBindingInfoError(router_id=id)
except exc.MultipleResultsFound:
# This should not happen either
LOG.error(_LE('DB inconsistency: Multiple type and hosting info '
'associated with router %s'), id)
raise RouterBindingInfoError(router_id=id)
def _get_hosting_device_bindings(self, context, id, load_routers=False,
load_hosting_device=False):
query = context.session.query(l3_models.RouterHostingDeviceBinding)
if load_routers:
query = query.options(joinedload('router'))
if load_hosting_device:
query = query.options(joinedload('hosting_device'))
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id == id)
return query.all()
def _add_type_and_hosting_device_info(self, context, router,
binding_info=None, schedule=True):
"""Adds type and hosting device information to a router."""
try:
if binding_info is None:
binding_info = self._get_router_binding_info(context,
router['id'])
except RouterBindingInfoError:
LOG.error(_LE('DB inconsistency: No hosting info associated with '
'router %s'), router['id'])
router['hosting_device'] = None
return
router['router_type'] = {
'id': None,
'name': 'CSR1kv_router',
'cfg_agent_driver': (cfg.CONF.hosting_devices
.csr1kv_cfgagent_router_driver)}
if binding_info.hosting_device is None and schedule:
# This router has not been scheduled to a hosting device
# so we try to do it now.
self.schedule_router_on_hosting_device(context, binding_info)
context.session.expire(binding_info)
if binding_info.hosting_device is None:
router['hosting_device'] = None
else:
router['hosting_device'] = self.get_device_info_for_agent(
binding_info.hosting_device)
def _add_hosting_port_info(self, context, router, plugging_driver):
"""Adds hosting port information to router ports.
        We only populate hosting port info, i.e., reach here, if the
        router has been scheduled to a hosting device. Hence this is
        a good place to allocate hosting ports to the router ports.
"""
# cache of hosting port information: {mac_addr: {'name': port_name}}
hosting_pdata = {}
if router['external_gateway_info'] is not None:
h_info, did_allocation = self._populate_hosting_info_for_port(
context, router['id'], router['gw_port'],
router['hosting_device'], hosting_pdata, plugging_driver)
for itfc in router.get(l3_constants.INTERFACE_KEY, []):
h_info, did_allocation = self._populate_hosting_info_for_port(
context, router['id'], itfc, router['hosting_device'],
hosting_pdata, plugging_driver)
def _populate_hosting_info_for_port(self, context, router_id, port,
hosting_device, hosting_pdata,
plugging_driver):
port_db = self._core_plugin._get_port(context, port['id'])
h_info = port_db.hosting_info
new_allocation = False
if h_info is None:
# The port does not yet have a hosting port so allocate one now
h_info = self._allocate_hosting_port(
context, router_id, port_db, hosting_device['id'],
plugging_driver)
if h_info is None:
# This should not happen but just in case ...
port['hosting_info'] = None
return None, new_allocation
else:
new_allocation = True
if hosting_pdata.get('mac') is None:
p_data = self._core_plugin.get_port(
context, h_info.hosting_port_id, ['mac_address', 'name'])
hosting_pdata['mac'] = p_data['mac_address']
hosting_pdata['name'] = p_data['name']
# Including MAC address of hosting port so L3CfgAgent can easily
# determine which VM VIF to configure VLAN sub-interface on.
port['hosting_info'] = {'hosting_port_id': h_info.hosting_port_id,
'hosting_mac': hosting_pdata.get('mac'),
'hosting_port_name': hosting_pdata.get('name')}
plugging_driver.extend_hosting_port_info(
context, port_db, port['hosting_info'])
return h_info, new_allocation
def _allocate_hosting_port(self, context, router_id, port_db,
hosting_device_id, plugging_driver):
net_data = self._core_plugin.get_network(
context, port_db['network_id'], [pr_net.NETWORK_TYPE])
network_type = net_data.get(pr_net.NETWORK_TYPE)
alloc = plugging_driver.allocate_hosting_port(
context, router_id, port_db, network_type, hosting_device_id)
if alloc is None:
LOG.error(_LE('Failed to allocate hosting port for port %s'),
port_db['id'])
return
with context.session.begin(subtransactions=True):
h_info = l3_models.HostedHostingPortBinding(
logical_resource_id=router_id,
logical_port_id=port_db['id'],
network_type=network_type,
hosting_port_id=alloc['allocated_port_id'],
segmentation_id=alloc['allocated_vlan'])
context.session.add(h_info)
context.session.expire(port_db)
# allocation succeeded so establish connectivity for logical port
context.session.expire(h_info)
plugging_driver.setup_logical_port_connectivity(context, port_db)
return h_info
def _get_router_port_db_on_subnet(self, context, router_id, subnet):
try:
rport_qry = context.session.query(models_v2.Port)
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet['id']:
return p
except exc.NoResultFound:
return
def list_active_sync_routers_on_hosting_devices(self, context, host,
router_ids=None,
hosting_device_ids=None):
agent = self._get_agent_by_type_and_host(
context, c_const.AGENT_TYPE_CFG, host)
if not agent.admin_state_up:
return []
query = context.session.query(
l3_models.RouterHostingDeviceBinding.router_id)
query = query.join(l3_models.HostingDevice)
query = query.filter(l3_models.HostingDevice.cfg_agent_id == agent.id)
if router_ids:
if len(router_ids) == 1:
query = query.filter(
l3_models.RouterHostingDeviceBinding.router_id ==
router_ids[0])
else:
query = query.filter(
l3_models.RouterHostingDeviceBinding.router_id.in_(
router_ids))
if hosting_device_ids:
if len(hosting_device_ids) == 1:
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
hosting_device_ids[0])
elif len(hosting_device_ids) > 1:
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id.in_(
hosting_device_ids))
router_ids = [item[0] for item in query]
if router_ids:
return self.get_sync_data_ext(context, router_ids=router_ids,
active=True)
else:
return []
def get_active_routers_for_host(self, context, host):
query = context.session.query(
l3_models.RouterHostingDeviceBinding.router_id)
query = query.join(
models_v2.Port,
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
models_v2.Port.device_id)
query = query.join(p_binding.PortBindingPort)
query = query.filter(p_binding.PortBindingPort.host == host)
query = query.filter(models_v2.Port.name == 'mgmt')
router_ids = [item[0] for item in query]
if router_ids:
return self.get_sync_data_ext(context, router_ids=router_ids,
active=True)
else:
return []
@staticmethod
def _agent_state_filter(check_active, last_heartbeat):
"""Filters only active agents, if requested."""
if not check_active:
return True
return not agents_db.AgentDbMixin.is_agent_down(last_heartbeat)
def get_host_for_router(self, context, router, admin_state_up=None,
check_active=False):
query = context.session.query(agents_db.Agent.host,
agents_db.Agent.heartbeat_timestamp)
query = query.join(
p_binding.PortBindingPort,
p_binding.PortBindingPort.host == agents_db.Agent.host)
query = query.join(
models_v2.Port,
models_v2.Port.id == p_binding.PortBindingPort.port_id)
query = query.join(
l3_models.RouterHostingDeviceBinding,
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
models_v2.Port.device_id)
query = query.filter(
agents_db.Agent.topic == topics.L3_AGENT,
l3_models.RouterHostingDeviceBinding.router_id == router)
if admin_state_up is not None:
query = query.filter(
agents_db.Agent.admin_state_up == admin_state_up)
entry = query.first()
if entry and L3RouterApplianceDBMixin._agent_state_filter(check_active,
entry[1]):
return entry[0]
return ""
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
########################################
##
# @author: Amyth
# @email: [email protected]
# @website: www.techstricks.com
# @created_date: 31-03-2017
# @last_modify: Fri Mar 31 11:21:22 2017
##
########################################
from collections import OrderedDict
import datetime
import logging
import operator
from itertools import groupby
from django.conf import settings
from django.contrib import messages as django_messages
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.shortcuts import redirect
from django.views.generic import TemplateView, ListView
from django.utils import timezone
from apps.adverts.documents import Advert, Impression
from apps.messages.documents import RecruiterMessages
from .predicates import is_allowed_campaign
import rules
logger = logging.getLogger('main')
class IndexView(TemplateView):
template_name = "home/index.html"
def get_context_data(self, *args, **kwargs):
context = super(IndexView, self).get_context_data(*args, **kwargs)
if self.request.method == 'POST':
request = self.request
sdate = request.POST.get('start_date')
edate = request.POST.get('end_date')
cams = request.POST.get('selected')
camp_id = request.POST.get('camp_id')
uid = request.POST.get('sent_by')
main_data, messages = self.get_numbers(sdate=sdate,
edate=edate, cams=cams, camp_id=camp_id, uid=uid)
context['main_data'] = main_data
else:
selected_data = {}
main_data, messages = self.get_numbers()
context["main_data"] = main_data
selected_data['start_date'] = (
timezone.now() + timezone.timedelta(days=-1)).strftime('%a %b %d %Y')
context['selected_data'] = selected_data
#campaigns = list(set([m.campaign for m in messages if not m.campaign.startswith('RevivalEmails_')]))
campaigns = list(set([m.campaign for m in messages]))
context['campaigns'] = [{'name': c} for c in campaigns]
return context
def get_numbers(self, sdate=None, edate=None, cams=None, camp_id=None, uid=None):
allowed_cams = []
restricted_campaigns = []
results = []
query_filter = {}
# restrict campaigns
if (not cams) and (self.request.user) and (self.request.user.username in settings.RESTRICTED_USERS):
cams = 'sendJob'
sdate = self.format_date(sdate) if sdate else datetime.datetime.now() + datetime.timedelta(days=-1)
edate = self.format_date(edate) if edate else None
if cams:
cams = cams.split(',')
for cam in cams:
if rules.test_rule('is_allowed_campaign', self.request.user, cam):
allowed_cams.append(cam)
else:
restricted_campaigns.append(cam)
if restricted_campaigns:
django_messages.error(self.request, 'You do not have permission to access the mentioned campaign(s): %s' % ', '.join(restricted_campaigns))
else:
admins = Group.objects.get(name='administrators')
if admins not in self.request.user.groups.all():
user_group = self.request.user.groups.first()
allowed_cams = settings.CAMPAIGN_PERMISSIONS.get(
user_group.name, [])
query_filter['campaign__in'] = allowed_cams
if sdate:
if not edate:
sdate = timezone.datetime.strptime(sdate.strftime('%D'), '%m/%d/%y')
start_time = sdate.replace(hour=0, minute=0, second=0)
end_time = sdate.replace(hour=23, minute=59, second=59)
query_filter['date__gte'] = start_time
query_filter['date__lte'] = end_time
else:
query_filter['date__gte'] = sdate
query_filter['date__lte'] = edate
if camp_id:
query_filter['campaign_id'] = camp_id
if uid:
query_filter['recruiter'] = uid
#logger.debug(query_filter)
message_qs = RecruiterMessages.objects.filter(**query_filter)
messages = list(message_qs)
sent = sum([x.sent for x in messages if x.sent])
opened = sum([x.opened for x in messages if x.opened])
clicked = sum([x.clicked for x in messages if x.clicked])
results.append({'sent': sent})
results.append({'opened': opened})
results.append({'clicked': clicked})
if (sdate and edate):
the_dates = [sdate + datetime.timedelta(n) for n in range(int((edate - sdate).days) + 1)]
dw_results = OrderedDict()
# campaignwise data
if uid:
                cids = list(set([x.campaign_id for x in message_qs]))
for campaign_id in cids:
for d in the_dates:
start_time = d.replace(hour=0, minute=0, second=0)
end_time = d.replace(hour=23, minute=59, second=59)
filtered = list(message_qs.filter(date__gte=start_time, date__lte=end_time, campaign_id=campaign_id))
if filtered:
dw_results[campaign_id] = {d.strftime('%B %d %Y'): {
'sent': sum([x.sent for x in filtered if x.sent]),
'opened': sum([x.opened for x in filtered if x.opened]),
'clicked': sum([x.clicked for x in filtered if x.clicked]),
}}
results.append({'campaignwise': dw_results})
return results, messages
# datewise data
for d in the_dates:
start_time = d.replace(hour=0, minute=0, second=0)
end_time = d.replace(hour=23, minute=59, second=59)
filtered = list(message_qs.filter(date__gte=start_time, date__lte=end_time))
dw_results[d.strftime('%B %d %Y')] = {
'sent': sum([x.sent for x in filtered if x.sent]),
'opened': sum([x.opened for x in filtered if x.opened]),
'clicked': sum([x.clicked for x in filtered if x.clicked]),
'primary_clicks': sum([x.primary_clicks for x in filtered if x.primary_clicks])
}
results.append({'datewise': dw_results})
return results, messages
def format_date(self, d):
return timezone.datetime.strptime(d, '%a %b %d %Y')
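    # Example (hypothetical input): format_date('Fri Mar 31 2017') returns
    # datetime(2017, 3, 31, 0, 0), matching the '%a %b %d %Y' strings that
    # the views above produce for 'start_date'.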
def post(self, request, *args, **kwargs):
context = self.get_context_data()
context['selected_data'] = self.get_selected_data(request.POST)
        return super(IndexView, self).render_to_response(context)
def get_selected_data(self, post):
selected_data = {}
selected = post.get('selected', '')
selected = selected.split(',') if selected else []
selected_data['selected'] = selected
selected_data['start_date'] = post.get('start_date')
selected_data['end_date'] = post.get('end_date')
selected_data['sent_by'] = post.get('sent_by')
selected_data['camp_id'] = post.get('camp_id')
return selected_data
class TrackAds(TemplateView):
template_name = "home/track_ads.html"
def get_context_data(self, *args, **kwargs):
context = super(TrackAds, self).get_context_data(*args, **kwargs)
if self.request.method == 'POST':
request = self.request
sdate = request.POST.get('start_date')
edate = request.POST.get('end_date')
tracking_id = request.POST.get('tracking_id')
tracking_source = request.POST.get('tracking_source')
tracking_medium = request.POST.get('tracking_medium')
tracking_drive = request.POST.get('tracking_drive')
main_data = self.get_numbers(sdate=sdate, edate=edate,
tracking_id=tracking_id, tracking_medium=tracking_medium,
tracking_source=tracking_source, tracking_drive=tracking_drive)
context['main_data'] = main_data
else:
selected_data = {}
main_data = self.get_numbers()
context["main_data"] = main_data
selected_data['start_date'] = (
timezone.now() + timezone.timedelta(days=-1)).strftime('%a %b %d %Y')
context['selected_data'] = selected_data
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
context['selected_data'] = self.get_selected_data(request.POST)
        return super(TrackAds, self).render_to_response(context)
def get_selected_data(self, post):
selected_data = {}
selected_data['start_date'] = post.get('start_date')
selected_data['end_date'] = post.get('end_date')
selected_data['tracking_id'] = post.get('tracking_id')
selected_data['tracking_source'] = post.get('tracking_source')
selected_data['tracking_medium'] = post.get('tracking_medium')
selected_data['tracking_drive'] = post.get('tracking_drive')
return selected_data
def get_numbers(self, sdate=None, edate=None, tracking_id=None, tracking_source=None,
tracking_drive=None, tracking_medium=None):
results = []
query_filter = {}
sdate = self.format_date(
sdate) if sdate else datetime.datetime.now() + datetime.timedelta(days=-1)
edate = self.format_date(edate) if edate else None
if sdate:
if not edate:
sdate = timezone.datetime.strptime(
sdate.strftime('%D'), '%m/%d/%y')
start_time = sdate.replace(hour=0, minute=0, second=0)
end_time = sdate.replace(hour=23, minute=59, second=59)
query_filter['date__gte'] = start_time
query_filter['date__lte'] = end_time
else:
query_filter['date__gte'] = sdate
query_filter['date__lte'] = edate
if tracking_id:
query_filter['tracking_id'] = tracking_id
if tracking_source:
query_filter['tracking_source'] = tracking_source
if tracking_medium:
query_filter['tracking_medium'] = tracking_medium
if tracking_drive:
query_filter['tracking_drive'] = tracking_drive
adverts = list(Advert.objects.filter(**query_filter).order_by('-date'))
get_attr = operator.attrgetter('date')
results = {k.strftime('%d %b %Y'): list(g) for k, g in groupby(
sorted(adverts, key=get_attr), get_attr)}
results = OrderedDict(sorted(results.items(), key=lambda x: x[0]))
return results
def format_date(self, d):
return timezone.datetime.strptime(d, '%a %b %d %Y')
class TrackImpressions(TemplateView):
template_name = "home/track_imps.html"
def get_context_data(self, *args, **kwargs):
context = super(TrackImpressions, self).get_context_data(*args, **kwargs)
if self.request.method == 'POST':
request = self.request
sdate = request.POST.get('start_date')
edate = request.POST.get('end_date')
tracking_id = request.POST.get('tracking_id')
tracking_source = request.POST.get('tracking_source')
tracking_medium = request.POST.get('tracking_medium')
tracking_drive = request.POST.get('tracking_drive')
main_data, total_count = self.get_numbers(sdate=sdate, edate=edate,
tracking_id=tracking_id, tracking_medium=tracking_medium,
tracking_source=tracking_source, tracking_drive=tracking_drive)
context['main_data'] = main_data
else:
selected_data = {}
main_data, total_count = self.get_numbers()
context["main_data"] = main_data
selected_data['start_date'] = (
timezone.now() + timezone.timedelta(days=-1)).strftime('%a %b %d %Y')
context['selected_data'] = selected_data
context['total_count'] = total_count
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
context['selected_data'] = self.get_selected_data(request.POST)
        return super(TrackImpressions, self).render_to_response(context)
def get_selected_data(self, post):
selected_data = {}
selected_data['start_date'] = post.get('start_date')
selected_data['end_date'] = post.get('end_date')
selected_data['tracking_id'] = post.get('tracking_id')
selected_data['tracking_source'] = post.get('tracking_source')
selected_data['tracking_medium'] = post.get('tracking_medium')
selected_data['tracking_drive'] = post.get('tracking_drive')
return selected_data
def get_numbers(self, sdate=None, edate=None, tracking_id=None, tracking_source=None,
tracking_drive=None, tracking_medium=None):
results = []
query_filter = {}
sdate = self.format_date(
sdate) if sdate else datetime.datetime.now() + datetime.timedelta(days=-1)
edate = self.format_date(edate) if edate else None
if sdate:
if not edate:
sdate = timezone.datetime.strptime(
sdate.strftime('%D'), '%m/%d/%y')
start_time = sdate.replace(hour=0, minute=0, second=0)
end_time = sdate.replace(hour=23, minute=59, second=59)
query_filter['date__gte'] = start_time
query_filter['date__lte'] = end_time
else:
query_filter['date__gte'] = sdate
query_filter['date__lte'] = edate
if tracking_id:
query_filter['tracking_id'] = tracking_id
if tracking_source:
query_filter['tracking_source'] = tracking_source
if tracking_medium:
query_filter['tracking_medium'] = tracking_medium
if tracking_drive:
query_filter['tracking_drive'] = tracking_drive
adverts = list(Impression.objects.filter(**query_filter).order_by('-date'))
total_count = sum([x.count for x in adverts])
get_attr = operator.attrgetter('date')
results = {k.strftime('%d %b %Y'): list(g) for k, g in groupby(
sorted(adverts, key=get_attr), get_attr)}
results = OrderedDict(sorted(results.items(), key=lambda x: x[0]))
return results, total_count
def format_date(self, d):
return timezone.datetime.strptime(d, '%a %b %d %Y')
def logout_view(request):
logout(request)
return redirect('/login/')
# View variables
index = login_required(IndexView.as_view())
track_ads = login_required(TrackAds.as_view())
track_imps = login_required(TrackImpressions.as_view())
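# Illustrative only: the project's URL configuration is not shown here; a
# minimal Django 1.x style urls.py wiring these views might look like the
# sketch below (module path and URL patterns are hypothetical).
#
#     from django.conf.urls import url
#     from apps.home import views
#
#     urlpatterns = [
#         url(r'^$', views.index, name='index'),
#         url(r'^track/ads/$', views.track_ads, name='track_ads'),
#         url(r'^track/impressions/$', views.track_imps, name='track_imps'),
#     ]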
|
|
# -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
import csv
import logging
from datetime import datetime
from functools import wraps
from json import dumps
from threading import Lock
from time import time as timer
from flask import Response
from lxml import etree
from presence_analyzer.main import app
log = logging.getLogger(__name__) # pylint: disable=invalid-name
TREE = etree.parse(app.config['DATA_XML']) # pylint: disable=no-member
class cache(object): # pylint: disable=invalid-name, too-few-public-methods
"""
Decorator class
"""
def __init__(self, seconds):
"""
If there are decorator arguments, the function
to be decorated is not passed to the constructor!
"""
self.seconds = seconds
self.mem = {}
self.timer_dict = {}
self.lock = Lock()
def __call__(self, func):
"""
If there are decorator arguments, __call__() is only called
once, as part of the decoration process! You can only give
it a single argument, which is the function object.
"""
def wrapped_func(*args):
"""
            Check whether the function's result is already cached and still
            valid; if not, call the function and cache its result.
"""
self.lock.acquire()
try:
if func.__name__ in self.mem\
and self.cache_is_valid(func.__name__):
return self.mem.get(func.__name__)
else:
self.mem[func.__name__] = func(*args)
return self.mem.get(func.__name__)
finally:
self.lock.release()
return wrapped_func
def cache_is_valid(self, func_name):
"""
Checking if cache content is not expired
"""
if func_name in self.timer_dict\
and not self.timer_dict.get(func_name) < timer():
return True
else:
self.timer_dict[func_name] = timer() + self.seconds
return False
def jsonify(function):
"""
Creates a response with the JSON representation of wrapped function result.
"""
@wraps(function)
def inner(*args, **kwargs):
"""
This docstring will be overridden by @wraps decorator.
"""
return Response(
dumps(function(*args, **kwargs)),
mimetype='application/json'
)
return inner
@cache(600)
def get_data():
"""
Extracts presence data from CSV file and groups it by user_id.
It creates structure like this:
data = {
'user_id': {
datetime.date(2013, 10, 1): {
'start': datetime.time(9, 0, 0),
'end': datetime.time(17, 30, 0),
},
datetime.date(2013, 10, 2): {
'start': datetime.time(8, 30, 0),
'end': datetime.time(16, 45, 0),
},
}
}
"""
data = {}
with open(app.config['DATA_CSV'], 'r') as csvfile:
presence_reader = csv.reader(csvfile, delimiter=',')
for i, row in enumerate(presence_reader):
if len(row) != 4:
# ignore header and footer lines
continue
try:
user_id = int(row[0])
date = datetime.strptime(row[1], '%Y-%m-%d').date()
start = datetime.strptime(row[2], '%H:%M:%S').time()
end = datetime.strptime(row[3], '%H:%M:%S').time()
except (ValueError, TypeError):
log.debug('Problem with line %d: ', i, exc_info=True)
data.setdefault(user_id, {})[date] = {'start': start, 'end': end}
return data
def get_users_from_xml():
"""
Extracts users data from xml file. Returns dict.
It creates structure like this:
{'151': {'avatar': '/api/images/users/151', 'name': 'Dawid J.'}}
"""
users_data = {}
for element in TREE.iter('user'):
users_data[element.get('id')] = {
'avatar': element[0].text, 'name': element[1].text}
return users_data
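# Illustrative only: get_users_from_xml() above assumes that inside each
# <user> element the avatar URL comes first and the display name second
# (inferred from the element[0]/element[1] indexing), e.g. a hypothetical
# fragment:
#
#     <user id="151">
#         <avatar>/api/images/users/151</avatar>
#         <name>Dawid J.</name>
#     </user>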
def get_users():
"""
    Compares the user IDs extracted from the XML file with those from the CSV
    file. If an ID from the CSV file is not in the XML file, default values
    are added for it.
Structure of dict is below:
{'151': {'avatar': '/api/images/users/151', 'name': 'Dawid J.'},
18: {'avatar': '/api/images/users/00', 'name': 'User 18'}}
"""
out = {}
data = get_data()
users = get_users_from_xml()
for i in data.keys():
user = users.get(str(i))
if user:
out[i] = user
else:
out[i] = {
'avatar': '/api/images/users/00', 'name': 'User {0}'.format(i)}
return out
def get_server():
"""
Extracts hostname and protocol data from xml file. Returns string.
"""
    # The indices below assume a fixed child-element order inside the <server>
    # block of the XML file.
    protocol = TREE.find('server')[2].text
    host = TREE.find('server')[0].text
    return '{0}://{1}'.format(protocol, host)
def seconds_to_time(seconds):
"""
Calculate time HH:MM:SS from seconds since midnight
"""
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    hms = "%d:%02d:%02d" % (hours, minutes, seconds)
    return hms
def group_by_weekday(items):
"""
Groups presence entries by weekday.
"""
result = [[], [], [], [], [], [], []] # one list for every day in week
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()].append(interval(start, end))
return result
def group_start_end(items):
"""
    Calculate the average start and end times by weekday. Returns a tuple
    with two lists.
"""
result_starts = [[], [], [], [], [], [], []]
result_ends = [[], [], [], [], [], [], []]
for date in items:
start = items[date]['start']
end = items[date]['end']
result_starts[date.weekday()].append(seconds_since_midnight(start))
result_ends[date.weekday()].append(seconds_since_midnight(end))
result_starts = [
seconds_to_time(mean(x)) if len(x) > 0 else x for x in result_starts]
result_ends = [
seconds_to_time(mean(x)) if len(x) > 0 else x for x in result_ends]
return result_starts, result_ends
def seconds_since_midnight(time):
"""
Calculates amount of seconds since midnight.
"""
return time.hour * 3600 + time.minute * 60 + time.second
def interval(start, end):
"""
    Calculates interval in seconds between two datetime.time objects.
"""
return seconds_since_midnight(end) - seconds_since_midnight(start)
def mean(items):
"""
Calculates arithmetic mean. Returns zero for empty lists.
"""
return float(sum(items)) / len(items) if len(items) > 0 else 0
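# Illustrative only: a minimal, self-contained sketch of how the grouping and
# averaging helpers above fit together. The sample data is hypothetical.
def _example_mean_presence_by_weekday():
    """Return the mean presence time (in seconds) for each weekday."""
    import datetime
    items = {
        datetime.date(2013, 10, 1): {  # a Tuesday
            'start': datetime.time(9, 0, 0),
            'end': datetime.time(17, 30, 0),
        },
        datetime.date(2013, 10, 8): {  # the following Tuesday
            'start': datetime.time(8, 30, 0),
            'end': datetime.time(16, 30, 0),
        },
    }
    weekdays = group_by_weekday(items)
    return [mean(intervals) for intervals in weekdays]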
|
|
'''
This package manages the interface to monit for the set of services
managed by this instance of sirikata-sample-deployment (ideally just
this one per node). It's preferable to collect all services under a
single monit daemon, with a single config. This module manages getting
services into that config, removing them, and working with the monit
daemon.
To avoid having to deal with databases, we just generate a config file
and use blocks labelled with the corresponding service name in a
comment so we can pick them out for update/removal. You can find the
config file in data/monit.rc
'''
import util
import service
import subprocess, os, stat, time
def _cfgfile():
return util.data_path('monit.rc')
def _logfile():
return util.data_path('monit.log')
def monitize_name(n):
'''Monit service names can't have some characters. This transforms them to ensure names are monit-compatible.'''
n = n.replace('/', '_')
return n
def base_config():
'''Get the basic monit config, which should go at the head of the file'''
return """
set daemon 120
set logfile %(logfile)s
set httpd port 2812
use address localhost
allow localhost
""" % { 'logfile' : _logfile() }
def service_config(name):
'''Get a config block for the current service (using the current serviceconfig)'''
params = service.get_run_params(name)
# We need to handle directories carefully. We need to be careful
# about two things. First, we need to get the working directory
# right. For that, we'll make the commands cd into the directory
# before executing the actual script.
#
# We also need to make sure we get the path to the script
# directory right. For that, we can just use the full directory
# path to this script since it is colocated with __main__.py.
#
# Note that monit seems to make the bizarre decision that it
# should execute scripts from the root directory, so generally you
# should be specifying absolute paths for everything or making
# sure you get yourself into a sane location before doing anything
# else
workdir = os.getcwd()
scriptsdir = os.path.dirname(__file__)
return """
check process %(monit_name)s
with pidfile "%(pidfile)s"
start program = "%(startcommand)s" with timeout 60 seconds
stop program = "%(stopcommand)s"
if 4 restarts within 5 cycles then timeout
""" % {
'name' : params.name,
'monit_name' : monitize_name(params.name),
'pidfile' : params.pidfile,
'startcommand' : "/bin/bash -c 'cd %s; /usr/bin/env python %s service rawstart %s'" % (workdir, scriptsdir, name),
'stopcommand' : "/bin/bash -c 'cd %s; /usr/bin/env python %s service rawstop %s'" % (workdir, scriptsdir, name),
}
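# Illustrative only: for a hypothetical service named "space", the block
# rendered by service_config() above looks roughly like the following
# (paths depend on the local checkout and data directory):
#
#     check process space
#       with pidfile "/path/to/data/space.pid"
#       start program = "/bin/bash -c 'cd /work/dir; /usr/bin/env python /path/to/scripts service rawstart space'" with timeout 60 seconds
#       stop program = "/bin/bash -c 'cd /work/dir; /usr/bin/env python /path/to/scripts service rawstop space'"
#       if 4 restarts within 5 cycles then timeout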
def load_config():
'''
    Tries to load the config file. If it doesn't exist, returns just the
    basic config with no services. Returns a dict of service_name ->
str config.
'''
# Make sure we have something there
if not os.path.exists(_cfgfile()):
return { '__base' : base_config() }
configs = { '__base' : '' }
with open(_cfgfile(), 'r') as f:
cur_service_name = '__base'
for line in f.readlines():
if line.startswith('\n'):
continue
elif line.startswith('### SERVICE '):
# Remove the prefix and make sure it has an entry
cur_service_name = line[(len('### SERVICE ')):-1]
if cur_service_name not in configs:
configs[cur_service_name] = ''
elif line.startswith('### ENDSERVICE'):
cur_service_name = '__base'
else:
# Normal case, just append
configs[cur_service_name] += line
return configs
def save_config(configs, do_reload=True):
'''
Save a config. The parameters should have the same structure as
the result of load_config.
'''
with open(_cfgfile(), 'w') as f:
for serv in sorted(configs.keys()):
if serv != '__base':
f.write('### SERVICE ' + serv + '\n')
f.write(configs[serv])
if serv != '__base':
f.write('### ENDSERVICE ' + serv + '\n\n\n')
os.chmod(_cfgfile(), stat.S_IRUSR | stat.S_IWUSR)
# Force a reload
if do_reload:
monit_cmd = ['monit', '-c', _cfgfile(), 'reload']
subprocess.call(monit_cmd)
time.sleep(5)
monit_cmd = ['monit', '-c', _cfgfile(), 'start', 'all']
subprocess.call(monit_cmd)
time.sleep(5)
# Commands against monit. These *mostly* shouldn't be exposed directly
# since they are mostly just helpers for starting/stopping services
# that have been placed under monit control. The exception will be
# restarting all services that should be running, e.g. after a
# restart.
def command_monit_start(*args):
'''Start the monit daemon. You should run this before starting any monit-enabled services.'''
# Ensure there's a config by loading it and resaving it. Don't
# reload since there shouldn't be anything running, or if there
# is, we'll kill it and start from scratch anyway.
config = load_config()
save_config(config, do_reload=False)
# Kill the daemon if it's already running
command_monit_stop(*args)
# Start new daemon with new config
monit_cmd = ['monit', '-c', _cfgfile()]
subprocess.call(monit_cmd)
time.sleep(5)
# Start all services. This case really just covers the reboot
# case. Otherwise, we shouldn't have to do this. Of course, after
# reboot you might need to do this more carefully anyway since
# startup order is possibly important in some cases.
monit_cmd = ['monit', '-c', _cfgfile(), 'start', 'all']
time.sleep(5)
return subprocess.call(monit_cmd)
def command_monit_stop(*args):
'''Stop the monit daemon.'''
# Quit current instance
monit_cmd = ['monit', '-c', _cfgfile(), '-I', 'quit']
subprocess.call(monit_cmd)
time.sleep(5)
def start_monit_service(name):
# There's actually nothing to do here. The save_config() call that
# you need to do to use this will update monit, and if its not
# running this wouldn't have done anything anyway. We just have
# this here so the calls are symmetric.
pass
def stop_monit_service(name):
monit_cmd = ['monit', '-c', _cfgfile(), 'stop', monitize_name(name)]
return subprocess.call(monit_cmd)
# Control of services. Only expose start and stop, but start and stop
# also add and remove entries so that between restarts we don't
# accidentally restart a service we don't need anymore.
def start_service(name):
config = load_config()
# We might need to stop an existing service and replace it
if name in config:
stop_monit_service(name)
del config[name]
# Then put the new config in place and save, then start
config[name] = service_config(name)
save_config(config)
start_monit_service(name)
def stop_service(name):
config = load_config()
if name in config:
# Make sure we stop it first
stop_monit_service(name)
# Then clear it out of the config and save
del config[name]
save_config(config)
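# Illustrative only: a typical flow for putting a service under monit control
# using the helpers above (the service name 'space' is hypothetical, and
# running this requires a working monit install plus a matching service
# definition for get_run_params()):
#
#     command_monit_start()     # ensure the monit daemon is running
#     start_service('space')    # write its block into monit.rc and start it
#     ...
#     stop_service('space')     # stop it and drop it from the config
#     command_monit_stop()      # shut the daemon down when finished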
|
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Python test reporter that generates test reports in JUnit XML format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import threading
import time
import traceback
import unittest
from xml.sax import saxutils
import six
# See http://www.w3.org/TR/REC-xml/#NT-Char
_bad_control_character_codes = set(range(0, 0x20)) - {0x9, 0xA, 0xD}
_control_character_conversions = {
chr(i): '\\x{:02x}'.format(i) for i in _bad_control_character_codes}
_escape_xml_attr_conversions = {
    '"': '&quot;',
    "'": '&apos;',
    '\n': '&#xA;',
    '\t': '&#x9;',
    '\r': '&#xD;',
    ' ': '&#x20;'}
_escape_xml_attr_conversions.update(_control_character_conversions)
# When class or module level function fails, unittest/suite.py adds a
# _ErrorHolder instance instead of a real TestCase, and it has a description
# like "setUpClass (__main__.MyTestCase)".
_CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX = re.compile(r'^(\w+) \((\S+)\)$')
# NOTE: while saxutils.quoteattr() theoretically does the same thing, it
# seems to often end up being too smart for its own good, not escaping
# properly. This function is much more reliable.
def _escape_xml_attr(content):
"""Escapes xml attributes."""
# Note: saxutils doesn't escape the quotes.
return saxutils.escape(content, _escape_xml_attr_conversions)
def _escape_cdata(s):
"""Escapes a string to be used as XML CDATA.
CDATA characters are treated strictly as character data, not as XML markup,
but there are still certain restrictions on them.
Args:
s: the string to be escaped.
Returns:
An escaped version of the input string.
"""
for char, escaped in six.iteritems(_control_character_conversions):
s = s.replace(char, escaped)
return s.replace(']]>', ']] >')
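# Illustrative examples (not part of the original module): with the conversion
# tables above, _escape_xml_attr('a"b') yields 'a&quot;b', and _escape_cdata
# rewrites the CDATA terminator, so _escape_cdata('x]]>y') yields 'x]] >y';
# bad control characters are replaced with printable '\xNN' escapes.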
# Copy time.time which ensures the real time is used internally.
# This prevents bad interactions with tests that stub out time.
_time_copy = time.time
if hasattr(traceback, '_some_str'):
# Use the traceback module str function to format safely.
_safe_str = traceback._some_str
else:
_safe_str = str # pylint: disable=invalid-name
class _TestCaseResult(object):
"""Private helper for _TextAndXMLTestResult that represents a test result.
Attributes:
test: A TestCase instance of an individual test method.
name: The name of the individual test method.
full_class_name: The full name of the test class.
run_time: The duration (in seconds) it took to run the test.
errors: A list of error 4-tuples. Error tuple entries are
1) a string identifier of either "failure" or "error"
2) an exception_type
3) an exception_message
4) a string version of a sys.exc_info()-style tuple of values
('error', err[0], err[1], self._exc_info_to_string(err))
If the length of errors is 0, then the test is either passed or
skipped.
skip_reason: A string explaining why the test was skipped.
"""
def __init__(self, test):
self.run_time = -1
self.skip_reason = None
self.errors = []
self.test = test
# Parse the test id to get its test name and full class path.
    # Unfortunately there is no better way of knowing the test and class.
    # Worse, unittest uses _ErrorHolder instances to represent class / module
    # level failures.
test_desc = test.id() or str(test)
# Check if it's something like "setUpClass (__main__.TestCase)".
match = _CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX.match(test_desc)
if match:
name = match.group(1)
full_class_name = match.group(2)
else:
class_name = unittest.util.strclass(test.__class__)
if test_desc.startswith(class_name + '.'):
# In a typical unittest.TestCase scenario, test.id() returns with
# a class name formatted using unittest.util.strclass.
name = test_desc[len(class_name)+1:]
full_class_name = class_name
else:
# Otherwise make a best effort to guess the test name and full class
# path.
parts = test_desc.rsplit('.', 1)
name = parts[-1]
full_class_name = parts[0] if len(parts) == 2 else ''
self.name = _escape_xml_attr(name)
self.full_class_name = _escape_xml_attr(full_class_name)
def set_run_time(self, time_in_secs):
self.run_time = time_in_secs
def print_xml_summary(self, stream):
"""Prints an XML Summary of a TestCase.
Status and result are populated as per JUnit XML test result reporter.
A test that has been skipped will always have a skip reason,
as every skip method in Python's unittest requires the reason arg to be
passed.
Args:
stream: output stream to write test report XML to
"""
if self.skip_reason is None:
status = 'run'
result = 'completed'
else:
status = 'notrun'
result = 'suppressed'
stream.write(
' <testcase name="%s" status="%s" result="%s" time="%.1f" '
'classname="%s">\n' % (
self.name, status, result, self.run_time, self.full_class_name))
self._print_testcase_details(stream)
stream.write(' </testcase>\n')
def _print_testcase_details(self, stream):
for error in self.errors:
outcome, exception_type, message, error_msg = error # pylint: disable=unpacking-non-sequence
message = _escape_xml_attr(_safe_str(message))
exception_type = _escape_xml_attr(str(exception_type))
error_msg = _escape_cdata(error_msg)
stream.write(' <%s message="%s" type="%s"><![CDATA[%s]]></%s>\n'
% (outcome, message, exception_type, error_msg, outcome))
class _TestSuiteResult(object):
"""Private helper for _TextAndXMLTestResult."""
def __init__(self):
self.suites = {}
self.failure_counts = {}
self.error_counts = {}
def add_test_case_result(self, test_case_result):
suite_name = type(test_case_result.test).__name__
if suite_name == '_ErrorHolder':
# _ErrorHolder is a special case created by unittest for class / module
# level functions.
suite_name = test_case_result.full_class_name.rsplit('.')[-1]
self._setup_test_suite(suite_name)
self.suites[suite_name].append(test_case_result)
for error in test_case_result.errors:
# Only count the first failure or error so that the sum is equal to the
# total number of *testcases* that have failures or errors.
if error[0] == 'failure':
self.failure_counts[suite_name] += 1
break
elif error[0] == 'error':
self.error_counts[suite_name] += 1
break
def print_xml_summary(self, stream):
overall_test_count = sum([len(x) for x in self.suites.values()])
overall_failures = sum(self.failure_counts.values())
overall_errors = sum(self.error_counts.values())
overall_time = 0
for tests in self.suites.values():
overall_time += sum([x.run_time for x in tests])
overall_args = (overall_test_count, overall_failures, overall_errors,
overall_time)
stream.write('<testsuites name="" tests="%d" failures="%d" '
'errors="%d" time="%.1f">\n' % overall_args)
for suite_name in self.suites:
suite = self.suites[suite_name]
suite_time = sum([x.run_time for x in suite])
failures = self.failure_counts[suite_name]
errors = self.error_counts[suite_name]
args = (suite_name, len(suite), failures, errors, suite_time)
stream.write('<testsuite name="%s" tests="%d" failures="%d" '
'errors="%d" time="%.1f">\n' % args)
for test_case_result in suite:
test_case_result.print_xml_summary(stream)
stream.write('</testsuite>\n')
stream.write('</testsuites>\n')
def _setup_test_suite(self, suite_name):
"""Adds a test suite to the set of suites tracked by this test run.
Args:
suite_name: string, The name of the test suite being initialized.
"""
if suite_name in self.suites:
return
self.suites[suite_name] = []
self.failure_counts[suite_name] = 0
self.error_counts[suite_name] = 0
class _TextAndXMLTestResult(unittest.TextTestResult):
"""Private TestResult class that produces both formatted text results and XML.
Used by TextAndXMLTestRunner.
"""
_TEST_SUITE_RESULT_CLASS = _TestSuiteResult
_TEST_CASE_RESULT_CLASS = _TestCaseResult
def __init__(self, xml_stream, stream, descriptions, verbosity,
time_getter=_time_copy):
super(_TextAndXMLTestResult, self).__init__(stream, descriptions, verbosity)
self.xml_stream = xml_stream
self.pending_test_case_results = {}
self.suite = self._TEST_SUITE_RESULT_CLASS()
self.time_getter = time_getter
# This lock guards any mutations on pending_test_case_results.
self._pending_test_case_results_lock = threading.Lock()
def startTest(self, test):
self.start_time = self.time_getter()
super(_TextAndXMLTestResult, self).startTest(test)
def stopTest(self, test):
# Grabbing the write lock to avoid conflicting with stopTestRun.
with self._pending_test_case_results_lock:
super(_TextAndXMLTestResult, self).stopTest(test)
result = self.get_pending_test_case_result(test)
if not result:
test_name = test.id() or str(test)
sys.stderr.write('No pending test case: %s\n' % test_name)
return
test_id = id(test)
run_time = self.time_getter() - self.start_time
result.set_run_time(run_time)
self.suite.add_test_case_result(result)
del self.pending_test_case_results[test_id]
def stopTestRun(self):
    # All pending_test_case_results will be added to the suite and removed from
    # the pending_test_case_results dictionary. Grabbing the write lock prevents
    # results from being added during this process, which could otherwise
    # duplicate adds or accidentally erase newly appended pending results.
with self._pending_test_case_results_lock:
# Errors in the test fixture (setUpModule, tearDownModule,
# setUpClass, tearDownClass) can leave a pending result which
# never gets added to the suite. The runner calls stopTestRun
# which gives us an opportunity to add these errors for
# reporting here.
for test_id in self.pending_test_case_results:
result = self.pending_test_case_results[test_id]
if hasattr(self, 'start_time'):
run_time = self.time_getter() - self.start_time
result.set_run_time(run_time)
self.suite.add_test_case_result(result)
self.pending_test_case_results.clear()
def _exc_info_to_string(self, err, test=None):
"""Converts a sys.exc_info()-style tuple of values into a string.
This method must be overridden because the method signature in
unittest.TestResult changed between Python 2.2 and 2.4.
Args:
err: A sys.exc_info() tuple of values for an error.
test: The test method.
Returns:
A formatted exception string.
"""
if test:
return super(_TextAndXMLTestResult, self)._exc_info_to_string(err, test)
return ''.join(traceback.format_exception(*err))
def add_pending_test_case_result(self, test, error_summary=None,
skip_reason=None):
"""Adds result information to a test case result which may still be running.
If a result entry for the test already exists, add_pending_test_case_result
will add error summary tuples and/or overwrite skip_reason for the result.
If it does not yet exist, a result entry will be created.
Note that a test result is considered to have been run and passed
only if there are no errors or skip_reason.
Args:
test: A test method as defined by unittest
error_summary: A 4-tuple with the following entries:
1) a string identifier of either "failure" or "error"
2) an exception_type
3) an exception_message
4) a string version of a sys.exc_info()-style tuple of values
('error', err[0], err[1], self._exc_info_to_string(err))
If the length of errors is 0, then the test is either passed or
skipped.
skip_reason: a string explaining why the test was skipped
"""
with self._pending_test_case_results_lock:
test_id = id(test)
if test_id not in self.pending_test_case_results:
self.pending_test_case_results[test_id] = self._TEST_CASE_RESULT_CLASS(
test)
if error_summary:
self.pending_test_case_results[test_id].errors.append(error_summary)
if skip_reason:
self.pending_test_case_results[test_id].skip_reason = skip_reason
def delete_pending_test_case_result(self, test):
with self._pending_test_case_results_lock:
test_id = id(test)
del self.pending_test_case_results[test_id]
def get_pending_test_case_result(self, test):
test_id = id(test)
return self.pending_test_case_results.get(test_id, None)
def addSuccess(self, test):
super(_TextAndXMLTestResult, self).addSuccess(test)
self.add_pending_test_case_result(test)
def addError(self, test, err):
super(_TextAndXMLTestResult, self).addError(test, err)
error_summary = ('error', err[0], err[1], self._exc_info_to_string(err))
self.add_pending_test_case_result(test, error_summary=error_summary)
def addFailure(self, test, err):
super(_TextAndXMLTestResult, self).addFailure(test, err)
error_summary = ('failure', err[0], err[1], self._exc_info_to_string(err))
self.add_pending_test_case_result(test, error_summary=error_summary)
def addSkip(self, test, reason):
super(_TextAndXMLTestResult, self).addSkip(test, reason)
self.add_pending_test_case_result(test, skip_reason=reason)
def addExpectedFailure(self, test, err):
super(_TextAndXMLTestResult, self).addExpectedFailure(test, err)
if callable(getattr(test, 'recordProperty', None)):
test.recordProperty('EXPECTED_FAILURE', self._exc_info_to_string(err))
self.add_pending_test_case_result(test)
def addUnexpectedSuccess(self, test):
super(_TextAndXMLTestResult, self).addUnexpectedSuccess(test)
test_name = test.id() or str(test)
error_summary = ('error', '', '',
'Test case %s should have failed, but passed.'
% (test_name))
self.add_pending_test_case_result(test, error_summary=error_summary)
def printErrors(self):
super(_TextAndXMLTestResult, self).printErrors()
self.xml_stream.write('<?xml version="1.0"?>\n')
self.suite.print_xml_summary(self.xml_stream)
class TextAndXMLTestRunner(unittest.TextTestRunner):
"""A test runner that produces both formatted text results and XML.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
_TEST_RESULT_CLASS = _TextAndXMLTestResult
_xml_stream = None
def __init__(self, xml_stream=None, *args, **kwargs):
"""Initialize a TextAndXMLTestRunner.
Args:
xml_stream: file-like or None; XML-formatted test results are output
via this object's write() method. If None (the default), the
new instance behaves as described in the set_default_xml_stream method
documentation below.
*args: passed unmodified to unittest.TextTestRunner.__init__.
**kwargs: passed unmodified to unittest.TextTestRunner.__init__.
"""
super(TextAndXMLTestRunner, self).__init__(*args, **kwargs)
if xml_stream is not None:
self._xml_stream = xml_stream
# else, do not set self._xml_stream to None -- this allows implicit fallback
# to the class attribute's value.
@classmethod
def set_default_xml_stream(cls, xml_stream):
"""Sets the default XML stream for the class.
Args:
xml_stream: file-like or None; used for instances when xml_stream is None
or not passed to their constructors. If None is passed, instances
created with xml_stream=None will act as ordinary TextTestRunner
instances; this is the default state before any calls to this method
have been made.
"""
cls._xml_stream = xml_stream
def _makeResult(self):
if self._xml_stream is None:
return super(TextAndXMLTestRunner, self)._makeResult()
else:
return self._TEST_RESULT_CLASS(self._xml_stream, self.stream,
self.descriptions, self.verbosity)
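# Hypothetical usage sketch (not part of the original module): wiring the runner
# up to a unittest suite; the output file name below is invented for the example.
def _example_run_with_xml_output(suite):
  """Runs `suite`, writing text results to stderr and XML to a local file."""
  with open('test_results.xml', 'w') as xml_file:
    runner = TextAndXMLTestRunner(xml_stream=xml_file, verbosity=2)
    return runner.run(suite)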
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mptt.fields
from django.conf import settings
import cms.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('sites', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CMSPlugin',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('position', models.PositiveSmallIntegerField(verbose_name='position', null=True, editable=False, blank=True)),
('language', models.CharField(verbose_name='language', max_length=15, editable=False, db_index=True)),
('plugin_type', models.CharField(verbose_name='plugin name', max_length=50, editable=False, db_index=True)),
('creation_date', models.DateTimeField(default=cms.utils.timezone.now, verbose_name='creation date', editable=False)),
('changed_date', models.DateTimeField(auto_now=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('parent', models.ForeignKey(blank=True, editable=False, to='cms.CMSPlugin', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='GlobalPagePermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('can_change', models.BooleanField(default=True, verbose_name='can edit')),
('can_add', models.BooleanField(default=True, verbose_name='can add')),
('can_delete', models.BooleanField(default=True, verbose_name='can delete')),
('can_change_advanced_settings', models.BooleanField(default=False, verbose_name='can change advanced settings')),
('can_publish', models.BooleanField(default=True, verbose_name='can publish')),
('can_set_navigation', models.BooleanField(default=True, verbose_name='can set navigation')),
('can_change_permissions', models.BooleanField(default=False, help_text='on page level', verbose_name='can change permissions')),
('can_move_page', models.BooleanField(default=True, verbose_name='can move')),
('can_moderate', models.BooleanField(default=True, verbose_name='can moderate')),
('can_view', models.BooleanField(default=False, help_text='frontend view restriction', verbose_name='view restricted')),
('can_recover_page', models.BooleanField(default=True, help_text='can recover any deleted page', verbose_name='can recover pages')),
],
options={
'verbose_name': 'Page global permission',
'verbose_name_plural': 'Pages global permissions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_by', models.CharField(verbose_name='created by', max_length=70, editable=False)),
('changed_by', models.CharField(verbose_name='changed by', max_length=70, editable=False)),
('creation_date', models.DateTimeField(auto_now_add=True)),
('changed_date', models.DateTimeField(auto_now=True)),
('publication_date', models.DateTimeField(help_text='When the page should go live. Status must be "Published" for page to go live.', null=True, verbose_name='publication date', db_index=True, blank=True)),
('publication_end_date', models.DateTimeField(help_text='When to expire the page. Leave empty to never expire.', null=True, verbose_name='publication end date', db_index=True, blank=True)),
('in_navigation', models.BooleanField(default=True, db_index=True, verbose_name='in navigation')),
('soft_root', models.BooleanField(default=False, help_text='All ancestors will not be displayed in the navigation', db_index=True, verbose_name='soft root')),
('reverse_id', models.CharField(max_length=40, blank=True, help_text='An unique identifier that is used with the page_url templatetag for linking to this page', null=True, verbose_name='id', db_index=True)),
('navigation_extenders', models.CharField(db_index=True, max_length=80, null=True, verbose_name='attached menu', blank=True)),
('published', models.BooleanField(default=False, verbose_name='is published')),
('template', models.CharField(help_text='The template used to render the content.', max_length=100, verbose_name='template', choices=[(b'test-template.html', 'Test Template')])),
('moderator_state', models.SmallIntegerField(default=1, blank=True, verbose_name='moderator state', choices=[(0, 'changed'), (1, 'approval required'), (2, 'delete'), (10, 'approved'), (11, 'app. par.')])),
('login_required', models.BooleanField(default=False, verbose_name='login required')),
('limit_visibility_in_menu', models.SmallIntegerField(default=None, choices=[(1, 'for logged in users only'), (2, 'for anonymous users only')], blank=True, help_text='limit when this page is visible in the menu', null=True, verbose_name='menu visibility', db_index=True)),
('publisher_is_draft', models.BooleanField(default=1, db_index=True, editable=False)),
('publisher_state', models.SmallIntegerField(default=0, editable=False, db_index=True)),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('parent', mptt.fields.TreeForeignKey(related_name='children', blank=True, to='cms.Page', null=True)),
],
options={
'ordering': ('site', 'tree_id', 'lft'),
'verbose_name': 'page',
'verbose_name_plural': 'pages',
'permissions': (('view_page', 'Can view page'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PageModerator',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('moderate_page', models.BooleanField(default=False, verbose_name='Moderate page')),
('moderate_children', models.BooleanField(default=False, verbose_name='Moderate children')),
('moderate_descendants', models.BooleanField(default=False, verbose_name='Moderate descendants')),
('page', models.ForeignKey(verbose_name='Page', to='cms.Page')),
],
options={
'verbose_name': 'PageModerator',
'verbose_name_plural': 'PageModerator',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PageModeratorState',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('action', models.CharField(blank=True, max_length=3, null=True, choices=[(b'ADD', 'created'), (b'CHA', 'changed'), (b'DEL', 'deletion request'), (b'MOV', 'move request'), (b'PUB', 'publish request'), (b'UNP', 'unpublish request'), (b'APP', 'approved')])),
('message', models.TextField(default=b'', max_length=1000, blank=True)),
('page', models.ForeignKey(to='cms.Page')),
],
options={
'ordering': ('page', 'action', '-created'),
'verbose_name': 'Page moderator state',
'verbose_name_plural': 'Page moderator states',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PagePermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('can_change', models.BooleanField(default=True, verbose_name='can edit')),
('can_add', models.BooleanField(default=True, verbose_name='can add')),
('can_delete', models.BooleanField(default=True, verbose_name='can delete')),
('can_change_advanced_settings', models.BooleanField(default=False, verbose_name='can change advanced settings')),
('can_publish', models.BooleanField(default=True, verbose_name='can publish')),
('can_set_navigation', models.BooleanField(default=True, verbose_name='can set navigation')),
('can_change_permissions', models.BooleanField(default=False, help_text='on page level', verbose_name='can change permissions')),
('can_move_page', models.BooleanField(default=True, verbose_name='can move')),
('can_moderate', models.BooleanField(default=True, verbose_name='can moderate')),
('can_view', models.BooleanField(default=False, help_text='frontend view restriction', verbose_name='view restricted')),
('grant_on', models.IntegerField(default=5, verbose_name='Grant on', choices=[(1, 'Current page'), (2, 'Page children (immediate)'), (3, 'Page and children (immediate)'), (4, 'Page descendants'), (5, 'Page and descendants')])),
],
options={
'verbose_name': 'Page permission',
'verbose_name_plural': 'Page permissions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PageUser',
fields=[
('user_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('created_by', models.ForeignKey(related_name='created_users', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'User (page)',
'verbose_name_plural': 'Users (page)',
},
bases=('auth.user',),
),
migrations.CreateModel(
name='PageUserGroup',
fields=[
('group_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='auth.Group')),
('created_by', models.ForeignKey(related_name='created_usergroups', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'User group (page)',
'verbose_name_plural': 'User groups (page)',
},
bases=('auth.group',),
),
migrations.CreateModel(
name='Placeholder',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('slot', models.CharField(verbose_name='slot', max_length=50, editable=False, db_index=True)),
('default_width', models.PositiveSmallIntegerField(verbose_name='width', null=True, editable=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Title',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language', models.CharField(max_length=15, verbose_name='language', db_index=True)),
('title', models.CharField(max_length=255, verbose_name='title')),
('menu_title', models.CharField(help_text='overwrite the title in the menu', max_length=255, null=True, verbose_name='title', blank=True)),
('slug', models.SlugField(max_length=255, verbose_name='slug')),
('path', models.CharField(max_length=255, verbose_name='Path', db_index=True)),
('has_url_overwrite', models.BooleanField(default=False, verbose_name='has URL overwrite', db_index=True, editable=False)),
('application_urls', models.CharField(db_index=True, max_length=200, null=True, verbose_name='application', blank=True)),
('redirect', models.CharField(max_length=255, null=True, verbose_name='redirect', blank=True)),
('meta_description', models.TextField(max_length=255, null=True, verbose_name='description', blank=True)),
('meta_keywords', models.CharField(max_length=255, null=True, verbose_name='keywords', blank=True)),
('page_title', models.CharField(help_text='overwrite the title (html title tag)', max_length=255, null=True, verbose_name='title', blank=True)),
('creation_date', models.DateTimeField(default=cms.utils.timezone.now, verbose_name='creation date', editable=False)),
('page', models.ForeignKey(related_name='title_set', verbose_name='page', to='cms.Page')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='title',
unique_together=set([('language', 'page')]),
),
migrations.AddField(
model_name='pagepermission',
name='group',
field=models.ForeignKey(verbose_name='group', blank=True, to='auth.Group', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='pagepermission',
name='page',
field=models.ForeignKey(verbose_name='page', blank=True, to='cms.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='pagepermission',
name='user',
field=models.ForeignKey(verbose_name='user', blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='pagemoderatorstate',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='pagemoderator',
name='user',
field=models.ForeignKey(verbose_name='User', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='page',
name='placeholders',
field=models.ManyToManyField(to='cms.Placeholder', editable=False),
preserve_default=True,
),
migrations.AddField(
model_name='page',
name='publisher_public',
field=models.OneToOneField(related_name='publisher_draft', null=True, editable=False, to='cms.Page'),
preserve_default=True,
),
migrations.AddField(
model_name='page',
name='site',
field=models.ForeignKey(verbose_name='site', to='sites.Site', help_text='The site the page is accessible at.'),
preserve_default=True,
),
migrations.AddField(
model_name='globalpagepermission',
name='group',
field=models.ForeignKey(verbose_name='group', blank=True, to='auth.Group', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='globalpagepermission',
name='sites',
            field=models.ManyToManyField(help_text='If none selected, the user is granted permissions to all sites.', to='sites.Site', null=True, verbose_name='sites', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='globalpagepermission',
name='user',
field=models.ForeignKey(verbose_name='user', blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='cmsplugin',
name='placeholder',
field=models.ForeignKey(editable=False, to='cms.Placeholder', null=True),
preserve_default=True,
),
]
|
|
"""
plotly
======
A module that contains the plotly class, a liaison between the user
and plotly's servers.
1. get DEFAULT_PLOT_OPTIONS for options
2. update plot_options with .plotly/ dir
3. update plot_options with _plot_options
4. update plot_options with kwargs!
"""
from __future__ import absolute_import
import json
import warnings
import copy
import os
import six
import requests
from plotly.plotly import chunked_requests
from plotly import utils
from plotly import tools
from plotly import exceptions
from plotly import version
__all__ = None
_DEFAULT_PLOT_OPTIONS = dict(
filename="plot from API",
fileopt="new",
world_readable=True,
auto_open=True,
validate=True)
_credentials = dict()
_plot_options = dict()
### test file permissions and make sure nothing is corrupted ###
tools.ensure_local_plotly_files()
### _credentials stuff ###
def sign_in(username, api_key):
"""Set module-scoped _credentials for session. Verify with plotly."""
global _credentials
_credentials['username'], _credentials['api_key'] = username, api_key
# TODO: verify these _credentials with plotly
### plot options stuff ###
def update_plot_options(**kwargs):
""" Update the module-level _plot_options
"""
global _plot_options
_plot_options.update(kwargs)
def get_plot_options():
""" Returns a copy of the user supplied plot options.
Use `update_plot_options()` to change.
"""
global _plot_options
return copy.copy(_plot_options)
def get_credentials():
""" Returns a copy of the user supplied credentials.
"""
global _credentials
if ('username' in _credentials) and ('api_key' in _credentials):
return copy.copy(_credentials)
else:
return tools.get_credentials_file()
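# Illustrative sketch (not part of the original module): session credentials set
# via sign_in() take precedence over the credentials file read by
# tools.get_credentials_file(); the values below are placeholders.
#
#   >>> sign_in('example_user', 'example_api_key')
#   >>> get_credentials()
#   {'username': 'example_user', 'api_key': 'example_api_key'}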
### plot stuff ###
def iplot(figure_or_data, **plot_options):
"""Create a unique url for this plot in Plotly and open in IPython.
    plot_options keyword arguments:
    filename (string) -- the name that will be associated with this figure
    fileopt ('new' | 'overwrite' | 'extend' | 'append') --
        'new': create a new, unique url for this plot
        'overwrite': overwrite the file associated with `filename` with this figure
'extend': add additional numbers (data) to existing traces
'append': add additional traces to existing data lists
world_readable (default=True) -- make this figure private/public
"""
if 'auto_open' not in plot_options:
plot_options['auto_open'] = False
res = plot(figure_or_data, **plot_options)
urlsplit = res.split('/')
username, plot_id = urlsplit[-2][1:], urlsplit[-1] # TODO: HACKY!
embed_options = dict()
if 'width' in plot_options:
embed_options['width'] = plot_options['width']
if 'height' in plot_options:
embed_options['height'] = plot_options['height']
return tools.embed(username, plot_id, **embed_options)
def _plot_option_logic(plot_options):
"""Sets plot_options via a precedence hierarchy."""
options = dict()
options.update(_DEFAULT_PLOT_OPTIONS)
options.update(_plot_options)
options.update(plot_options)
if ('filename' in plot_options
and 'fileopt' not in _plot_options
and 'fileopt' not in plot_options):
options['fileopt'] = 'overwrite'
return options
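# Illustrative sketch (not part of the original module): the precedence applied
# above is _DEFAULT_PLOT_OPTIONS < update_plot_options() < per-call kwargs, with
# 'fileopt' forced to 'overwrite' when only a filename is supplied, e.g.:
#
#   >>> update_plot_options(world_readable=False)
#   >>> options = _plot_option_logic({'filename': 'my-plot'})
#   >>> options['world_readable'], options['fileopt']
#   (False, 'overwrite')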
def plot(figure_or_data, validate=True, **plot_options):
"""Create a unique url for this plot in Plotly and optionally open url.
    plot_options keyword arguments:
    filename (string) -- the name that will be associated with this figure
    fileopt ('new' | 'overwrite' | 'extend' | 'append') --
        'new': create a new, unique url for this plot
        'overwrite': overwrite the file associated with `filename` with this figure
'extend': add additional numbers (data) to existing traces
'append': add additional traces to existing data lists
world_readable (default=True) -- make this figure private/public
auto_open (default=True) -- Toggle browser options
True: open this plot in a new browser tab
False: do not open plot in the browser, but do return the unique url
"""
if isinstance(figure_or_data, dict):
figure = figure_or_data
elif isinstance(figure_or_data, list):
figure = {'data': figure_or_data}
else:
raise exceptions.PlotlyError("The `figure_or_data` positional argument "
"must be either `dict`-like or "
"`list`-like.")
if validate:
try:
tools.validate(figure, obj_type='Figure')
except exceptions.PlotlyError as err:
raise exceptions.PlotlyError("Invalid 'figure_or_data' argument. "
"Plotly will not be able to properly "
"parse the resulting JSON. If you "
"want to send this 'figure_or_data' "
"to Plotly anyway (not recommended), "
"you can set 'validate=False' as a "
"plot option.\nHere's why you're "
"seeing this error:\n\n{0}"
"".format(err))
for entry in figure['data']:
for key, val in list(entry.items()):
try:
if len(val) > 40000:
msg = ("Woah there! Look at all those points! Due to "
"browser limitations, Plotly has a hard time "
"graphing more than 500k data points for line "
"charts, or 40k points for other types of charts. "
"Here are some suggestions:\n"
"(1) Trying using the image API to return an image "
"instead of a graph URL\n"
"(2) Use matplotlib\n"
"(3) See if you can create your visualization with "
"fewer data points\n\n"
"If the visualization you're using aggregates "
"points (e.g., box plot, histogram, etc.) you can "
"disregard this warning.")
warnings.warn(msg)
except TypeError:
pass
plot_options = _plot_option_logic(plot_options)
res = _send_to_plotly(figure, **plot_options)
if res['error'] == '':
if plot_options['auto_open']:
try:
from webbrowser import open as wbopen
wbopen(res['url'])
except: # TODO: what should we except here? this is dangerous
pass
return res['url']
else:
raise exceptions.PlotlyAccountError(res['error'])
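# Illustrative sketch (not part of the original module): a minimal call to
# plot() with an in-memory figure (trace values and filename invented for the
# example; assumes credentials were set via sign_in() or the credentials file).
#
#   >>> fig = {'data': [{'x': [1, 2, 3], 'y': [3, 1, 2], 'type': 'scatter'}]}
#   >>> url = plot(fig, filename='example-plot', auto_open=False)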
def iplot_mpl(fig, resize=True, strip_style=False, update=None, **plot_options):
"""Replot a matplotlib figure with plotly in IPython.
This function:
    1. converts the mpl figure into JSON (run help(plotly.tools.mpl_to_plotly))
2. makes a request to Plotly to save this figure in your account
3. displays the image in your IPython output cell
    Positional arguments:
fig -- a figure object from matplotlib
Keyword arguments:
resize (default=True) -- allow plotly to choose the figure size
strip_style (default=False) -- allow plotly to choose style options
update (default=None) -- update the resulting figure with an 'update'
dictionary-like object resembling a plotly 'Figure' object
Additional keyword arguments:
plot_options -- run help(plotly.plotly.iplot)
"""
fig = tools.mpl_to_plotly(fig, resize=resize, strip_style=strip_style)
if update and isinstance(update, dict):
try:
fig.update(update)
fig.validate()
except exceptions.PlotlyGraphObjectError as err:
err.add_note("Your updated figure could not be properly validated.")
err.prepare()
raise
elif update is not None:
raise exceptions.PlotlyGraphObjectError(
"'update' must be dictionary-like and a valid plotly Figure "
"object. Run 'help(plotly.graph_objs.Figure)' for more info."
)
return iplot(fig, **plot_options)
def plot_mpl(fig, resize=True, strip_style=False, update=None, **plot_options):
"""Replot a matplotlib figure with plotly.
This function:
    1. converts the mpl figure into JSON (run help(plotly.tools.mpl_to_plotly))
2. makes a request to Plotly to save this figure in your account
3. opens your figure in a browser tab OR returns the unique figure url
    Positional arguments:
fig -- a figure object from matplotlib
Keyword arguments:
resize (default=True) -- allow plotly to choose the figure size
strip_style (default=False) -- allow plotly to choose style options
update (default=None) -- update the resulting figure with an 'update'
dictionary-like object resembling a plotly 'Figure' object
Additional keyword arguments:
plot_options -- run help(plotly.plotly.plot)
"""
fig = tools.mpl_to_plotly(fig, resize=resize, strip_style=strip_style)
if update and isinstance(update, dict):
try:
fig.update(update)
fig.validate()
except exceptions.PlotlyGraphObjectError as err:
err.add_note("Your updated figure could not be properly validated.")
err.prepare()
raise
elif update is not None:
raise exceptions.PlotlyGraphObjectError(
"'update' must be dictionary-like and a valid plotly Figure "
"object. Run 'help(plotly.graph_objs.Figure)' for more info."
)
return plot(fig, **plot_options)
def get_figure(file_owner_or_url, file_id=None, raw=False):
"""Returns a JSON figure representation for the specified file
Plotly uniquely identifies figures with a 'file_owner'/'file_id' pair.
Since each file is given a corresponding unique url, you may also simply
pass a valid plotly url as the first argument.
Note, if you're using a file_owner string as the first argument, you MUST
specify a `file_id` keyword argument. Else, if you're using a url string
as the first argument, you MUST NOT specify a `file_id` keyword argument, or
file_id must be set to Python's None value.
Positional arguments:
file_owner_or_url (string) -- a valid plotly username OR a valid plotly url
Keyword arguments:
file_id (default=None) -- an int or string that can be converted to int
if you're using a url, don't fill this in!
raw (default=False) -- if true, return unicode JSON string verbatim**
**by default, plotly will return a Figure object (run help(plotly
.graph_objs.Figure)). This representation decodes the keys and values from
unicode (if possible), removes information irrelevant to the figure
representation, and converts the JSON dictionary objects to plotly
`graph objects`.
"""
plotly_rest_url = tools.get_config_file()['plotly_domain']
if file_id is None: # assume we're using a url
url = file_owner_or_url
if url[:len(plotly_rest_url)] != plotly_rest_url:
raise exceptions.PlotlyError(
"Because you didn't supply a 'file_id' in the call, "
"we're assuming you're trying to snag a figure from a url. "
"You supplied the url, '{0}', we expected it to start with "
"'{1}'."
"\nRun help on this function for more information."
"".format(url, plotly_rest_url))
head = plotly_rest_url + "/~"
file_owner = url.replace(head, "").split('/')[0]
file_id = url.replace(head, "").split('/')[1]
else:
file_owner = file_owner_or_url
resource = "/apigetfile/{username}/{file_id}".format(username=file_owner,
file_id=file_id)
(username, api_key) = _validation_key_logic()
headers = {'plotly-username': username,
'plotly-apikey': api_key,
'plotly-version': version.__version__,
'plotly-platform': 'python'}
try:
test_if_int = int(file_id)
except ValueError:
raise exceptions.PlotlyError(
"The 'file_id' argument was not able to be converted into an "
"integer number. Make sure that the positional 'file_id' argument "
"is a number that can be converted into an integer or a string "
"that can be converted into an integer."
)
if int(file_id) < 0:
raise exceptions.PlotlyError(
"The 'file_id' argument must be a non-negative number."
)
response = requests.get(plotly_rest_url + resource, headers=headers)
if response.status_code == 200:
if six.PY3:
content = json.loads(response.content.decode('unicode_escape'))
else:
content = json.loads(response.content)
response_payload = content['payload']
figure = response_payload['figure']
utils.decode_unicode(figure)
if raw:
return figure
else:
return tools.get_valid_graph_obj(figure, obj_type='Figure')
else:
try:
content = json.loads(response.content)
raise exceptions.PlotlyError(content)
except:
raise exceptions.PlotlyError(
"There was an error retrieving this file")
@utils.template_doc(**tools.get_config_file())
class Stream:
""" Interface to Plotly's real-time graphing API.
Initialize a Stream object with a stream_id
found in {plotly_domain}/settings.
Real-time graphs are initialized with a call to `plot` that embeds
your unique `stream_id`s in each of the graph's traces. The `Stream`
interface plots data to these traces, as identified with the unique
stream_id, in real-time.
Every viewer of the graph sees the same data at the same time.
View examples and tutorials here:
http://nbviewer.ipython.org/github/plotly/python-user-guide/blob/master/s7_streaming/s7_streaming.ipynb
Stream example:
# Initialize a streaming graph
# by embedding stream_id's in the graph's traces
>>> stream_id = "your_stream_id" # See {plotly_domain}/settings
>>> py.plot(Data([Scatter(x=[],
y=[],
                          stream=dict(token=stream_id, maxpoints=100))]))
    # Stream data to the embedded trace
>>> stream = Stream(stream_id) # Initialize a stream object
>>> stream.open() # Open the stream
>>> stream.write(dict(x=1, y=1)) # Plot (1, 1) in your graph
"""
@utils.template_doc(**tools.get_config_file())
def __init__(self, stream_id):
""" Initialize a Stream object with your unique stream_id.
Find your stream_id at {plotly_domain}/settings.
For more help, see: `help(plotly.plotly.Stream)`
or see examples and tutorials here:
http://nbviewer.ipython.org/github/plotly/python-user-guide/blob/master/s7_streaming/s7_streaming.ipynb
"""
self.stream_id = stream_id
self.connected = False
def open(self):
"""Open streaming connection to plotly.
For more help, see: `help(plotly.plotly.Stream)`
or see examples and tutorials here:
http://nbviewer.ipython.org/github/plotly/python-user-guide/blob/master/s7_streaming/s7_streaming.ipynb
"""
streaming_url = tools.get_config_file()['plotly_streaming_domain']
self._stream = chunked_requests.Stream(streaming_url,
80,
{'Host': streaming_url,
'plotly-streamtoken': self.stream_id})
def write(self, trace, layout=None, validate=True,
reconnect_on=(200, '', 408)):
"""Write to an open stream.
Once you've instantiated a 'Stream' object with a 'stream_id',
you can 'write' to it in real time.
positional arguments:
trace - A valid plotly trace object (e.g., Scatter, Heatmap, etc.).
            Not all keys in these are streamable; run help(Obj) on the type
            of trace you're trying to stream. For each valid key, if the key
            is streamable, it will say 'streamable = True'. Trace objects
            must be dictionary-like.
keyword arguments:
layout (default=None) - A valid Layout object
Run help(plotly.graph_objs.Layout)
validate (default = True) - Validate this stream before sending?
This will catch local errors if set to True.
Some valid keys for trace dictionaries:
'x', 'y', 'text', 'z', 'marker', 'line'
Examples:
>>> write(dict(x=1, y=2)) # assumes 'scatter' type
>>> write(Bar(x=[1, 2, 3], y=[10, 20, 30]))
>>> write(Scatter(x=1, y=2, text='scatter text'))
>>> write(Scatter(x=1, y=3, marker=Marker(color='blue')))
>>> write(Heatmap(z=[[1, 2, 3], [4, 5, 6]]))
The connection to plotly's servers is checked before writing
and reconnected if disconnected and if the response status code
is in `reconnect_on`.
For more help, see: `help(plotly.plotly.Stream)`
or see examples and tutorials here:
http://nbviewer.ipython.org/github/plotly/python-user-guide/blob/master/s7_streaming/s7_streaming.ipynb
"""
stream_object = dict()
stream_object.update(trace)
if 'type' not in stream_object:
stream_object['type'] = 'scatter'
if validate:
try:
tools.validate(stream_object, stream_object['type'])
except exceptions.PlotlyError as err:
raise exceptions.PlotlyError(
"Part of the data object with type, '{0}', is invalid. "
"This will default to 'scatter' if you do not supply a "
"'type'. If you do not want to validate your data objects "
"when streaming, you can set 'validate=False' in the call "
"to 'your_stream.write()'. Here's why the object is "
"invalid:\n\n{1}".format(stream_object['type'], err)
)
try:
tools.validate_stream(stream_object, stream_object['type'])
except exceptions.PlotlyError as err:
raise exceptions.PlotlyError(
"Part of the data object with type, '{0}', cannot yet be "
"streamed into Plotly. If you do not want to validate your "
"data objects when streaming, you can set 'validate=False' "
"in the call to 'your_stream.write()'. Here's why the "
"object cannot be streamed:\n\n{1}"
"".format(stream_object['type'], err)
)
if layout is not None:
try:
tools.validate(layout, 'Layout')
except exceptions.PlotlyError as err:
raise exceptions.PlotlyError(
"Your layout kwarg was invalid. "
"Here's why:\n\n{0}".format(err)
)
del stream_object['type']
if layout is not None:
stream_object.update(dict(layout=layout))
# TODO: allow string version of this?
jdata = json.dumps(stream_object, cls=utils._plotlyJSONEncoder)
jdata += "\n"
try:
self._stream.write(jdata, reconnect_on=reconnect_on)
except AttributeError:
raise exceptions.PlotlyError("Stream has not been opened yet, "
"cannot write to a closed connection. "
"Call `open()` on the stream to open the stream.")
def close(self):
""" Close the stream connection to plotly's streaming servers.
For more help, see: `help(plotly.plotly.Stream)`
or see examples and tutorials here:
http://nbviewer.ipython.org/github/plotly/python-user-guide/blob/master/s7_streaming/s7_streaming.ipynb
"""
try:
self._stream.close()
except AttributeError:
raise exceptions.PlotlyError("Stream has not been opened yet.")
class image:
''' Helper functions wrapped around plotly's static image generation api.
'''
@staticmethod
def get(figure_or_data, format='png', width=None, height=None):
""" Return a static image of the plot described by `figure`.
Valid formats: 'png', 'svg', 'jpeg', 'pdf'
"""
if isinstance(figure_or_data, dict):
figure = figure_or_data
elif isinstance(figure_or_data, list):
figure = {'data': figure_or_data}
if format not in ['png', 'svg', 'jpeg', 'pdf']:
raise exceptions.PlotlyError("Invalid format. "
"This version of your Plotly-Python "
"package currently only supports "
"png, svg, jpeg, and pdf. "
"Learn more about image exporting, "
"and the currently supported file "
"types here: "
"https://plot.ly/python/static-image-export/")
(username, api_key) = _validation_key_logic()
headers = {'plotly-username': username,
'plotly-apikey': api_key,
'plotly-version': version.__version__,
'plotly-platform': 'python'}
payload = {
'figure': figure,
'format': format
}
if width is not None:
payload['width'] = width
if height is not None:
payload['height'] = height
url = tools.get_config_file()['plotly_domain'] + "/apigenimage/"
res = requests.post(url,
data=json.dumps(payload,
cls=utils._plotlyJSONEncoder),
headers=headers)
headers = res.headers
if res.status_code == 200:
if ('content-type' in headers and
headers['content-type'] in ['image/png', 'image/jpeg',
'application/pdf',
'image/svg+xml']):
return res.content
elif ('content-type' in headers and
'json' in headers['content-type']):
return_data = json.loads(res.content)
return return_data['image']
else:
try:
if ('content-type' in headers and
'json' in headers['content-type']):
return_data = json.loads(res.content)
else:
return_data = {'error': res.content}
except:
raise exceptions.PlotlyError("The response "
"from plotly could "
"not be translated.")
raise exceptions.PlotlyError(return_data['error'])
@classmethod
def ishow(cls, figure_or_data, format='png', width=None, height=None):
""" Display a static image of the plot described by `figure`
in an IPython Notebook.
"""
if format == 'pdf':
raise exceptions.PlotlyError("Aw, snap! "
"It's not currently possible to embed a pdf into "
"an IPython notebook. You can save the pdf "
"with the `image.save_as` or you can "
"embed an png, jpeg, or svg.")
img = cls.get(figure_or_data, format, width, height)
from IPython.display import display, Image, SVG
if format == 'svg':
display(SVG(img))
else:
display(Image(img))
@classmethod
def save_as(cls, figure_or_data, filename, format=None, width=None, height=None):
""" Save a static image of the plot described by `figure` locally as `filename`.
Valid image formats are 'png', 'svg', 'jpeg', and 'pdf'.
The format is taken as the extension of the filename or as the supplied format.
"""
(base, ext) = os.path.splitext(filename)
if not ext and not format:
filename += '.png'
elif ext and not format:
format = ext[1:]
elif not ext and format:
filename += '.'+format
        else:
            # Both an extension and an explicit format were supplied; only
            # append the format when it differs from the existing extension.
            if ext[1:] != format:
                filename += '.' + format
img = cls.get(figure_or_data, format, width, height)
f = open(filename, 'wb')
f.write(img)
f.close()
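# Illustrative sketch (not part of the original module): fetching and saving a
# static image through the helpers above (figure and file names are placeholders).
#
#   >>> png_bytes = image.get(fig, format='png', width=800, height=600)
#   >>> image.save_as(fig, 'example-plot.pdf')  # format inferred from extension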
def _send_to_plotly(figure, **plot_options):
"""
"""
fig = tools._replace_newline(figure) # does not mutate figure
data = json.dumps(fig['data'] if 'data' in fig else [],
cls=utils._plotlyJSONEncoder)
file_credentials = tools.get_credentials_file()
if ('username' in _credentials) and ('api_key' in _credentials):
username, api_key = _credentials['username'], _credentials['api_key']
elif ('username' in file_credentials) and ('api_key' in file_credentials):
(username, api_key) = (file_credentials['username'],
file_credentials['api_key'])
else:
raise exceptions.PlotlyLocalCredentialsError()
kwargs = json.dumps(dict(filename=plot_options['filename'],
fileopt=plot_options['fileopt'],
world_readable=plot_options['world_readable'],
layout=fig['layout'] if 'layout' in fig
else {}),
cls=utils._plotlyJSONEncoder)
payload = dict(platform='python', # TODO: It'd be cool to expose the platform for RaspPi and others
version=version.__version__,
args=data,
un=username,
key=api_key,
origin='plot',
kwargs=kwargs)
url = tools.get_config_file()['plotly_domain'] + "/clientresp"
r = requests.post(url, data=payload)
r.raise_for_status()
r = json.loads(r.text)
if 'error' in r and r['error'] != '':
print((r['error']))
if 'warning' in r and r['warning'] != '':
warnings.warn(r['warning'])
if 'message' in r and r['message'] != '':
print((r['message']))
return r
def _validation_key_logic():
creds_on_file = tools.get_credentials_file()
if 'username' in _credentials:
username = _credentials['username']
elif 'username' in creds_on_file:
username = creds_on_file['username']
else:
username = None
if 'api_key' in _credentials:
api_key = _credentials['api_key']
elif 'api_key' in creds_on_file:
api_key = creds_on_file['api_key']
else:
api_key = None
if username is None or api_key is None:
raise exceptions.PlotlyLocalCredentialsError()
return (username, api_key)
|
|
from __future__ import division
import warnings
import keras.backend as K
from keras.models import Model
from keras.layers import Lambda, Input, Layer, Dense
from rl.core import Agent
from rl.policy import EpsGreedyQPolicy, GreedyQPolicy
from rl.util import *
def mean_q(y_true, y_pred):
return K.mean(K.max(y_pred, axis=-1))
class AbstractDQNAgent(Agent):
"""Write me
"""
def __init__(self, nb_actions, memory, gamma=.99, batch_size=32, nb_steps_warmup=1000,
train_interval=1, memory_interval=1, target_model_update=10000,
delta_range=None, delta_clip=np.inf, custom_model_objects={}, **kwargs):
super(AbstractDQNAgent, self).__init__(**kwargs)
# Soft vs hard target model updates.
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
# Hard update every `target_model_update` steps.
target_model_update = int(target_model_update)
else:
# Soft update with `(1 - target_model_update) * old + target_model_update * new`.
target_model_update = float(target_model_update)
if delta_range is not None:
warnings.warn('`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we\'re falling back to `delta_range[1] = {}`'.format(delta_range[1]))
delta_clip = delta_range[1]
# Parameters.
self.nb_actions = nb_actions
self.gamma = gamma
self.batch_size = batch_size
self.nb_steps_warmup = nb_steps_warmup
self.train_interval = train_interval
self.memory_interval = memory_interval
self.target_model_update = target_model_update
self.delta_clip = delta_clip
self.custom_model_objects = custom_model_objects
# Related objects.
self.memory = memory
# State.
self.compiled = False
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def compute_batch_q_values(self, state_batch):
batch = self.process_state_batch(state_batch)
q_values = self.model.predict_on_batch(batch)
assert q_values.shape == (len(state_batch), self.nb_actions)
return q_values
def compute_q_values(self, state):
q_values = self.compute_batch_q_values([state]).flatten()
assert q_values.shape == (self.nb_actions,)
return q_values
def get_config(self):
return {
'nb_actions': self.nb_actions,
'gamma': self.gamma,
'batch_size': self.batch_size,
'nb_steps_warmup': self.nb_steps_warmup,
'train_interval': self.train_interval,
'memory_interval': self.memory_interval,
'target_model_update': self.target_model_update,
'delta_clip': self.delta_clip,
'memory': get_object_config(self.memory),
}
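# Illustrative sketch (not part of the original library): the two target-update
# regimes selected by `target_model_update` in AbstractDQNAgent.__init__ above,
# written out for plain numpy weight arrays (this helper is hypothetical and
# only mirrors the comments in __init__).
def _example_target_update(target_weights, online_weights, target_model_update, step):
    import numpy as np
    if target_model_update >= 1:
        # Hard update: copy the online weights every `target_model_update` steps.
        if step % int(target_model_update) == 0:
            return [np.copy(w) for w in online_weights]
        return target_weights
    # Soft update: (1 - tau) * old + tau * new, with tau = target_model_update.
    tau = float(target_model_update)
    return [(1. - tau) * t + tau * o for t, o in zip(target_weights, online_weights)]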
# An implementation of the DQN agent as described in Mnih (2013) and Mnih (2015).
# http://arxiv.org/pdf/1312.5602.pdf
# http://arxiv.org/abs/1509.06461
class DQNAgent(AbstractDQNAgent):
"""
# Arguments
model__: A Keras model.
        policy__: A keras-rl policy, as defined in [policy](https://github.com/keras-rl/keras-rl/blob/master/rl/policy.py).
        test_policy__: A keras-rl policy used at test time.
        enable_double_dqn__: A boolean that enables Double DQN, which uses the target network as a second value estimator, as proposed by van Hasselt et al., to reduce overestimation of Q values.
        enable_dueling_network__: A boolean that enables the dueling architecture proposed by Wang et al.
        dueling_type__: If `enable_dueling_network` is set to `True`, the type of dueling architecture must be chosen; each variant calculates Q(s,a) from V(s) and A(s,a) differently. Note that `avg` is recommended in the [paper](https://arxiv.org/abs/1511.06581).
`avg`: Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-Avg_a(A(s,a;theta)))
`max`: Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-max_a(A(s,a;theta)))
`naive`: Q(s,a;theta) = V(s;theta) + A(s,a;theta)
"""
def __init__(self, model, policy=None, test_policy=None, enable_double_dqn=False, enable_dueling_network=False,
dueling_type='avg', *args, **kwargs):
super(DQNAgent, self).__init__(*args, **kwargs)
# Validate (important) input.
if hasattr(model.output, '__len__') and len(model.output) > 1:
raise ValueError('Model "{}" has more than one output. DQN expects a model that has a single output.'.format(model))
if model.output._keras_shape != (None, self.nb_actions):
raise ValueError('Model output "{}" has invalid shape. DQN expects a model that has one dimension for each action, in this case {}.'.format(model.output, self.nb_actions))
# Parameters.
self.enable_double_dqn = enable_double_dqn
self.enable_dueling_network = enable_dueling_network
self.dueling_type = dueling_type
if self.enable_dueling_network:
            # Get the second-to-last layer of the model and discard the last layer.
layer = model.layers[-2]
nb_action = model.output._keras_shape[-1]
# layer y has a shape (nb_action+1,)
# y[:,0] represents V(s;theta)
# y[:,1:] represents A(s,a;theta)
y = Dense(nb_action + 1, activation='linear')(layer.output)
            # calculate Q(s,a;theta)
# dueling_type == 'avg'
# Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-Avg_a(A(s,a;theta)))
# dueling_type == 'max'
# Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-max_a(A(s,a;theta)))
# dueling_type == 'naive'
# Q(s,a;theta) = V(s;theta) + A(s,a;theta)
if self.dueling_type == 'avg':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], axis=1, keepdims=True), output_shape=(nb_action,))(y)
elif self.dueling_type == 'max':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], axis=1, keepdims=True), output_shape=(nb_action,))(y)
elif self.dueling_type == 'naive':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:], output_shape=(nb_action,))(y)
else:
assert False, "dueling_type must be one of {'avg','max','naive'}"
model = Model(inputs=model.input, outputs=outputlayer)
# Related objects.
self.model = model
if policy is None:
policy = EpsGreedyQPolicy()
if test_policy is None:
test_policy = GreedyQPolicy()
self.policy = policy
self.test_policy = test_policy
# State.
self.reset_states()
def get_config(self):
config = super(DQNAgent, self).get_config()
config['enable_double_dqn'] = self.enable_double_dqn
config['dueling_type'] = self.dueling_type
config['enable_dueling_network'] = self.enable_dueling_network
config['model'] = get_object_config(self.model)
config['policy'] = get_object_config(self.policy)
config['test_policy'] = get_object_config(self.test_policy)
if self.compiled:
config['target_model'] = get_object_config(self.target_model)
return config
def compile(self, optimizer, metrics=[]):
metrics += [mean_q] # register default metrics
# We never train the target model, hence we can set the optimizer and loss arbitrarily.
self.target_model = clone_model(self.model, self.custom_model_objects)
self.target_model.compile(optimizer='sgd', loss='mse')
self.model.compile(optimizer='sgd', loss='mse')
# Compile model.
if self.target_model_update < 1.:
# We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
updates = get_soft_target_model_updates(self.target_model, self.model, self.target_model_update)
optimizer = AdditionalUpdatesOptimizer(optimizer, updates)
def clipped_masked_error(args):
y_true, y_pred, mask = args
loss = huber_loss(y_true, y_pred, self.delta_clip)
loss *= mask # apply element-wise mask
return K.sum(loss, axis=-1)
# Create trainable model. The problem is that we need to mask the output since we only
# ever want to update the Q values for a certain action. The way we achieve this is by
# using a custom Lambda layer that computes the loss. This gives us the necessary flexibility
# to mask out certain parameters by passing in multiple inputs to the Lambda layer.
y_pred = self.model.output
y_true = Input(name='y_true', shape=(self.nb_actions,))
mask = Input(name='mask', shape=(self.nb_actions,))
loss_out = Lambda(clipped_masked_error, output_shape=(1,), name='loss')([y_true, y_pred, mask])
ins = [self.model.input] if type(self.model.input) is not list else self.model.input
trainable_model = Model(inputs=ins + [y_true, mask], outputs=[loss_out, y_pred])
assert len(trainable_model.output_names) == 2
combined_metrics = {trainable_model.output_names[1]: metrics}
losses = [
lambda y_true, y_pred: y_pred, # loss is computed in Lambda layer
lambda y_true, y_pred: K.zeros_like(y_pred), # we only include this for the metrics
]
trainable_model.compile(optimizer=optimizer, loss=losses, metrics=combined_metrics)
self.trainable_model = trainable_model
self.compiled = True
def load_weights(self, filepath):
self.model.load_weights(filepath)
self.update_target_model_hard()
def save_weights(self, filepath, overwrite=False):
self.model.save_weights(filepath, overwrite=overwrite)
def reset_states(self):
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.model.reset_states()
self.target_model.reset_states()
def update_target_model_hard(self):
self.target_model.set_weights(self.model.get_weights())
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
q_values = self.compute_q_values(state)
if self.training:
action = self.policy.select_action(q_values=q_values)
else:
action = self.test_policy.select_action(q_values=q_values)
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
def backward(self, reward, terminal):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert len(action_batch) == len(reward_batch)
# Compute Q values for mini-batch update.
if self.enable_double_dqn:
# According to the paper "Deep Reinforcement Learning with Double Q-learning"
# (van Hasselt et al., 2015), in Double DQN, the online network predicts the actions
# while the target network is used to estimate the Q value.
q_values = self.model.predict_on_batch(state1_batch)
assert q_values.shape == (self.batch_size, self.nb_actions)
actions = np.argmax(q_values, axis=1)
assert actions.shape == (self.batch_size,)
# Now, estimate Q values using the target network but select the values with the
# highest Q value wrt to the online model (as computed above).
target_q_values = self.target_model.predict_on_batch(state1_batch)
assert target_q_values.shape == (self.batch_size, self.nb_actions)
q_batch = target_q_values[range(self.batch_size), actions]
else:
# Compute the q_values given state1, and extract the maximum for each sample in the batch.
# We perform this prediction on the target_model instead of the model for reasons
# outlined in Mnih (2015). In short: it makes the algorithm more stable.
target_q_values = self.target_model.predict_on_batch(state1_batch)
assert target_q_values.shape == (self.batch_size, self.nb_actions)
q_batch = np.max(target_q_values, axis=1).flatten()
assert q_batch.shape == (self.batch_size,)
targets = np.zeros((self.batch_size, self.nb_actions))
dummy_targets = np.zeros((self.batch_size,))
masks = np.zeros((self.batch_size, self.nb_actions))
# Compute r_t + gamma * max_a Q(s_t+1, a) and update the target targets accordingly,
# but only for the affected output units (as given by action_batch).
discounted_reward_batch = self.gamma * q_batch
# Set discounted reward to zero for all states that were terminal.
discounted_reward_batch *= terminal1_batch
assert discounted_reward_batch.shape == reward_batch.shape
Rs = reward_batch + discounted_reward_batch
for idx, (target, mask, R, action) in enumerate(zip(targets, masks, Rs, action_batch)):
target[action] = R # update action with estimated accumulated reward
dummy_targets[idx] = R
mask[action] = 1. # enable loss for this specific action
targets = np.array(targets).astype('float32')
masks = np.array(masks).astype('float32')
# Finally, perform a single update on the entire batch. We use a dummy target since
# the actual loss is computed in a Lambda layer that needs more complex input. However,
# it is still useful to know the actual target to compute metrics properly.
ins = [state0_batch] if type(self.model.input) is not list else state0_batch
metrics = self.trainable_model.train_on_batch(ins + [targets, masks], [dummy_targets, targets])
metrics = [metric for idx, metric in enumerate(metrics) if idx not in (1, 2)] # throw away individual losses
metrics += self.policy.metrics
if self.processor is not None:
metrics += self.processor.metrics
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self.update_target_model_hard()
return metrics
@property
def layers(self):
return self.model.layers[:]
@property
def metrics_names(self):
# Throw away individual losses and replace output name since this is hidden from the user.
assert len(self.trainable_model.output_names) == 2
dummy_output_name = self.trainable_model.output_names[1]
model_metrics = [name for idx, name in enumerate(self.trainable_model.metrics_names) if idx not in (1, 2)]
model_metrics = [name.replace(dummy_output_name + '_', '') for name in model_metrics]
names = model_metrics + self.policy.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
@property
def policy(self):
return self.__policy
@policy.setter
def policy(self, policy):
self.__policy = policy
self.__policy._set_agent(self)
@property
def test_policy(self):
return self.__test_policy
@test_policy.setter
def test_policy(self, policy):
self.__test_policy = policy
self.__test_policy._set_agent(self)
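# Hypothetical usage sketch (not part of the original library), following the
# keras-rl examples; `model`, `env` and `nb_actions` are assumed to be defined
# elsewhere:
#
#   from keras.optimizers import Adam
#   from rl.memory import SequentialMemory
#   memory = SequentialMemory(limit=50000, window_length=1)
#   dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory,
#                  nb_steps_warmup=100, target_model_update=1e-2,
#                  policy=EpsGreedyQPolicy())
#   dqn.compile(Adam(lr=1e-3), metrics=['mae'])
#   dqn.fit(env, nb_steps=50000, visualize=False, verbose=1)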
class NAFLayer(Layer):
"""Write me
"""
def __init__(self, nb_actions, mode='full', **kwargs):
if mode not in ('full', 'diag'):
raise RuntimeError('Unknown mode "{}" in NAFLayer.'.format(self.mode))
self.nb_actions = nb_actions
self.mode = mode
super(NAFLayer, self).__init__(**kwargs)
def call(self, x, mask=None):
# TODO: validate input shape
assert (len(x) == 3)
L_flat = x[0]
mu = x[1]
a = x[2]
if self.mode == 'full':
# Create L and L^T matrix, which we use to construct the positive-definite matrix P.
L = None
LT = None
if K.backend() == 'theano':
import theano.tensor as T
import theano
def fn(x, L_acc, LT_acc):
x_ = K.zeros((self.nb_actions, self.nb_actions))
x_ = T.set_subtensor(x_[np.tril_indices(self.nb_actions)], x)
diag = K.exp(T.diag(x_)) + K.epsilon()
x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], diag)
return x_, x_.T
outputs_info = [
K.zeros((self.nb_actions, self.nb_actions)),
K.zeros((self.nb_actions, self.nb_actions)),
]
results, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
L, LT = results
elif K.backend() == 'tensorflow':
import tensorflow as tf
# Number of elements in a triangular matrix.
nb_elems = (self.nb_actions * self.nb_actions + self.nb_actions) // 2
# Create mask for the diagonal elements in L_flat. This is used to exponentiate
# only the diagonal elements, which is done before gathering.
diag_indices = [0]
for row in range(1, self.nb_actions):
diag_indices.append(diag_indices[-1] + (row + 1))
diag_mask = np.zeros(1 + nb_elems)  # +1 for the leading zero
diag_mask[np.array(diag_indices) + 1] = 1
diag_mask = K.variable(diag_mask)
# Add leading zero element to each element in the L_flat. We use this zero
# element when gathering L_flat into a lower triangular matrix L.
nb_rows = tf.shape(L_flat)[0]
zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
try:
# Old TF behavior.
L_flat = tf.concat(1, [zeros, L_flat])
except (TypeError, ValueError):
# New TF behavior
L_flat = tf.concat([zeros, L_flat], 1)
# Create mask that can be used to gather elements from L_flat and put them
# into a lower triangular matrix.
tril_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
tril_mask[np.tril_indices(self.nb_actions)] = range(1, nb_elems + 1)
# Finally, process each element of the batch.
init = [
K.zeros((self.nb_actions, self.nb_actions)),
K.zeros((self.nb_actions, self.nb_actions)),
]
def fn(a, x):
# Exponentiate everything. This is much easier than only exponentiating
# the diagonal elements, and the action space is usually relatively small.
x_ = K.exp(x) + K.epsilon()
# Only keep the diagonal elements.
x_ *= diag_mask
# Add the original, non-diagonal elements.
x_ += x * (1. - diag_mask)
# Finally, gather everything into a lower triangular matrix.
L_ = tf.gather(x_, tril_mask)
return [L_, tf.transpose(L_)]
tmp = tf.scan(fn, L_flat, initializer=init)
if isinstance(tmp, (list, tuple)):
# TensorFlow 0.10 now returns a tuple of tensors.
L, LT = tmp
else:
# Old TensorFlow < 0.10 returns a shared tensor.
L = tmp[:, 0, :, :]
LT = tmp[:, 1, :, :]
else:
raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
assert L is not None
assert LT is not None
P = K.batch_dot(L, LT)
elif self.mode == 'diag':
if K.backend() == 'theano':
import theano.tensor as T
import theano
def fn(x, P_acc):
x_ = K.zeros((self.nb_actions, self.nb_actions))
x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], x)
return x_
outputs_info = [
K.zeros((self.nb_actions, self.nb_actions)),
]
P, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
elif K.backend() == 'tensorflow':
import tensorflow as tf
# Create mask that can be used to gather elements from L_flat and put them
# into a diagonal matrix.
diag_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
diag_mask[np.diag_indices(self.nb_actions)] = range(1, self.nb_actions + 1)
# Add leading zero element to each element in the L_flat. We use this zero
# element when gathering L_flat into a lower triangular matrix L.
nb_rows = tf.shape(L_flat)[0]
zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
try:
# Old TF behavior.
L_flat = tf.concat(1, [zeros, L_flat])
except (TypeError, ValueError):
# New TF behavior
L_flat = tf.concat([zeros, L_flat], 1)
# Finally, process each element of the batch.
def fn(a, x):
x_ = tf.gather(x, diag_mask)
return x_
P = tf.scan(fn, L_flat, initializer=K.zeros((self.nb_actions, self.nb_actions)))
else:
raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
assert P is not None
assert K.ndim(P) == 3
# Combine a, mu and P into a scalar (over the batches). What we compute here is
# -.5 * (a - mu)^T * P * (a - mu), where * denotes the dot-product. Unfortunately
# TensorFlow handles vector * P somewhat suboptimally, hence we convert the vectors to
# 1xd/dx1 matrices and finally flatten the resulting 1x1 matrix into a scalar. All
# operations happen over the batch size, which is dimension 0.
prod = K.batch_dot(K.expand_dims(a - mu, 1), P)
prod = K.batch_dot(prod, K.expand_dims(a - mu, -1))
A = -.5 * K.batch_flatten(prod)
assert K.ndim(A) == 2
return A
def get_output_shape_for(self, input_shape):
return self.compute_output_shape(input_shape)
def compute_output_shape(self, input_shape):
if len(input_shape) != 3:
raise RuntimeError("Expects 3 inputs: L, mu, a")
for i, shape in enumerate(input_shape):
if len(shape) != 2:
raise RuntimeError("Input {} has {} dimensions but should have 2".format(i, len(shape)))
assert self.mode in ('full','diag')
if self.mode == 'full':
expected_elements = (self.nb_actions * self.nb_actions + self.nb_actions) // 2
elif self.mode == 'diag':
expected_elements = self.nb_actions
else:
expected_elements = None
assert expected_elements is not None
if input_shape[0][1] != expected_elements:
raise RuntimeError("Input 0 (L) should have {} elements but has {}".format(input_shape[0][1]))
if input_shape[1][1] != self.nb_actions:
raise RuntimeError(
"Input 1 (mu) should have {} elements but has {}".format(self.nb_actions, input_shape[1][1]))
if input_shape[2][1] != self.nb_actions:
raise RuntimeError(
"Input 2 (action) should have {} elements but has {}".format(self.nb_actions, input_shape[1][1]))
return input_shape[0][0], 1
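# --- Illustrative sketch (not part of the original layer) --------------------
# A NumPy-only version of what NAFLayer computes in 'full' mode for a single
# sample: L_flat is unpacked into a lower-triangular L whose diagonal is
# exponentiated, P = L . L^T is positive semi-definite, and the advantage is
# A(s, a) = -0.5 * (a - mu)^T * P * (a - mu). Inputs here are random stand-ins.
def _illustrate_naf_advantage(nb_actions=3):
    import numpy as np
    nb_elems = (nb_actions * nb_actions + nb_actions) // 2
    L_flat = np.random.randn(nb_elems)
    mu = np.random.randn(nb_actions)
    a = np.random.randn(nb_actions)
    L = np.zeros((nb_actions, nb_actions))
    L[np.tril_indices(nb_actions)] = L_flat
    L[np.diag_indices(nb_actions)] = np.exp(np.diag(L))
    P = L.dot(L.T)
    diff = a - mu
    return -0.5 * diff.dot(P).dot(diff)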
class NAFAgent(AbstractDQNAgent):
"""Write me
"""
def __init__(self, V_model, L_model, mu_model, random_process=None,
covariance_mode='full', *args, **kwargs):
super(NAFAgent, self).__init__(*args, **kwargs)
# TODO: Validate (important) input.
# Parameters.
self.random_process = random_process
self.covariance_mode = covariance_mode
# Related objects.
self.V_model = V_model
self.L_model = L_model
self.mu_model = mu_model
# State.
self.reset_states()
def update_target_model_hard(self):
self.target_V_model.set_weights(self.V_model.get_weights())
def load_weights(self, filepath):
self.combined_model.load_weights(filepath) # updates V, L and mu model since the weights are shared
self.update_target_model_hard()
def save_weights(self, filepath, overwrite=False):
self.combined_model.save_weights(filepath, overwrite=overwrite)
def reset_states(self):
if self.random_process is not None:
self.random_process.reset_states()
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.combined_model.reset_states()
self.target_V_model.reset_states()
def compile(self, optimizer, metrics=[]):
metrics += [mean_q] # register default metrics
# Create target V model. We don't need targets for mu or L.
self.target_V_model = clone_model(self.V_model, self.custom_model_objects)
self.target_V_model.compile(optimizer='sgd', loss='mse')
# Build combined model.
a_in = Input(shape=(self.nb_actions,), name='action_input')
if type(self.V_model.input) is list:
observation_shapes = [i._keras_shape[1:] for i in self.V_model.input]
else:
observation_shapes = [self.V_model.input._keras_shape[1:]]
os_in = [Input(shape=shape, name='observation_input_{}'.format(idx)) for idx, shape in enumerate(observation_shapes)]
L_out = self.L_model([a_in] + os_in)
V_out = self.V_model(os_in)
mu_out = self.mu_model(os_in)
A_out = NAFLayer(self.nb_actions, mode=self.covariance_mode)([L_out, mu_out, a_in])
combined_out = Lambda(lambda x: x[0]+x[1], output_shape=lambda x: x[0])([A_out, V_out])
combined = Model(inputs=[a_in] + os_in, outputs=[combined_out])
# Compile combined model.
if self.target_model_update < 1.:
# We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
updates = get_soft_target_model_updates(self.target_V_model, self.V_model, self.target_model_update)
optimizer = AdditionalUpdatesOptimizer(optimizer, updates)
def clipped_error(y_true, y_pred):
return K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)
combined.compile(loss=clipped_error, optimizer=optimizer, metrics=metrics)
self.combined_model = combined
self.compiled = True
def select_action(self, state):
batch = self.process_state_batch([state])
action = self.mu_model.predict_on_batch(batch).flatten()
assert action.shape == (self.nb_actions,)
# Apply noise, if a random process is set.
if self.training and self.random_process is not None:
noise = self.random_process.sample()
assert noise.shape == action.shape
action += noise
return action
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
action = self.select_action(state)
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
def backward(self, reward, terminal):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert action_batch.shape == (self.batch_size, self.nb_actions)
# Compute Q values for mini-batch update.
q_batch = self.target_V_model.predict_on_batch(state1_batch).flatten()
assert q_batch.shape == (self.batch_size,)
# Compute discounted reward.
discounted_reward_batch = self.gamma * q_batch
# Set discounted reward to zero for all states that were terminal.
discounted_reward_batch *= terminal1_batch
assert discounted_reward_batch.shape == reward_batch.shape
Rs = reward_batch + discounted_reward_batch
assert Rs.shape == (self.batch_size,)
# Finally, perform a single update on the entire batch.
if len(self.combined_model.input) == 2:
metrics = self.combined_model.train_on_batch([action_batch, state0_batch], Rs)
else:
metrics = self.combined_model.train_on_batch([action_batch] + state0_batch, Rs)
if self.processor is not None:
metrics += self.processor.metrics
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self.update_target_model_hard()
return metrics
@property
def layers(self):
return self.combined_model.layers[:]
def get_config(self):
config = super(NAFAgent, self).get_config()
config['V_model'] = get_object_config(self.V_model)
config['mu_model'] = get_object_config(self.mu_model)
config['L_model'] = get_object_config(self.L_model)
if self.compiled:
config['target_V_model'] = get_object_config(self.target_V_model)
return config
@property
def metrics_names(self):
names = self.combined_model.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
# Aliases
ContinuousDQNAgent = NAFAgent
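# --- Illustrative sketch (not part of the original module) -------------------
# NAFAgent.compile above relies on soft target updates whenever
# target_model_update < 1: conceptually every target weight tracks its online
# counterpart as w_target <- tau * w_online + (1 - tau) * w_target, which is
# what get_soft_target_model_updates builds as Keras update ops. The NumPy
# loop below only illustrates that arithmetic; the values are made up.
def _illustrate_soft_target_update(tau=1e-3, steps=5):
    import numpy as np
    w_online = np.array([1.0, 2.0, 3.0])
    w_target = np.zeros_like(w_online)
    for _ in range(steps):
        w_target = tau * w_online + (1. - tau) * w_target
    return w_target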
|
|
import json
import datetime
import urllib
import util
import common
import pagination
import archive
@route(common.PREFIX + '/live_channels_menu')
def GetLiveChannelsMenu():
oc = ObjectContainer(title2=unicode(L('Live')))
oc.add(DirectoryObject(key=Callback(GetLiveChannels, title=L('All Channels')), title=unicode(L('All Channels'))))
oc.add(DirectoryObject(key=Callback(GetLiveChannels, title=L('Favorite'), favorite_only=True),
title=unicode(L('Favorite'))))
result = service.get_live_categories()
for genre in result['data']:
name = genre['name']
category = int(genre['id'])
oc.add(DirectoryObject(
key=Callback(GetLiveChannels, title=name, category=category),
title=unicode(name)
))
return oc
@route(common.PREFIX + '/live_channels')
def GetLiveChannels(title, favorite_only=False, category=0, page=1, **params):
page = int(page)
oc = ObjectContainer(title2=unicode(title))
response = service.get_live_channels(favorite_only=favorite_only, category=category)
for index, media in enumerate(response['data']):
if index >= (page - 1) * util.get_elements_per_page() and index < page * util.get_elements_per_page():
id = media['id']
name = media['name']
thumb = media['icon']
files = media['files']
oc.add(DirectoryObject(
key=Callback(GetLiveChannel, name=name, channel_id=id, thumb=thumb, files=json.dumps(files), **params),
title=unicode(name),
thumb=Resource.ContentsOfURLWithFallback(url=thumb)
))
add_pagination_to_response(response, page)
pagination.append_controls(oc, response['data'], callback=GetLiveChannels, title=title, favorite_only=favorite_only,
page=page, **params)
return oc
@route(common.PREFIX + '/live_channel')
def GetLiveChannel(name, channel_id, thumb, files, container=False, **params):
oc = ObjectContainer(title2=unicode(name))
oc.add(MetadataObjectForURL(name, channel_id, thumb, files))
if not container:
oc.add(DirectoryObject(key=Callback(GetSchedule, channel_id=channel_id), title=unicode(L('Schedule'))))
append_controls(oc, id=channel_id, name=name, thumb=thumb)
return oc
@route(common.PREFIX + '/schedule')
def GetSchedule(channel_id):
oc = ObjectContainer(title2=unicode(L('Schedule')))
default_time = get_moscow_time()
today = default_time.date()
yesterday = today - datetime.timedelta(days=1)
tomorrow = today + datetime.timedelta(days=1)
yesterday_result = service.get_live_schedule(live_channel_id=channel_id, date=yesterday)
today_result = service.get_live_schedule(live_channel_id=channel_id, date=today)
tomorrow_result = service.get_live_schedule(live_channel_id=channel_id, date=tomorrow)
add_schedule(oc, channel_id, default_time, yesterday_result['data'])
add_schedule(oc, channel_id, default_time, today_result['data'])
add_schedule(oc, channel_id, default_time, tomorrow_result['data'])
return oc
def add_schedule(oc, channel_id, default_time, media_list):
channels = service.get_live_channels()['data']
channel = find_channel(int(channel_id), channels)
offset = service.get_offset(util.get_time_shift())
time_delta = datetime.timedelta(hours=offset)
files = channel['files']
for media in media_list:
start_time = get_time(media['start_time'])
finish_time = get_time(media['finish_time'])
current_title = in_time_range(default_time - time_delta, start_time, finish_time)
if media['media_id']:
if media['rating']:
rating = media['rating']
else:
rating = 'unknown'
key = Callback(archive.HandleChild,
id = media['media_id'],
name = media['name'],
thumb = 'thumb',
rating_key = rating,
description = media['description'],
duration = 0,
year = 0,
on_air = media['start_time'],
index = 0,
files = json.dumps(files)
)
title = get_schedule_title(media['name'], start_time, finish_time, current_title=current_title)
oc.add(DirectoryObject(key=key, title=unicode(title)))
else:
select_key = Callback(GetSchedule, channel_id=channel_id)
title = get_schedule_title(media['name'], start_time, finish_time, current_title=current_title, available=False)
oc.add(DirectoryObject(key=select_key, title=unicode(title)))
def in_time_range(actual_time, start_time, finish_time):
in_range = False
if actual_time.day == start_time.day:
if actual_time.hour == finish_time.hour:
if actual_time.minute <= finish_time.minute:
in_range = True
elif actual_time.hour == start_time.hour:
if actual_time.minute >= start_time.minute:
return True
return in_range
def get_schedule_title(name, start_time, finish_time, current_title=False, available=True):
if current_title:
left_sep = "---> "
right_sep = " <---"
else:
left_sep = ""
right_sep = ""
if not available:
left_sep = left_sep + " ***"
right_sep = right_sep + " ***"
prefix = str(start_time)[11:16] + " - " + str(finish_time)[11:16] + " : "
return prefix + left_sep + name + right_sep
def get_time(value):
return datetime.datetime.strptime(value.replace('T', ' '), '%Y-%m-%d %H:%M:%S')
def get_moscow_time():
utc_datetime = datetime.datetime.utcnow()
time = datetime.datetime.strptime(str(utc_datetime), '%Y-%m-%d %H:%M:%S.%f')
return time + datetime.timedelta(hours=3)
def append_controls(oc, **params):
favorite_channels = service.get_live_channels(favorite_only=True)['data']
favorite_channel = find_channel(int(params['id']), favorite_channels)
if favorite_channel:
oc.add(DirectoryObject(
key=Callback(HandleRemoveFavoriteChannel, type=type, **params),
title=unicode(L('Remove Favorite')),
thumb=R(common.REMOVE_ICON)
))
else:
oc.add(DirectoryObject(
key=Callback(HandleAddFavoriteChannel, type=type, **params),
title=unicode(L('Add Favorite')),
thumb=R(common.ADD_ICON)
))
def find_channel(id, favorite_channels):
found = None
for media in favorite_channels:
if id == media['id']:
found = media
break
return found
@route(common.PREFIX + '/add_favorite_channel')
def HandleAddFavoriteChannel(**params):
service.add_favorite_channel(params['id'])
return ObjectContainer(header=unicode(L(params['name'])), message=unicode(L('Favorite Added')))
@route(common.PREFIX + '/remove_favorite_channel')
def HandleRemoveFavoriteChannel(**params):
service.remove_favorite_channel(params['id'])
return ObjectContainer(header=unicode(L(params['name'])), message=unicode(L('Favorite Removed')))
def add_pagination_to_response(response, page):
pages = len(response['data']) / util.get_elements_per_page()
response['data'] = {'pagination': {
'page': page,
'pages': pages,
'has_next': page < pages,
'has_previous': page > 1
}}
def MetadataObjectForURL(name, channel_id, thumb, files):
video = MovieObject(
rating_key='rating_key',
title=unicode(name),
thumb=thumb,
art=thumb
)
video.key = Callback(GetLiveChannel, name=name, channel_id=channel_id, thumb=thumb, files=files, container=True)
offset = service.get_offset(util.get_time_shift())
format = 'mp4'
quality_level = util.get_quality_level()
# Log(Client.Platform in util.RAW_HLS_CLIENTS)
# Log(Client.Product) # Plex Web
# Log(Client.Platform) # Safari
# if Client.Platform == 'Chrome':
# quality_level = util.get_quality_level()
# else:
# quality_level = None
files = json.loads(urllib.unquote_plus(files))
bitrates = service.bitrates(files, accepted_format=format, quality_level=quality_level)
video.items.extend(MediaObjectsForURL(bitrates, channel_id, offset, format))
return video
def MediaObjectsForURL(bitrates, channel_id, offset, format):
items = []
media_objects = []
for bitrate in sorted(bitrates[format], reverse=True):
#video_resolution = service.bitrate_to_resolution(bitrate)[0]
play_callback = Callback(PlayLive, channel_id=channel_id, bitrate=bitrate, format=format, offset=offset)
config = {
"video_codec" : VideoCodec.H264,
"protocol": Protocol.HLS,
"container": Container.MPEGTS,
"video_resolution": bitrate
}
media_object = builder.build_media_object(play_callback, config)
media_objects.append(media_object)
items.extend(media_objects)
return items
@indirect
@route(common.PREFIX + '/play_live')
def PlayLive(channel_id, bitrate, format, offset):
response = service.get_url(None, channel_id=channel_id, bitrate=bitrate, format=format, live=True,
offset=offset, other_server=util.other_server())
url = response['url']
if not url:
util.no_contents()
else:
return IndirectResponse(MovieObject, key=HTTPLiveStreamURL(url))
@route(common.PREFIX + '/Playlist')
def Playlist(url):
return service.get_play_list(url)
|
|
"""Support gathering system information of hosts which are running glances."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, CONF_USERNAME, CONF_PASSWORD, CONF_SSL,
CONF_VERIFY_SSL, CONF_RESOURCES, STATE_UNAVAILABLE, TEMP_CELSIUS)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_VERSION = 'version'
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Glances'
DEFAULT_PORT = '61208'
DEFAULT_VERSION = 2
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
SENSOR_TYPES = {
'disk_use_percent': ['Disk used', '%', 'mdi:harddisk'],
'disk_use': ['Disk used', 'GiB', 'mdi:harddisk'],
'disk_free': ['Disk free', 'GiB', 'mdi:harddisk'],
'memory_use_percent': ['RAM used', '%', 'mdi:memory'],
'memory_use': ['RAM used', 'MiB', 'mdi:memory'],
'memory_free': ['RAM free', 'MiB', 'mdi:memory'],
'swap_use_percent': ['Swap used', '%', 'mdi:memory'],
'swap_use': ['Swap used', 'GiB', 'mdi:memory'],
'swap_free': ['Swap free', 'GiB', 'mdi:memory'],
'processor_load': ['CPU load', '15 min', 'mdi:memory'],
'process_running': ['Running', 'Count', 'mdi:memory'],
'process_total': ['Total', 'Count', 'mdi:memory'],
'process_thread': ['Thread', 'Count', 'mdi:memory'],
'process_sleeping': ['Sleeping', 'Count', 'mdi:memory'],
'cpu_use_percent': ['CPU used', '%', 'mdi:memory'],
'cpu_temp': ['CPU Temp', TEMP_CELSIUS, 'mdi:thermometer'],
'docker_active': ['Containers active', '', 'mdi:docker'],
'docker_cpu_use': ['Containers CPU used', '%', 'mdi:docker'],
'docker_memory_use': ['Containers RAM used', 'MiB', 'mdi:docker'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Optional(CONF_RESOURCES, default=['disk_use']):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): vol.In([2, 3]),
})
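# Illustrative only: platform-specific options that satisfy the schema above.
# In a real setup these keys would appear in configuration.yaml under a
# `sensor` entry together with the usual `platform:` key required by the base
# PLATFORM_SCHEMA; host and resource values here are made up.
EXAMPLE_GLANCES_CONFIG = {
    CONF_HOST: 'glances.local',
    CONF_PORT: 61208,
    CONF_NAME: 'Glances',
    CONF_SSL: False,
    CONF_VERIFY_SSL: True,
    CONF_RESOURCES: ['disk_use_percent', 'memory_use_percent', 'cpu_temp'],
    CONF_VERSION: 3,
}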
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the Glances sensors."""
from glances_api import Glances
name = config[CONF_NAME]
host = config[CONF_HOST]
port = config[CONF_PORT]
version = config[CONF_VERSION]
var_conf = config[CONF_RESOURCES]
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
ssl = config[CONF_SSL]
verify_ssl = config[CONF_VERIFY_SSL]
session = async_get_clientsession(hass, verify_ssl)
glances = GlancesData(
Glances(hass.loop, session, host=host, port=port, version=version,
username=username, password=password, ssl=ssl))
await glances.async_update()
if glances.api.data is None:
raise PlatformNotReady
dev = []
for resource in var_conf:
dev.append(GlancesSensor(glances, name, resource))
async_add_entities(dev, True)
class GlancesSensor(Entity):
"""Implementation of a Glances sensor."""
def __init__(self, glances, name, sensor_type):
"""Initialize the sensor."""
self.glances = glances
self._name = name
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self._name, SENSOR_TYPES[self.type][0])
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self.glances.available
@property
def state(self):
"""Return the state of the resources."""
return self._state
async def async_update(self):
"""Get the latest data from REST API."""
await self.glances.async_update()
value = self.glances.api.data
if value is not None:
if self.type == 'disk_use_percent':
self._state = value['fs'][0]['percent']
elif self.type == 'disk_use':
self._state = round(value['fs'][0]['used'] / 1024**3, 1)
elif self.type == 'disk_free':
try:
self._state = round(value['fs'][0]['free'] / 1024**3, 1)
except KeyError:
self._state = round((value['fs'][0]['size'] -
value['fs'][0]['used']) / 1024**3, 1)
elif self.type == 'memory_use_percent':
self._state = value['mem']['percent']
elif self.type == 'memory_use':
self._state = round(value['mem']['used'] / 1024**2, 1)
elif self.type == 'memory_free':
self._state = round(value['mem']['free'] / 1024**2, 1)
elif self.type == 'swap_use_percent':
self._state = value['memswap']['percent']
elif self.type == 'swap_use':
self._state = round(value['memswap']['used'] / 1024**3, 1)
elif self.type == 'swap_free':
self._state = round(value['memswap']['free'] / 1024**3, 1)
elif self.type == 'processor_load':
# Windows systems don't provide load details
try:
self._state = value['load']['min15']
except KeyError:
self._state = value['cpu']['total']
elif self.type == 'process_running':
self._state = value['processcount']['running']
elif self.type == 'process_total':
self._state = value['processcount']['total']
elif self.type == 'process_thread':
self._state = value['processcount']['thread']
elif self.type == 'process_sleeping':
self._state = value['processcount']['sleeping']
elif self.type == 'cpu_use_percent':
self._state = value['quicklook']['cpu']
elif self.type == 'cpu_temp':
for sensor in value['sensors']:
if sensor['label'] in ['CPU', "Package id 0",
"Physical id 0", "cpu-thermal 1",
"exynos-therm 1", "soc_thermal 1"]:
self._state = sensor['value']
elif self.type == 'docker_active':
count = 0
try:
for container in value['docker']['containers']:
if container['Status'] == 'running' or \
'Up' in container['Status']:
count += 1
self._state = count
except KeyError:
self._state = count
elif self.type == 'docker_cpu_use':
cpu_use = 0.0
try:
for container in value['docker']['containers']:
if container['Status'] == 'running' or \
'Up' in container['Status']:
cpu_use += container['cpu']['total']
self._state = round(cpu_use, 1)
except KeyError:
self._state = STATE_UNAVAILABLE
elif self.type == 'docker_memory_use':
mem_use = 0.0
try:
for container in value['docker']['containers']:
if container['Status'] == 'running' or \
'Up' in container['Status']:
mem_use += container['memory']['usage']
self._state = round(mem_use / 1024**2, 1)
except KeyError:
self._state = STATE_UNAVAILABLE
class GlancesData:
"""The class for handling the data retrieval."""
def __init__(self, api):
"""Initialize the data object."""
self.api = api
self.available = True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from the Glances REST API."""
from glances_api.exceptions import GlancesApiError
try:
await self.api.get_data()
self.available = True
except GlancesApiError:
_LOGGER.error("Unable to fetch data from Glances")
self.available = False
|
|
import StringIO
def addNodes():
addNode("ArrayType", "Expr", "len", "Expr", "elt", "Expr")
addNode(
"AssignStmt",
"Stmt",
"lhs",
"[]Expr",
"rhs",
"[]Expr",
"define",
"bool")
addNode("BadDecl", "Decl")
addNode("BadExpr", "Expr")
addNode("BadStmt", "Stmt")
addNode("BasicLit", "Expr", "value", "Token")
addNode("BinaryExpr", "Expr", "x", "Expr", "y", "Expr", "op", "TokenType")
addNode("BlockStmt", "Stmt", "list", "[]Stmt")
addNode("Ident", "Expr", "name", "Token")
addNode("BranchStmt", "Stmt", "label", "Ident", "tok", "TokenType")
addNode(
"CallExpr",
"Expr",
"fun",
"Expr",
"args",
"[]Expr",
"ellipsis",
"bool")
addNode("CaseClause", "Stmt", "list", "[]Expr", "body", "[]Stmt")
addNode("ChanType", "Expr", "dir", "ChanDir", "value", "Expr")
addNode("CommClause", "Stmt", "comm", "Stmt", "body", "[]Stmt")
addNode("CompositeLit", "Expr", "type", "Expr", "elts", "[]Expr")
addNode("DeclStmt", "Stmt", "decl", "Decl")
addNode("DeferStmt", "Stmt", "call", "CallExpr")
addNode("Ellipsis", "Expr", "elt", "Expr")
addNode("EmptyStmt", "Stmt")
addNode("ExprStmt", "Stmt", "x", "Expr")
addNode(
"Field",
"Node",
"names",
"[]Ident",
"type",
"Expr",
"tag",
"BasicLit")
addNode("FieldList", "Node", "list", "[]Field")
addNode(
"ForStmt",
"Stmt",
"init",
"Stmt",
"cond",
"Expr",
"post",
"Stmt",
"body",
"BlockStmt")
addNode("FuncType", "Expr", "params", "FieldList", "results", "FieldList")
addNode(
"FuncDecl",
"Decl",
"recv",
"FieldList",
"name",
"Ident",
"type",
"FuncType",
"body",
"BlockStmt")
addNode("FuncLit", "Expr", "type", "FuncType", "body", "BlockStmt")
addNode("GenDecl", "Decl", "tok", "TokenType", "specs", "[]Spec")
addNode("GoStmt", "Stmt", "call", "CallExpr")
addNode(
"IfStmt",
"Stmt",
"init",
"Stmt",
"cond",
"Expr",
"body",
"BlockStmt",
"els",
"Stmt")
addNode("ImportSpec", "Spec", "name", "Ident", "path", "BasicLit")
addNode("IncDecStmt", "Stmt", "x", "Expr", "tok", "TokenType")
addNode("IndexExpr", "Expr", "x", "Expr", "index", "Expr")
addNode("InterfaceType", "Expr", "methods", "FieldList")
addNode("KeyValueExpr", "Expr", "key", "Expr", "value", "Expr")
addNode("LabeledStmt", "Stmt", "label", "Ident", "stmt", "Stmt")
addNode("MapType", "Expr", "key", "Expr", "value", "Expr")
addNode("ParenExpr", "Expr", "x", "Expr")
addNode(
"RangeStmt",
"Stmt",
"key",
"Expr",
"value",
"Expr",
"define",
"bool",
"x",
"Expr",
"body",
"BlockStmt")
addNode("ReturnStmt", "Stmt", "results", "[]Expr")
addNode("SelectStmt", "Stmt", "body", "BlockStmt")
addNode("SelectorExpr", "Expr", "x", "Expr", "sel", "Ident")
addNode("SendStmt", "Stmt", "chan", "Expr", "value", "Expr")
addNode(
"SliceExpr",
"Expr",
"x",
"Expr",
"low",
"Expr",
"high",
"Expr",
"max",
"Expr",
"slice3",
"bool")
addNode("StarExpr", "Expr", "x", "Expr")
addNode("StructType", "Expr", "fields", "FieldList")
addNode(
"SwitchStmt",
"Stmt",
"init",
"Stmt",
"tag",
"Expr",
"body",
"BlockStmt")
addNode("TypeAssertExpr", "Expr", "x", "Expr", "type", "Expr")
addNode("TypeSpec", "Spec", "name", "Ident", "type", "Expr")
addNode(
"TypeSwitchStmt",
"Stmt",
"init",
"Stmt",
"assign",
"Stmt",
"body",
"BlockStmt")
addNode("UnaryExpr", "Expr", "op", "TokenType", "x", "Expr")
addNode(
"ValueSpec",
"Spec",
"names",
"[]Ident",
"type",
"Expr",
"values",
"[]Expr")
addParent("Decl", "Node")
addParent("Expr", "Node")
addParent("Spec", "Node")
addParent("Stmt", "Node")
class Member(object):
def __init__(self, name, typename):
self.title = name.title()
self.sname = name
self.mname = 'm_' + name
self.is_list = typename.startswith("[]")
self.is_value = isValueType(typename)
if self.is_value:
self.argtype = typename
self.mtype = typename
elif self.is_list:
self.argtype = 'GoAST' + typename[2:]
self.mtype = 'std::vector<std::unique_ptr<%s> >' % self.argtype
else:
self.argtype = 'GoAST' + typename
self.mtype = 'std::unique_ptr<%s>' % self.argtype
self.mname = self.mname + '_up'
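# Illustrative only: how Member maps the generator's type strings onto the C++
# members emitted below. The assertions simply restate what __init__ produces
# for typical inputs (a value type, a node pointer, and a []T list).
def _illustrate_member_mapping():
    assert Member('define', 'bool').mtype == 'bool'
    assert Member('body', 'BlockStmt').mtype == 'std::unique_ptr<GoASTBlockStmt>'
    assert Member('body', 'BlockStmt').mname == 'm_body_up'
    assert Member('list', '[]Stmt').mtype == 'std::vector<std::unique_ptr<GoASTStmt> >'
    return True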
kinds = {}
parentClasses = StringIO.StringIO()
childClasses = StringIO.StringIO()
walker = StringIO.StringIO()
def startClass(name, parent, out):
out.write("""
class GoAST%s : public GoAST%s
{
public:
""" % (name, parent))
def endClass(name, out):
out.write("""
%(name)s(const %(name)s &) = delete;
const %(name)s &operator=(const %(name)s &) = delete;
};
""" % {'name': 'GoAST' + name})
def addNode(name, parent, *children):
startClass(name, parent, childClasses)
l = kinds.setdefault(parent, [])
l.append(name)
children = createMembers(name, children)
addConstructor(name, parent, children)
childClasses.write("""
const char *
GetKindName() const override
{
return "%(name)s";
}
static bool
classof(const GoASTNode *n)
{
return n->GetKind() == e%(name)s;
}
""" % {'name': name})
addChildren(name, children)
endClass(name, childClasses)
def isValueType(typename):
if typename[0].islower():
return True
if typename[0].isupper():
return typename.startswith('Token') or typename == 'ChanDir'
return False
def createMembers(name, children):
l = len(children)
if (l % 2) != 0:
raise Exception("Invalid children for %s: %s" % (name, children))
return [Member(children[i], children[i + 1]) for i in xrange(0, l, 2)]
def addConstructor(name, parent, children):
for c in children:
if c.is_list:
children = [x for x in children if x.is_value]
break
childClasses.write(' ')
if len(children) == 1:
childClasses.write('explicit ')
childClasses.write('GoAST%s(' % name)
for i in xrange(len(children)):
if i > 0:
childClasses.write(', ')
c = children[i]
if c.is_value:
childClasses.write(c.argtype)
childClasses.write(' ')
else:
childClasses.write('%s *' % c.argtype)
childClasses.write(c.sname)
childClasses.write(') : GoAST%s(e%s)' % (parent, name))
for c in children:
childClasses.write(', ')
childClasses.write('%(mname)s(%(sname)s)' % c.__dict__)
childClasses.write(""" {}
~GoAST%s() override = default;
""" % name)
def addChildren(name, children):
if len(children) == 0:
return
walker.write("""
case e%(n)s:
{
GoAST%(n)s *n = llvm::cast<GoAST%(n)s>(this);
(void)n;""" % {'n': name})
for c in children:
if c.is_list:
childClasses.write("""
size_t
Num%(title)s() const
{
return %(mname)s.size();
}
const %(argtype)s *
Get%(title)s(int i) const
{
return %(mname)s[i].get();
}
void
Add%(title)s(%(argtype)s *%(sname)s)
{
%(mname)s.push_back(std::unique_ptr<%(argtype)s>(%(sname)s));
}
""" % c.__dict__)
walker.write("""
for (auto& e : n->%s) { v(e.get()); }""" % c.mname)
else:
const = ''
get = ''
set = ''
t = c.argtype
if isValueType(t):
set = '%(mname)s = %(sname)s' % c.__dict__
t = t + ' '
else:
t = t + ' *'
const = 'const '
get = '.get()'
set = '%(mname)s.reset(%(sname)s)' % c.__dict__
walker.write("""
v(n->%s.get());""" % c.mname)
childClasses.write("""
%(const)s%(type)s
Get%(title)s() const
{
return %(mname)s%(get)s;
}
void
Set%(title)s(%(type)s%(sname)s)
{
%(set)s;
}
""" % {'const': const, 'title': c.title, 'sname': c.sname, 'get': get, 'set': set, 'type': t, 'mname': c.mname})
childClasses.write('\n private:\n friend class GoASTNode;\n')
walker.write("""
return;
}""")
for c in children:
childClasses.write(' %s %s;\n' % (c.mtype, c.mname))
def addParent(name, parent):
startClass(name, parent, parentClasses)
l = kinds[name]
minName = l[0]
maxName = l[-1]
parentClasses.write(""" template <typename R, typename V> R Visit(V *v) const;
static bool
classof(const GoASTNode *n)
{
return n->GetKind() >= e%s && n->GetKind() <= e%s;
}
protected:
explicit GoAST%s(NodeKind kind) : GoASTNode(kind) { }
private:
""" % (minName, maxName, name))
endClass(name, parentClasses)
addNodes()
print """//===-- GoAST.h -------------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// DO NOT EDIT.
// Generated by gen_go_ast.py
#ifndef liblldb_GoAST_h
#define liblldb_GoAST_h
#include "lldb/lldb-forward.h"
#include "lldb/lldb-private.h"
#include "llvm/Support/Casting.h"
#include "Plugins/ExpressionParser/Go/GoLexer.h"
namespace lldb_private
{
class GoASTNode
{
public:
typedef GoLexer::TokenType TokenType;
typedef GoLexer::Token Token;
enum ChanDir
{
eChanBidir,
eChanSend,
eChanRecv,
};
enum NodeKind
{"""
for l in kinds.itervalues():
for x in l:
print " e%s," % x
print """ };
virtual ~GoASTNode() = default;
NodeKind
GetKind() const
{
return m_kind;
}
virtual const char *GetKindName() const = 0;
template <typename V> void WalkChildren(V &v);
protected:
explicit GoASTNode(NodeKind kind) : m_kind(kind) { }
private:
const NodeKind m_kind;
GoASTNode(const GoASTNode &) = delete;
const GoASTNode &operator=(const GoASTNode &) = delete;
};
"""
print parentClasses.getvalue()
print childClasses.getvalue()
for k, l in kinds.iteritems():
if k == 'Node':
continue
print """
template <typename R, typename V>
R GoAST%s::Visit(V* v) const
{
switch(GetKind())
{""" % k
for subtype in l:
print """ case e%(n)s:
return v->Visit%(n)s(llvm::cast<const GoAST%(n)s>(this));""" % {'n': subtype}
print """ default:
assert(false && "Invalid kind");
}
}"""
print """
template <typename V>
void GoASTNode::WalkChildren(V &v)
{
switch (m_kind)
{
"""
print walker.getvalue()
print"""
case eEmptyStmt:
case eBadDecl:
case eBadExpr:
case eBadStmt:
break;
}
}
} // namespace lldb_private
#endif
"""
|
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hmac
import hashlib
import time
import ssl
import os
from cryptography import x509
import cryptography.exceptions
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.x509.oid import NameOID
from google.cloud import datastore
import OpenSSL.crypto
DOMAIN_PEM = '/tmp/domain.pem'
DOMAIN_KEY = '/tmp/domain.key'
ISSUER = x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u'hrefin'),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u'Href Ltd.'),
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u'Default CA Deployment'),
])
def get_ca_cert():
try:
return open('ca.pem').read()
except IOError:
return None
def get_ca_key():
try:
return open('ca.key').read()
except IOError:
return None
def verifyCallback(connection, cert, errnum, errdepth, ok):
if not ok:
der = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, cert.to_cryptography())
issuer_public_key = serialization.load_pem_private_key(get_ca_key(), None, default_backend()).public_key()
cert_to_check = x509.load_der_x509_certificate(der, default_backend())
try:
issuer_public_key.verify(
cert_to_check.signature,
cert_to_check.tbs_certificate_bytes,
padding.PKCS1v15(),
cert_to_check.signature_hash_algorithm,
)
except cryptography.exceptions.InvalidSignature:
return False
return True
return True
def build_csr(domain):
try:
# Reuse the on-disk key material if it exists and parses; otherwise fall
# through and generate a fresh key below.
private_key = serialization.load_pem_private_key(open(DOMAIN_KEY, 'rb').read(), None, default_backend())
except (IOError, ValueError):
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
with open(DOMAIN_KEY, "wb") as f:
f.write(private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
))
csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([
x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"CA"),
x509.NameAttribute(NameOID.LOCALITY_NAME, u"San Francisco"),
x509.NameAttribute(NameOID.COMMON_NAME, unicode(domain)),
])).sign(private_key, hashes.SHA256(), default_backend())
return csr
def setup_ca():
ds = datastore.Client()
if get_ca_cert():
return
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = private_key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(ISSUER)
builder = builder.issuer_name(ISSUER)
builder = builder.not_valid_before(datetime.datetime(2018, 1, 1))
builder = builder.not_valid_after(datetime.datetime(2019, 1, 1))
builder = builder.serial_number(1337)
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=private_key, algorithm=hashes.SHA1(),
backend=default_backend()
)
with open("ca.key", "wb") as f:
f.write(private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
))
with open("ca.pem", "wb") as f:
f.write(certificate.public_bytes(encoding=serialization.Encoding.PEM))
def setup_domain(domain):
ca_key = get_ca_key()
assert ca_key
ca_key = serialization.load_pem_private_key(ca_key, None, default_backend())
csr = build_csr(domain)
builder = x509.CertificateBuilder()
builder = builder.subject_name(csr.subject)
builder = builder.issuer_name(ISSUER)
builder = builder.not_valid_before(datetime.datetime(2018, 1, 1))
builder = builder.not_valid_after(datetime.datetime(2019, 1, 1))
builder = builder.serial_number(31337)
builder = builder.public_key(csr.public_key())
builder = builder.add_extension(
x509.BasicConstraints(ca=False, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=ca_key, algorithm=hashes.SHA256(),
backend=default_backend()
)
with open(DOMAIN_PEM, "wb") as f:
f.write(certificate.public_bytes(
encoding=serialization.Encoding.PEM,
))
def setup(domain):
if not os.path.exists("ca.key") or not os.path.exists("ca.pem"):
setup_ca()
if not os.path.exists(DOMAIN_KEY) or not os.path.exists(DOMAIN_PEM):
setup_domain(domain)
def get_subject_dict(subject):
res = {'company': 'None', 'country': 'AA'}
for attr in subject:
if attr.oid == NameOID.COMMON_NAME:
res['name'] = attr.value
if attr.oid == NameOID.ORGANIZATION_NAME:
res['company'] = attr.value
if attr.oid == NameOID.COUNTRY_NAME:
res['country'] = attr.value
return res
def parse_cert(cert, der=False):
if der:
cert = x509.load_der_x509_certificate(cert, default_backend())
else:
cert = x509.load_pem_x509_certificate(str(cert), default_backend())
return get_subject_dict(cert.subject)
def parse_csr(csr):
csr = x509.load_pem_x509_csr(str(csr), default_backend())
return get_subject_dict(csr.subject)
def gen_serial():
# changes every 20min
seed = int(time.time()) / 60 / 20
h = hmac.new('serialzkey!!', str(seed), hashlib.sha256).hexdigest()
return int(h[:16], 16) | 1 << 64
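# Illustrative only: gen_serial is deterministic within one 20-minute bucket
# and, because of the `| 1 << 64`, always has bit 64 set. The check below just
# demonstrates those two properties.
def _illustrate_serial_properties():
    bucket_before = int(time.time()) / 60 / 20
    s1 = gen_serial()
    s2 = gen_serial()
    bucket_after = int(time.time()) / 60 / 20
    if bucket_before == bucket_after:
        assert s1 == s2  # same bucket, same serial
    assert s1 >= 1 << 64  # bit 64 is always set
    return s1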
def sign_csr(csr):
csr = x509.load_pem_x509_csr(str(csr), default_backend())
ca_key = serialization.load_pem_private_key(get_ca_key(), None, default_backend())
if csr.public_key().key_size > 2048:
return {'error': 'key_size'}
if len(csr.subject.public_bytes(default_backend())) > 350:
return {'error': 'subject_size'}
if not csr.is_signature_valid:
return {'error': 'invalid_signature'}
if len(csr.subject.get_attributes_for_oid(NameOID.COMMON_NAME)) != 1:
return {'error': 'invalid_name'}
for attr in csr.subject.get_attributes_for_oid(NameOID.COMMON_NAME):
username = attr.value
if username.lower() == 'admin':
return {'error': 'invalid_name'}
builder = x509.CertificateBuilder()
builder = builder.subject_name(csr.subject)
builder = builder.issuer_name(ISSUER)
builder = builder.not_valid_before(datetime.datetime(2018, 1, 1))
builder = builder.not_valid_after(datetime.datetime(2019, 1, 1))
builder = builder.serial_number(gen_serial())
builder = builder.public_key(csr.public_key())
builder = builder.add_extension(
x509.BasicConstraints(ca=False, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=ca_key, algorithm=hashes.MD5(),
backend=default_backend()
)
return {'result': certificate.public_bytes(encoding=serialization.Encoding.PEM)}
|
|
#!/usr/bin/env python
# Just handle unary rules, working out when one is being used
import re
import category
# Each entry: (from category, to category, keep original dependencies, activated by extra flag, conversion rules)
UNARIES = [
('S[adj]\NP','NP\NP',False,False,[
'(ADJP 0)',
'(NP {1} 0) arg:(NP PP ...):',
'(NP 1 0) arg:default:']),
('S[to]\NP','NP\NP',True,False,[
'{(TEMP 0)}',
'(NP {1} (SBAR 0)) arg:(NP PP ...):',
'(NP 1 (SBAR 0)) arg:default:']),
('S[dcl]/NP','NP\NP',True,False,[
'(SBAR 0)',
'(NP {1} 0) arg:(NP PP ...):',
'(NP 1 0) arg:default:']),
('(S[to]\NP)/NP','NP\NP',True,True,[]),
('S[dcl]','NP\NP',False,True,[]),
('S[pss]\NP','S/S',False,True,[]),
('S[ng]\NP','S/S',False,False,[
'(S 0)',
'(S* 0 {1})']),
('S[adj]\NP','S/S',False,True,[]),
('S[ng]\NP','S\S',False,True,[]),
('S[dcl]','S\S',False,True,[]),
('S/S','S\S',False,False,[]),
('S[to]\NP','S/S',False,True,[]),
('S[pss]\NP','(S\NP)\(S\NP)',False,True,[]),
('S[ng]\NP','(S\NP)\(S\NP)',False,False,[
'(S 0)',
'(VP {1} 0)',
'(S 1 0)']),
('S[adj]\NP','(S\NP)\(S\NP)',False,False,[
'(S (ADJP 0))',
'(VP {1} 0)',
'(S 1 0)']),
('S[to]\NP','(S\NP)\(S\NP)',False,False,[
'(S 0)',
'(VP {1} 0)',
'(S 1 0)']),
('S[ng]\NP','NP',False,True,[
'(S {0})']),
('N','NP',False,False,[
'(NP {0})']),
# Need to implement filtering based on self...
### ('N','NP',False,False,[
### '{(TEMP 0)}',
### '(QP {0}) self:(... QP):',
### '(QP 0) self:(... CD):',
### '(NP {0}) self:default:']),
('S[ng]\NP','(S\NP)/(S\NP)',False,True,[]),
('S[to]\NP','N\N',True,False,[]),
('NP','NP/(NP\NP)',False,True,[]),
('S[dcl][conj]','S[dcl]',False,False,[
'{(TEMP 0)}']),
('PP','(S\NP)\((S\NP)/PP)',False,False,[]),
('S[to]\NP','(S\NP)\((S\NP)/(S[to]\NP))',False,False,[]),
('S[adj]\NP','(S\NP)\((S\NP)/(S[adj]\NP))',False,False,[]),
('NP','S/(S\NP)',False,False,[]),
('NP','(S\NP)\((S\NP)/NP)',False,False,[]),
('NP','((S\NP)/NP)\(((S\NP)/NP)/NP)',False,False,[]),
('NP','((S\NP)/(S[to]\NP))\(((S\NP)/(S[to]\NP))/NP)',False,False,[]),
('NP','((S\NP)/PP)\(((S\NP)/PP)/NP)',False,False,[]),
('NP','((S\NP)/(S[adj]\NP))\(((S\NP)/(S[adj]\NP))/NP)',False,False,[]),
('NP','S/(S/NP)',False,False,[
'{(TEMP 0)}',
'(S 0 {1})']),
('S[dcl]','((S\NP)\(S\NP))\((S\NP)\(S\NP))',False,False,[
'(SBAR 0)',
'(NP 1 0)',
'(VP {1} 0)',
'(S 1 0)']),
('S[X]\NP','NP\NP',True,False,[])
]
def get_unary(start_cat, end_cat, markedup=None):
# Note: PP_qus - for questions only, ignored for now
for unary in UNARIES:
start = unary[0]
end_markup = unary[1]
end = category.strip_braces(end_markup)
keep_deps = unary[2]
extra = unary[3]
rules = unary[4]
if category.compare(start_cat, start):
if category.compare(end_cat, end):
if len(rules) > 0:
return rules
elif markedup is not None:
if end in markedup:
return markedup[end][1:]
end_no_brac = category.strip_square_brackets(end)
if end_no_brac in markedup:
return markedup[end_no_brac][1:]
else:
return []
return None
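# Illustrative only: a lookup against the UNARIES table above. This assumes
# category.compare treats textually identical categories as equal, in which
# case the first entry's conversion rules are returned directly:
# ['(ADJP 0)', '(NP {1} 0) arg:(NP PP ...):', '(NP 1 0) arg:default:']
def _illustrate_get_unary():
    return get_unary('S[adj]\\NP', 'NP\\NP')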
BINARIES = [
(',','NP','(S\NP)\(S\NP)',False,[
'(ADVP {0} 1)',
'(VP {1} {0})',
'(S 1 0)']),
('NP',',','S/S',False,[
'(S (S 0) 1)',
'(S* {0} 1)']),
('S[dcl]\S[dcl]',',','S/S',False,[
'(PRN (SINV 0) 1)',
'(S* 0 1)']),
('S[dcl]/S[dcl]',',','(S\NP)/(S\NP)',False,[
'(S 0 1)',
'(S {0} 1)',
'(S 1 {0})']),
('S[dcl]/S[dcl]',',','(S\NP)\(S\NP)',False,[
'(S 0 1)',
'(S 1 {0})',
'(S 1 {0})']),
('S[dcl]/S[dcl]',',','S/S',False,[
'(S 0 1)',
'(S* {0} {1})']),
('S[dcl]/S[dcl]',',','S\S',False,[
'(S 0 1)',
'(S* {1} {0})']),
# not generated by C&C
('S[dcl]',',','S/S',False,[
'(S {0} 1)',
'(S* 0 {1})']),
('S[dcl]',',','S\S',False,[
'(S (PRN 0) 1)',
'(S* {1} {0})']),
('S[dcl]',',','NP\NP',False,[
'(S {0} 1)',
'(NP 1 0)']),
('S[adj]\NP',',','NP\NP',False,[
'(S {0} 1)',
'(NP 1 0)']),
('S[dcl]',',','(S\NP)\(S\NP)',False,[
'(S 0 1)',
'(VP {1} 0)',
'(S 1 0)']),
('((S[pss]\NP)/PP)/NP','(S\NP)\(S\NP)','((S[pss]\NP)/PP)/NP',False,[
'(VP {0} 1)',
'(VP {0} 3)',
'(VP {0} 2)',
'(S 1 0)']),
('S[dcl]/S[dcl]',',','NP\NP',False,[
'(S {0} 1)',
'(NP 1 0)']),
('S[dcl]\S[dcl]',',','(S\NP)\(S\NP)',False,[
'{(TEMP 0 1)}',
'(VP {1} 0)',
'(S 1 0)']),
('S[dcl]\S[dcl]',',','(S\NP)/(S\NP)',False,[
'(PRN (SINV 0) 1)',
'(S 0 1)',
'(S 1 {0})'])
### ('S[dcl]\S[dcl]',',','S\S',False,[])
### ('((S[dcl]\NP)/PP)/NP','(S\NP)\(S\NP)','((S[dcl]\NP)/PP)/NP',False,[])
### ('((S[dcl]\NP[expl])/(S[to]\NP))/(S[adj]\NP)','(S\NP)\(S\NP)','((S[dcl]\NP[expl])/','(S[to]\NP))/(S[adj]\NP)',False,[])
### ('((S[dcl]\NP[expl])/(S[to]\NP))/NP','(S\NP)\(S\NP)','((S[dcl]\NP[expl])/(S[to]\NP))/NP',False,[])
### ('((S[dcl]\NP[expl])/S[dcl])/(S[adj]\NP)','(S\NP)\(S\NP)','((S[dcl]\NP[expl])/S[dcl])/','(S[adj]\NP)',False,[])
### ('((S[dcl]\NP[expl])/S[dcl])/NP','(S\NP)\(S\NP)','((S[dcl]\NP[expl])/S[dcl])/NP',False,[])
### ('((S[dcl]\NP[expl])/S[qem])/(S[adj]\NP)','(S\NP)\(S\NP)','((S[dcl]\NP[expl])/S[qem])/','(S[adj]\NP)',False,[])
### ('((S[ng]\NP)/PP)/NP','(S\NP)\(S\NP)','((S[ng]\NP)/PP)/NP',False,[])
### ('(S[dcl]\(S[to]\NP))/(S[b]\NP)','S\S','(S[dcl]\(S[to]\NP))/(S[b]\NP)',False,[])
### ('(S[dcl]\S[dcl])\NP','S\S','(S[dcl]\S[dcl])\NP',False,[])
### ('(S[q]/(S[b]\NP))/NP','S\S','(S[q]/(S[b]\NP))/NP',False,[])
### ('(S\NP)/(S\NP)','(S[ng]\NP)\(S[adj]\NP)','(S[ng]\NP)\(S[adj]\NP)',False,['(VP 0 1)','(ADJP 1 0)','(S 1 0)'])
]
def get_binary_for_markedup(left, right, result, markedup=None, flexible=False):
for binary in BINARIES:
if category.compare(left, binary[0]):
if category.compare(right, binary[1]):
if category.compare(result, binary[2]):
keep_deps = binary[3]
rules = binary[4]
if len(rules) > 0:
return rules
elif markedup is not None:
return ['(S 0 1)'] + markedup[result][1:]
else:
return []
if flexible:
for binary in BINARIES:
if category.compare(result, binary[2]):
rules = binary[4]
if len(rules) > 0:
return rules
elif markedup is not None:
return ['(S 0 1)'] + markedup[result][1:]
else:
return []
if markedup is not None:
return ['(S 0 1)'] + markedup[result][1:]
return None
def get_binary(left, right, result, markedup=None):
for binary in BINARIES:
if category.compare(left, binary[0]):
if category.compare(right, binary[1]):
if category.compare(result, binary[2]):
keep_deps = binary[3]
rules = binary[4]
if len(rules) > 0:
return rules
elif markedup is not None:
return ['(S 0 1)'] + markedup[result][1:]
else:
return []
return None
def determine_combinator(source, result):
### print len(source)
### print ' '.join(source), result
if len(source) == 0:
return 'lex'
if len(source) == 1:
if get_unary(source[0].category, result) is not None:
return 'unary'
return 'type'
if len(source) == 2:
left = source[0].category
right = source[1].category
result_parts = category.divide(result)
left_parts = category.divide(left)
right_parts = category.divide(right)
if get_binary(left, right, result) is not None:
return 'binary'
# Coordination
# X = X CONJ X
if left == 'conj' or (result.endswith('[conj]') and not '[conj]' in right):
if right == 'conj\\conj':
return 'fa.b'
return 'conj1'
elif 'conj' in source[1].rule or '[conj]' in right:
if category.compare(left, right):
return 'conj2'
if category.compare(category.divide(left)[2], right) and category.divide(left)[1] == '/':
return 'fa.f'
if category.compare(category.divide(right)[0], left) and category.divide(right)[1] is not None:
if 'conj2' in source[1].rule or '[conj]' in right and category.compare(category.divide(right)[2], left):
return 'fa.b'
else:
return 'conj1'
if category.compare(category.divide(right)[2], left):
return 'fa.b'
if (category.compare(left_parts[2], result_parts[2]) and
category.compare(left_parts[0], right_parts[2]) and
category.compare(right_parts[0], result_parts[0]) and
left_parts[1] == result_parts[1] == '/' and
right_parts[1] == '\\'):
return 'cc.b'
if (category.compare(left_parts[2], right_parts[0]) and
category.compare(left_parts[0], result_parts[0]) and
category.compare(right_parts[2], result_parts[2]) and
left_parts[1] == right_parts[1] == result_parts[1] == '/'):
return 'fc.f'
if (category.compare(left_parts[2], result_parts[2]) and
category.compare(left_parts[0], right_parts[2]) and
category.compare(right_parts[0], result_parts[0]) and
left_parts[1] == right_parts[1] == result_parts[1] == '\\'):
return 'fc.b'
if category.compare(result, left):
if '[conj]' in result:
return 'conj2'
raw_right = right
if '[conj]' in right:
raw_right = right[:-6]
if category.compare(result, raw_right):
return 'conj2'
else:
return 'conj2'
elif 'conj1' in source[0].rule or '[conj]' in left:
return 'conj2'
# consider conj3, to handle , separated lists
# Function application
# X = X/Y + Y
if (left_parts[1] == '/' and
category.compare(left_parts[2], right) and
category.compare(left_parts[0], result)):
return 'fa.f'
# X = Y + X\Y
if (right_parts[1] == '\\' and
category.compare(right_parts[2], left) and
category.compare(right_parts[0], result)):
return 'fa.b'
# Function composition
# X/Z = X/Y + Y/Z
if (category.compare(left_parts[2], right_parts[0]) and
category.compare(left_parts[0], result_parts[0]) and
category.compare(right_parts[2], result_parts[2]) and
left_parts[1] == right_parts[1] == result_parts[1] == '/'):
return 'fc.f'
# X\Z = Y\Z + X\Y
if (category.compare(left_parts[2], result_parts[2]) and
category.compare(left_parts[0], right_parts[2]) and
category.compare(right_parts[0], result_parts[0]) and
left_parts[1] == right_parts[1] == result_parts[1] == '\\'):
return 'fc.b'
# Crossed composition
# X/Z = Y/Z + X\Y
# For example:
# (S\NP)/(S\NP) = (S\NP)/(S\NP) + (S\NP)\(S\NP)
if (category.compare(left_parts[2], result_parts[2]) and
category.compare(left_parts[0], right_parts[2]) and
category.compare(right_parts[0], result_parts[0]) and
left_parts[1] == result_parts[1] == '/' and
right_parts[1] == '\\'):
return 'cc.b'
# Z\X = Z/Y + Y\X
# ((S\NP)/S)/(S\NP) = ((S\NP)/S)/(S\NP) + (S\NP)\(S\NP)
# Backward crossed substitution
# X/Z = B/Z + (X\B)/Z
if (left_parts[1] == right_parts[1] == result_parts[1] == '/' and
category.compare(left_parts[2], result_parts[2]) and
category.compare(right_parts[2], result_parts[2])):
sub_parts = category.divide(right_parts[0])
if (category.compare(sub_parts[0], result_parts[0]) and
category.compare(sub_parts[2], left_parts[0]) and
sub_parts[1] != left_parts[1]):
return 'bs.f'
# X\Z = (X/B)\Z + B\Z
if (left_parts[1] == right_parts[1] == result_parts[1] == '\\' and
category.compare(left_parts[2], result_parts[2]) and
category.compare(right_parts[2], result_parts[2])):
sub_parts = category.divide(left_parts[0])
if (sub_parts[0] == result_parts[0] and
sub_parts[2] == right_parts[0] and
sub_parts[1] != right_parts[1]):
return 'bs.b'
# There are restrictions on what B can be, but since this is a parse, and
# all other options have been exhausted, this must be what is going on
# Uncomment to see what is misc:
### if left == result and '/' not in right and '\\' not in right:
### pass
### elif right == result and '/' not in left and '\\' not in left:
### pass
### elif '[conj]' in left or '[conj]' in right or '[conj]' in result:
### pass
### else:
### print 'misc rule:', left, right, result
### print ' ', left_parts
### print ' ', right_parts
### print ' ', result_parts
if category.divide(result)[0] == right and category.divide(result)[1] is not None:
return 'conj1'
return 'misc'
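# Illustrative only: one of the combinator schemata named in the comments
# above, shown on flat (non-nested) category strings without the category
# module. Real categories need category.divide/compare to handle nesting and
# features such as S[dcl].
def _illustrate_forward_application(left, right):
    # Forward application: X/Y + Y => X
    functor, slash, argument = left.partition('/')
    if slash == '/' and argument == right:
        return functor
    return None
# e.g. _illustrate_forward_application('NP/N', 'N') -> 'NP'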
if __name__ == '__main__':
pass
|
|
#!/usr/bin/env python
#
# This Python script uses FontForge to convert a set of BDF files into a
# TrueType font (TTF) and an SFD file.
#
# Copyright (c) 2013-2016 by Tilman Blumenbach <tilman [AT] ax86 [DOT] net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
import fontforge
import sys
from itertools import dropwhile
# Maps argument names to their font attribute names.
_argNameFontAttrMap = {
'name': 'fontname',
'family': 'familyname',
'display_name': 'fullname',
'weight': 'weight',
'copyright': 'copyright',
'font_version': 'version',
}
# Determines which fsSelection and macStyle bits in the OS/2 table get set
# when a certain font weight is specified and OS/2 table tweaks are enabled.
#
# Use lowercase font weights here. The "italic" font weight is special:
# If the font weight is "medium" and the font name ends with "italic"
# (case-insensitive), then "italic" is used when looking up values in this
# dictionary instead of "medium".
#
# The first value of each tuple contains the bits to set in the fsSelection
# field.
#
# The second value of each tuple contains the bits to set in the macStyle
# field in the OS/2 table.
#
# See https://www.microsoft.com/typography/otspec/os2.htm#fss for details.
_weightToStyleMap = {
# fsSelection: Set bit 6 ("REGULAR").
'normal': (0x40, 0),
# fsSelection: Set bit 6 ("REGULAR").
'medium': (0x40, 0),
# fsSelection: Set bits 0 ("ITALIC") and 9 ("OBLIQUE").
# macStyle: Set bit 1 (which presumably also means "ITALIC").
'italic': (0x201, 0x2),
# fsSelection: Set bit 5 ("BOLD").
# macStyle: Set bit 0 (which presumably also means "BOLD").
'bold': (0x20, 0x1),
# fsSelection: Set bits 0 ("ITALIC"), 9 ("OBLIQUE") and 5 ("BOLD").
# macStyle: Set bits 1 (italic) and 0 (bold).
'bolditalic': (0x221, 0x3),
}
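# For example (a sketch of how the table above is used further down): looking up the tweaks
# for a bold italic font would give
#   styleMap, macStyle = _weightToStyleMap['bolditalic']   # (0x221, 0x3)
# and those two values are then OR-ed into baseFont.os2_stylemap and baseFont.macstyle.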
def initArgumentParser():
"""Initialize and return an argparse.ArgumentParser that parses this program's arguments."""
argParser = argparse.ArgumentParser(
description='Convert a set of BDF files into a TrueType font (TTF). '
'The BDF files have to be sorted by font size in ascending order.'
)
# Positional arguments.
argParser.add_argument(
'bdf_file',
nargs='+',
help='BDF file to process.'
)
# Optional arguments.
argParser.add_argument(
'-n',
'--name',
help='Font name to use for generated font (default: taken from first BDF file).'
)
argParser.add_argument(
'-f',
'--family',
help='Font family to use for generated font (default: taken from first BDF file).'
)
argParser.add_argument(
'-N',
'--display-name',
help='Full font name (for display) to use for generated font (default: taken from first BDF file).'
)
argParser.add_argument(
'-w',
'--weight',
help='Weight to use for generated font (default: taken from first BDF file).'
)
argParser.add_argument(
'-c',
'--copyright',
help='Copyright notice to use for generated font (default: taken from first BDF file).'
)
argParser.add_argument(
'-C',
'--append-copyright',
help='Copyright notice to use for generated font (appends to notice taken from first BDF file).'
)
argParser.add_argument(
'-V',
'--font-version',
help='Font version to use for generated font (default: taken from first BDF file).'
)
argParser.add_argument(
'-a',
'--prefer-autotrace',
action='store_true',
help='Prefer AutoTrace over Potrace, if possible (default: %(default)s).'
)
argParser.add_argument(
'-A',
'--tracer-args',
default='',
help='Additional arguments for AutoTrace/Potrace (default: none).'
)
argParser.add_argument(
'-s',
'--visual-studio-fixes',
action='store_true',
help='Make generated font compatible with Visual Studio (default: %(default)s).'
)
argParser.add_argument(
'-O',
'--os2-table-tweaks',
action='store_true',
help='Tweak OS/2 table according to the font weight. This may be needed for some '
'buggy FontForge versions which do not do this by themselves.'
)
return argParser
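# Example invocation (a sketch; the script and BDF file names here are hypothetical):
#   python bdf2ttf.py -n MyFont-Medium -f MyFont -w Medium myfont-8.bdf myfont-12.bdf myfont-16.bdf
# The BDF files must be listed smallest size first, as required by the parser description above.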
def setFontAttrsFromArgs(font, args):
"""Set font attributes from arguments.
If an argument is None, that means that no value was given. In that case, the font attribute
is not modified.
args is an argparse.Namespace.
font is a fontforge.font.
"""
for argName in _argNameFontAttrMap:
argValue = getattr(args, argName)
if argValue is not None:
# User gave a new value for this font attribute.
setattr(
font,
_argNameFontAttrMap[argName],
argValue
)
# Parse the command line arguments.
args = initArgumentParser().parse_args()
# Set FontForge options.
fontforge.setPrefs("PreferPotrace", not args.prefer_autotrace)
fontforge.setPrefs("AutotraceArgs", args.tracer_args)
# Good, can we open the base font?
try:
baseFont = fontforge.open(args.bdf_file[0])
except EnvironmentError as e:
sys.exit("Could not open base font `%s'!" % args.bdf_file[0])
# Now import all the bitmaps from the other BDF files into this font.
print('Importing bitmaps from %d additional fonts...' % (len(args.bdf_file) - 1))
for fontFile in args.bdf_file[1:]:
try:
baseFont.importBitmaps(fontFile)
except EnvironmentError as e:
sys.exit("Could not import additional font `%s'!" % fontFile)
print('Done importing bitmaps.')
# Import the last (biggest) BDF font into the glyph background.
#try:
# baseFont.importBitmaps(args.bdf_file[-1], True)
#except EnvironmentError as e:
# sys.exit("Could not import font `%s' into glyph background!" % args.bdf_file[-1])
# Now set font properties.
setFontAttrsFromArgs(baseFont, args)
# Do we want to append to the current copyright notice?
if args.append_copyright is not None:
baseFont.copyright += args.append_copyright
# FontForge won't write the OS/2 table unless we set a vendor, and we have to set it BEFORE
# modifying the OS/2 table in any way (although this is not documented anywhere...).
# "PfEd" is the value FontForge writes when using the GUI.
baseFont.os2_vendor = 'PfEd'
# Newer FontForge releases require us to manually set the macStyle
# and fsSelection (aka "StyleMap") fields in the OS/2 table.
if args.os2_table_tweaks:
if not hasattr(baseFont, "os2_stylemap"):
sys.exit("You requested OS/2 table tweaks, but your FontForge version is too old for these "
"tweaks to work.")
os2_weight = baseFont.weight.lower()
if os2_weight == "medium" and baseFont.fontname.lower().endswith("italic"):
os2_weight = "italic"
elif os2_weight == "bold" and baseFont.fontname.lower().endswith("italic"):
os2_weight = "bolditalic"
try:
styleMap, macStyle = _weightToStyleMap[os2_weight]
except KeyError:
sys.exit("Cannot tweak OS/2 table: No tweaks defined for guessed font weight `%s'!" % os2_weight)
print(
"OS/2 table tweaks: Guessed weight is `%s' -> Adding %#x to StyleMap and %#x to macStyle." % (
os2_weight,
styleMap,
macStyle
)
)
baseFont.os2_stylemap |= styleMap
baseFont.macstyle |= macStyle
# AutoTrace all glyphs, add extrema and simplify.
print('Processing glyphs...')
baseFont.selection.all()
baseFont.autoTrace()
baseFont.addExtrema()
baseFont.simplify()
# Do we need to fixup the font for use with Visual Studio?
# Taken from http://www.electronicdissonance.com/2010/01/raster-fonts-in-visual-studio-2010.html
# Really, it's a MESS that one has to use dirty workarounds like this...
if args.visual_studio_fixes:
print('Applying Visual Studio fixes...')
# Make sure the encoding used for indexing is set to UCS.
baseFont.encoding = 'iso10646-1'
# Need to add CP950 (Traditional Chinese) to OS/2 table.
# According to http://www.microsoft.com/typography/otspec/os2.htm#cpr,
# we need to set bit 20 to enable CP950.
baseFont.os2_codepages = (baseFont.os2_codepages[0] | (1 << 20), baseFont.os2_codepages[1])
# The font needs to include glyphs for certain characters.
# Try to find a fitting glyph to substitute for those glyphs which
# the font does not already contain. U+0000 is the "default character";
# it _should_ be displayed instead of missing characters, so it is a good choice.
# If the font does not contain a glyph for U+0000, try other, less optimal glyphs.
try:
selector = next(dropwhile(lambda x: x not in baseFont, [0, 'question', 'space']))
substGlyph = baseFont[selector]
except StopIteration:
sys.exit(' While applying Visual Studio fixes: Could not find a substitution glyph!')
print(" Chose `%s' as substitution glyph." % substGlyph.glyphname)
baseFont.selection.select(substGlyph)
baseFont.copyReference()
for codePoint in [0x3044, 0x3046, 0x304B, 0x3057, 0x306E, 0x3093]:
if codePoint not in baseFont:
baseFont.selection.select(codePoint)
baseFont.paste()
# Finally, save the files!
basename = baseFont.fontname
if baseFont.version != '':
basename += '-' + baseFont.version
print('Saving TTF file...')
baseFont.generate(basename + '.ttf', 'ttf')
print('Saving SFD file...')
baseFont.save(basename + '.sfd')
print('Done!')
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
from ...extern.six.moves import xmlrpc_client as xmlrpc
from .errors import SAMPHubError
from .utils import ServerProxyPool
from .lockfile_helpers import get_main_running_hub
from .constants import SSL_SUPPORT
if SSL_SUPPORT:
from .ssl_utils import SafeTransport
__all__ = ['SAMPHubProxy']
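# A minimal usage sketch (assumes a SAMP hub is already running and has written its standard
# profile lock file; the key names below follow the SAMP standard profile):
#   proxy = SAMPHubProxy()
#   proxy.connect()                                   # discovers the hub via its lock file
#   reg_info = proxy.register(proxy.lockfile["samp.secret"])
#   proxy.ping()
#   proxy.unregister(reg_info["samp.private-key"])
#   proxy.disconnect()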
class SAMPHubProxy(object):
"""
Proxy class to simplify the client interaction with a SAMP hub (via the
standard profile).
"""
def __init__(self):
self.proxy = None
self._connected = False
@property
def is_connected(self):
"""
Whether the hub proxy is currently connected to a hub.
"""
return self._connected
def connect(self, hub=None, hub_params=None,
key_file=None, cert_file=None, cert_reqs=0,
ca_certs=None, ssl_version=None, pool_size=20):
"""
Connect to the current SAMP Hub.
Parameters
----------
hub : `~astropy.vo.samp.SAMPHubServer`, optional
The hub to connect to.
hub_params : dict, optional
Optional dictionary containing the lock-file content of the Hub
with which to connect. This dictionary has the form
``{<token-name>: <token-string>, ...}``.
key_file : str, optional
The path to a file containing the private key for SSL connections.
If the certificate file (``cert_file``) contains the private key,
then ``key_file`` can be omitted.
cert_file : str, optional
The path to a file which contains a certificate to be used to
identify the local side of the secure connection.
cert_reqs : int, optional
Whether a certificate is required from the server side of the
connection, and whether it will be validated if provided. It must
be one of the three values `ssl.CERT_NONE` (certificates ignored),
`ssl.CERT_OPTIONAL` (not required, but validated if provided), or
`ssl.CERT_REQUIRED` (required and validated). If the value of this
parameter is not `ssl.CERT_NONE`, then the ``ca_certs`` parameter
must point to a file of CA certificates.
ca_certs : str, optional
The path to a file containing a set of concatenated "Certification
Authority" certificates, which are used to validate the
certificate passed from the Hub end of the connection.
ssl_version : int, optional
Which version of the SSL protocol to use. Typically, the
server chooses a particular protocol version, and the
client must adapt to the server's choice. Most of the
versions are not interoperable with the other versions. If
not specified, the default SSL version is taken from the
default in the installed version of the Python standard
`ssl` library. See the `ssl` documentation for more
information.
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self._connected = False
self.lockfile = {}
if hub is not None and hub_params is not None:
raise ValueError("Cannot specify both hub and hub_params")
if hub_params is None:
if hub is not None:
if not hub.is_running:
raise SAMPHubError("Hub is not running")
else:
hub_params = hub.params
else:
hub_params = get_main_running_hub()
try:
url = hub_params["samp.hub.xmlrpc.url"].replace("\\", "")
if SSL_SUPPORT and url[0:5] == "https":
transport = SafeTransport(key_file, cert_file, cert_reqs,
ca_certs, ssl_version)
self.proxy = ServerProxyPool(pool_size, xmlrpc.ServerProxy,
url, transport=transport,
allow_none=1)
else:
self.proxy = ServerProxyPool(pool_size, xmlrpc.ServerProxy,
url, allow_none=1)
self.ping()
self.lockfile = copy.deepcopy(hub_params)
self._connected = True
except xmlrpc.ProtocolError as p:
# 401 Unauthorized
if p.errcode == 401:
raise SAMPHubError("Unauthorized access. Basic Authentication "
"required or failed.")
else:
raise SAMPHubError("Protocol Error {}: {}".format(p.errcode,
p.errmsg))
def disconnect(self):
"""
Disconnect from the current SAMP Hub.
"""
self.proxy = None
self._connected = False
self.lockfile = {}
def server_close(self):
self.proxy.server_close()
@property
def _samp_hub(self):
"""
Property to abstract away the path to the hub, which allows this class
to be used for other profiles.
"""
return self.proxy.samp.hub
def ping(self):
"""
Proxy to ``ping`` SAMP Hub method (Standard Profile only).
"""
return self._samp_hub.ping()
def set_xmlrpc_callback(self, private_key, xmlrpc_addr):
"""
Proxy to ``setXmlrpcCallback`` SAMP Hub method (Standard Profile only).
"""
return self._samp_hub.setXmlrpcCallback(private_key, xmlrpc_addr)
def register(self, secret):
"""
Proxy to ``register`` SAMP Hub method.
"""
return self._samp_hub.register(secret)
def unregister(self, private_key):
"""
Proxy to ``unregister`` SAMP Hub method.
"""
return self._samp_hub.unregister(private_key)
def declare_metadata(self, private_key, metadata):
"""
Proxy to ``declareMetadata`` SAMP Hub method.
"""
return self._samp_hub.declareMetadata(private_key, metadata)
def get_metadata(self, private_key, client_id):
"""
Proxy to ``getMetadata`` SAMP Hub method.
"""
return self._samp_hub.getMetadata(private_key, client_id)
def declare_subscriptions(self, private_key, subscriptions):
"""
Proxy to ``declareSubscriptions`` SAMP Hub method.
"""
return self._samp_hub.declareSubscriptions(private_key, subscriptions)
def get_subscriptions(self, private_key, client_id):
"""
Proxy to ``getSubscriptions`` SAMP Hub method.
"""
return self._samp_hub.getSubscriptions(private_key, client_id)
def get_registered_clients(self, private_key):
"""
Proxy to ``getRegisteredClients`` SAMP Hub method.
"""
return self._samp_hub.getRegisteredClients(private_key)
def get_subscribed_clients(self, private_key, mtype):
"""
Proxy to ``getSubscribedClients`` SAMP Hub method.
"""
return self._samp_hub.getSubscribedClients(private_key, mtype)
def notify(self, private_key, recipient_id, message):
"""
Proxy to ``notify`` SAMP Hub method.
"""
return self._samp_hub.notify(private_key, recipient_id, message)
def notify_all(self, private_key, message):
"""
Proxy to ``notifyAll`` SAMP Hub method.
"""
return self._samp_hub.notifyAll(private_key, message)
def call(self, private_key, recipient_id, msg_tag, message):
"""
Proxy to ``call`` SAMP Hub method.
"""
return self._samp_hub.call(private_key, recipient_id, msg_tag, message)
def call_all(self, private_key, msg_tag, message):
"""
Proxy to ``callAll`` SAMP Hub method.
"""
return self._samp_hub.callAll(private_key, msg_tag, message)
def call_and_wait(self, private_key, recipient_id, message, timeout):
"""
Proxy to ``callAndWait`` SAMP Hub method.
"""
return self._samp_hub.callAndWait(private_key, recipient_id, message,
timeout)
def reply(self, private_key, msg_id, response):
"""
Proxy to ``reply`` SAMP Hub method.
"""
return self._samp_hub.reply(private_key, msg_id, response)
|
|
"""This module includes common functions for visualization."""
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
from elfi.model.elfi_model import Constant, ElfiModel, NodeReference
def nx_draw(G, internal=False, param_names=False, filename=None, format=None):
"""Draw the `ElfiModel`.
Parameters
----------
G : nx.DiGraph or ElfiModel
Graph or model to draw
internal : boolean, optional
Whether to draw internal nodes (starting with an underscore)
param_names : bool, optional
Show param names on edges
filename : str, optional
If given, save the dot file into the given filename.
format : str, optional
format of the file
Notes
-----
Requires the optional 'graphviz' library.
Returns
-------
dot
A GraphViz dot representation of the model.
"""
try:
from graphviz import Digraph
except ImportError:
raise ImportError("The graphviz library is required for this feature.")
if isinstance(G, ElfiModel):
G = G.source_net
elif isinstance(G, NodeReference):
G = G.model.source_net
dot = Digraph(format=format)
hidden = set()
for n, state in G.nodes(data=True):
if not internal and n[0] == '_' and state['attr_dict'].get('_class') == Constant:
hidden.add(n)
continue
_format = {'shape': 'circle', 'fillcolor': 'gray80', 'style': 'solid'}
if state['attr_dict'].get('_observable'):
_format['style'] = 'filled'
dot.node(n, **_format)
# add edges to graph
for u, v, label in G.edges(data='param', default=''):
if not internal and u in hidden:
continue
label = label if param_names else ''
dot.edge(u, v, str(label))
if filename is not None:
dot.render(filename)
return dot
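# Minimal usage sketch (assumes an ElfiModel `m` built elsewhere and a working graphviz install):
#   dot = nx_draw(m, filename='model_graph', format='png')   # also writes model_graph.png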
def _create_axes(axes, shape, **kwargs):
"""Check the axes and create them if necessary.
Parameters
----------
axes : plt.Axes or arraylike of plt.Axes
shape : tuple of int
(x,) or (x,y)
kwargs
Returns
-------
axes : np.array of plt.Axes
kwargs : dict
Input kwargs without items related to creating a figure.
"""
fig_kwargs = {}
kwargs['figsize'] = kwargs.get('figsize', (4 * shape[1], 4 * shape[0]))
for k in ['figsize', 'sharex', 'sharey', 'dpi', 'num']:
if k in kwargs.keys():
fig_kwargs[k] = kwargs.pop(k)
if axes is not None:
axes = np.atleast_2d(axes)
else:
fig, axes = plt.subplots(ncols=shape[1], nrows=shape[0], **fig_kwargs)
axes = np.reshape(axes, shape)
fig.tight_layout(pad=2.0, h_pad=1.08, w_pad=1.08)
fig.subplots_adjust(wspace=0.2, hspace=0.2)
return axes, kwargs
def _limit_params(samples, selector=None):
"""Pick only the selected parameters from all samples.
Parameters
----------
samples : OrderedDict of np.arrays
selector : iterable of ints or strings, optional
Indices or keys to use from samples. Default to all.
Returns
-------
selected : OrderedDict of np.arrays
"""
if selector is None:
return samples
else:
selected = OrderedDict()
for ii, k in enumerate(samples):
if ii in selector or k in selector:
selected[k] = samples[k]
return selected
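# For example (a sketch): with samples = OrderedDict([('mu', a), ('sigma', b), ('tau', c)]),
# _limit_params(samples, selector=[0, 'tau']) keeps only the 'mu' (index 0) and 'tau' entries.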
def plot_marginals(samples, selector=None, bins=20, axes=None, **kwargs):
"""Plot marginal distributions for parameters.
Parameters
----------
samples : OrderedDict of np.arrays
selector : iterable of ints or strings, optional
Indices or keys to use from samples. Default to all.
bins : int, optional
Number of bins in histogram.
axes : one or an iterable of plt.Axes, optional
Returns
-------
axes : np.array of plt.Axes
"""
ncols = len(samples.keys()) if len(samples.keys()) > 5 else 5
ncols = kwargs.pop('ncols', ncols)
samples = _limit_params(samples, selector)
shape = (max(1, len(samples) // ncols), min(len(samples), ncols))
axes, kwargs = _create_axes(axes, shape, **kwargs)
axes = axes.ravel()
for idx, key in enumerate(samples.keys()):
axes[idx].hist(samples[key], bins=bins, **kwargs)
axes[idx].set_xlabel(key)
return axes
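# Minimal usage sketch (assumes `samples` is an OrderedDict of 1-D arrays, e.g. the `.samples`
# attribute of an elfi result object):
#   axes = plot_marginals(samples, bins=30)
#   plt.show()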
def plot_pairs(samples,
selector=None,
bins=20,
reference_value=None,
axes=None,
draw_upper_triagonal=False,
**kwargs):
"""Plot pairwise relationships as a matrix with marginals on the diagonal.
    The y-axes of the marginal histograms are scaled.
Parameters
----------
samples : OrderedDict of np.arrays
selector : iterable of ints or strings, optional
Indices or keys to use from samples. Default to all.
bins : int, optional
Number of bins in histograms.
reference_value: dict, optional
Dictionary containing reference values for parameters.
axes : one or an iterable of plt.Axes, optional
draw_upper_triagonal: boolean, optional
        Whether to also draw the symmetric upper triangular part of the matrix.
Returns
-------
axes : np.array of plt.Axes
"""
samples = _limit_params(samples, selector)
shape = (len(samples), len(samples))
edgecolor = kwargs.pop('edgecolor', 'black')
dot_size = kwargs.pop('s', 2)
axes, kwargs = _create_axes(axes, shape, **kwargs)
for idx_row, key_row in enumerate(samples):
min_samples = samples[key_row].min()
max_samples = samples[key_row].max()
for idx_col, key_col in enumerate(samples):
if idx_row == idx_col:
axes[idx_row, idx_col].hist(samples[key_row], bins=bins, density=True, **kwargs)
if reference_value is not None:
axes[idx_row, idx_col].plot(
reference_value[key_row], 0,
color='red',
alpha=1.0,
linewidth=2,
marker='X',
clip_on=False,
markersize=12)
axes[idx_row, idx_col].get_yaxis().set_ticklabels([])
axes[idx_row, idx_col].set(xlim=(min_samples, max_samples))
else:
if (idx_row > idx_col) or draw_upper_triagonal:
axes[idx_row, idx_col].plot(samples[key_col],
samples[key_row],
linestyle='',
marker='o',
alpha=0.6,
clip_on=False,
markersize=dot_size,
markeredgecolor=edgecolor,
**kwargs)
if reference_value is not None:
axes[idx_row, idx_col].plot(
[samples[key_col].min(), samples[key_col].max()],
[reference_value[key_row], reference_value[key_row]],
color='red', alpha=0.8, linewidth=2)
axes[idx_row, idx_col].plot(
[reference_value[key_col], reference_value[key_col]],
[samples[key_row].min(), samples[key_row].max()],
color='red', alpha=0.8, linewidth=2)
axes[idx_row, idx_col].axis([samples[key_col].min(),
samples[key_col].max(),
samples[key_row].min(),
samples[key_row].max()])
else:
if idx_row < idx_col:
axes[idx_row, idx_col].axis('off')
axes[idx_row, 0].set_ylabel(key_row)
axes[-1, idx_row].set_xlabel(key_row)
return axes
def plot_traces(result, selector=None, axes=None, **kwargs):
"""Trace plot for MCMC samples.
The black vertical lines indicate the used warmup.
Parameters
----------
result : Result_BOLFI
selector : iterable of ints or strings, optional
Indices or keys to use from samples. Default to all.
axes : one or an iterable of plt.Axes, optional
kwargs
Returns
-------
axes : np.array of plt.Axes
"""
samples_sel = _limit_params(result.samples, selector)
shape = (len(samples_sel), result.n_chains)
kwargs['sharex'] = 'all'
kwargs['sharey'] = 'row'
axes, kwargs = _create_axes(axes, shape, **kwargs)
i1 = 0
for i2, k in enumerate(result.samples):
if k in samples_sel:
for i3 in range(result.n_chains):
axes[i1, i3].plot(result.chains[i3, :, i2], **kwargs)
axes[i1, i3].axvline(result.warmup, color='black')
axes[i1, 0].set_ylabel(k)
i1 += 1
for ii in range(result.n_chains):
axes[-1, ii].set_xlabel('Iterations in Chain {}'.format(ii))
return axes
def plot_params_vs_node(node, n_samples=100, func=None, seed=None, axes=None, **kwargs):
"""Plot some realizations of parameters vs. `node`.
Useful e.g. for exploring how a summary statistic varies with parameters.
Currently only nodes with scalar output are supported, though a function `func` can
be given to reduce node output. This allows giving the simulator as the `node` and
applying a summarizing function without incorporating it into the ELFI graph.
If `node` is one of the model parameters, its histogram is plotted.
Parameters
----------
node : elfi.NodeReference
The node which to evaluate. Its output must be scalar (shape=(batch_size,1)).
n_samples : int, optional
How many samples to plot.
func : callable, optional
A function to apply to node output.
seed : int, optional
axes : one or an iterable of plt.Axes, optional
Returns
-------
axes : np.array of plt.Axes
"""
model = node.model
parameters = model.parameter_names
node_name = node.name
if node_name in parameters:
outputs = [node_name]
shape = (1, 1)
bins = kwargs.pop('bins', 20)
else:
outputs = parameters + [node_name]
n_params = len(parameters)
ncols = n_params if n_params < 5 else 5
ncols = kwargs.pop('ncols', ncols)
edgecolor = kwargs.pop('edgecolor', 'none')
dot_size = kwargs.pop('s', 20)
shape = (1 + n_params // (ncols + 1), ncols)
data = model.generate(batch_size=n_samples, outputs=outputs, seed=seed)
if func is not None:
if hasattr(func, '__name__'):
node_name = func.__name__
else:
node_name = 'func'
data[node_name] = func(data[node.name]) # leaves rest of the code unmodified
if data[node_name].shape != (n_samples,):
raise NotImplementedError("The plotted quantity must have shape ({},), was {}."
.format(n_samples, data[node_name].shape))
axes, kwargs = _create_axes(axes, shape, sharey=True, **kwargs)
axes = axes.ravel()
if len(outputs) == 1:
        axes[0].hist(data[node_name], bins=bins, density=True)
axes[0].set_xlabel(node_name)
else:
for idx, key in enumerate(parameters):
axes[idx].scatter(data[key],
data[node_name],
s=dot_size,
edgecolor=edgecolor,
**kwargs)
axes[idx].set_xlabel(key)
axes[0].set_ylabel(node_name)
for idx in range(len(parameters), len(axes)):
axes[idx].set_axis_off()
return axes
def plot_discrepancy(gp, parameter_names, axes=None, **kwargs):
"""Plot acquired parameters vs. resulting discrepancy.
Parameters
----------
axes : plt.Axes or arraylike of plt.Axes
gp : GPyRegression target model, required
parameter_names : dict, required
        Parameter names, taken from the model's parameter dict {'parameter_name': (lower, upper), ...}.
Returns
-------
axes : np.array of plt.Axes
"""
n_plots = gp.input_dim
ncols = len(gp.bounds) if len(gp.bounds) < 5 else 5
ncols = kwargs.pop('ncols', ncols)
kwargs['sharey'] = kwargs.get('sharey', True)
if n_plots > 10:
shape = (1 + (1 + n_plots) // (ncols + 1), ncols)
else:
shape = (1 + n_plots // (ncols + 1), ncols)
axes, kwargs = _create_axes(axes, shape, **kwargs)
axes = axes.ravel()
for ii in range(n_plots):
axes[ii].scatter(gp.X[:, ii], gp.Y[:, 0], **kwargs)
axes[ii].set_xlabel(parameter_names[ii])
if ii % ncols == 0:
axes[ii].set_ylabel('Discrepancy')
for idx in range(len(parameter_names), len(axes)):
axes[idx].set_axis_off()
return axes
def plot_gp(gp, parameter_names, axes=None, resol=50,
const=None, bounds=None, true_params=None, **kwargs):
"""Plot pairwise relationships as a matrix with parameters vs. discrepancy.
Parameters
----------
gp : GPyRegression, required
parameter_names : list, required
Parameter names in format ['mu_0', 'mu_1', ..]
axes : plt.Axes or arraylike of plt.Axes
resol : int, optional
Resolution of the plotted grid.
const : np.array, optional
Values for parameters in plots where held constant. Defaults to minimum evidence.
bounds: list of tuples, optional
List of tuples for axis boundaries.
true_params : dict, optional
Dictionary containing parameter names with corresponding true parameter values.
Returns
-------
axes : np.array of plt.Axes
"""
n_plots = gp.input_dim
shape = (n_plots, n_plots)
axes, kwargs = _create_axes(axes, shape, **kwargs)
x_evidence = gp.X
y_evidence = gp.Y
if const is None:
const = x_evidence[np.argmin(y_evidence), :]
bounds = bounds or gp.bounds
cmap = plt.cm.get_cmap("Blues")
for ix in range(n_plots):
for jy in range(n_plots):
if ix == jy:
axes[jy, ix].scatter(x_evidence[:, ix], y_evidence, edgecolors='black', alpha=0.6)
axes[jy, ix].get_yaxis().set_ticklabels([])
axes[jy, ix].yaxis.tick_right()
axes[jy, ix].set_ylabel('Discrepancy')
axes[jy, ix].yaxis.set_label_position("right")
if true_params is not None:
axes[jy, ix].plot([true_params[parameter_names[ix]],
true_params[parameter_names[ix]]],
[min(y_evidence), max(y_evidence)],
color='red', alpha=1.0, linewidth=1)
axes[jy, ix].axis([bounds[ix][0], bounds[ix][1], min(y_evidence), max(y_evidence)])
elif ix < jy:
x1 = np.linspace(bounds[ix][0], bounds[ix][1], resol)
y1 = np.linspace(bounds[jy][0], bounds[jy][1], resol)
x, y = np.meshgrid(x1, y1)
predictors = np.tile(const, (resol * resol, 1))
predictors[:, ix] = x.ravel()
predictors[:, jy] = y.ravel()
z = gp.predict_mean(predictors).reshape(resol, resol)
axes[jy, ix].contourf(x, y, z, cmap=cmap)
axes[jy, ix].scatter(x_evidence[:, ix],
x_evidence[:, jy],
color="red",
alpha=0.7,
s=5)
if true_params is not None:
axes[jy, ix].plot([true_params[parameter_names[ix]],
true_params[parameter_names[ix]]],
[bounds[jy][0], bounds[jy][1]],
color='red', alpha=1.0, linewidth=1)
axes[jy, ix].plot([bounds[ix][0], bounds[ix][1]],
[true_params[parameter_names[jy]],
true_params[parameter_names[jy]]],
color='red', alpha=1.0, linewidth=1)
if ix == 0:
axes[jy, ix].set_ylabel(parameter_names[jy])
else:
axes[jy, ix].get_yaxis().set_ticklabels([])
axes[jy, ix].axis([bounds[ix][0], bounds[ix][1], bounds[jy][0], bounds[jy][1]])
else:
axes[jy, ix].axis('off')
if jy < n_plots-1:
axes[jy, ix].get_xaxis().set_ticklabels([])
else:
axes[jy, ix].set_xlabel(parameter_names[ix])
return axes
def plot_predicted_summaries(model=None,
summary_names=None,
n_samples=100,
seed=None,
bins=20,
axes=None,
add_observed=True,
draw_upper_triagonal=False,
**kwargs):
"""Pairplots of 1D summary statistics calculated from prior predictive distribution.
Parameters
----------
model: elfi.Model
Model which is explored.
summary_names: list of strings
Summary statistics which are pairplotted.
n_samples: int, optional
How many samples are drawn from the model.
bins : int, optional
Number of bins in histograms.
axes : one or an iterable of plt.Axes, optional
add_observed: boolean, optional
Add observed summary points in pairplots
draw_upper_triagonal: boolean, optional
        Whether to also draw the symmetric upper triangular part of the matrix.
"""
dot_size = kwargs.pop('s', 8)
samples = model.generate(batch_size=n_samples, outputs=summary_names, seed=seed)
reference_value = model.generate(with_values=model.observed, outputs=summary_names)
reference_value = reference_value if add_observed else None
plot_pairs(samples,
selector=None,
bins=bins,
axes=axes,
reference_value=reference_value,
s=dot_size,
draw_upper_triagonal=draw_upper_triagonal)
class ProgressBar:
"""Progress bar monitoring the inference process.
Attributes
----------
prefix : str, optional
Prefix string
suffix : str, optional
Suffix string
decimals : int, optional
Positive number of decimals in percent complete
length : int, optional
Character length of bar
fill : str, optional
Bar fill character
scaling : int, optional
Integer used to scale current iteration and total iterations of the progress bar
"""
def __init__(self, prefix='', suffix='', decimals=1, length=100, fill='='):
"""Construct progressbar for monitoring.
Parameters
----------
prefix : str, optional
Prefix string
suffix : str, optional
Suffix string
decimals : int, optional
Positive number of decimals in percent complete
length : int, optional
Character length of bar
fill : str, optional
Bar fill character
"""
self.prefix = prefix
self.suffix = suffix
        self.decimals = decimals
self.length = length
self.fill = fill
self.scaling = 0
self.finished = False
def update_progressbar(self, iteration, total):
"""Print updated progress bar in console.
Parameters
----------
iteration : int
Integer indicating completed iterations
total : int
Integer indicating total number of iterations
"""
if iteration >= total:
percent = ("{0:." + str(self.decimals) + "f}").\
format(100.0)
bar = self.fill * self.length
if not self.finished:
print('%s [%s] %s%% %s' % (self.prefix, bar, percent, self.suffix))
self.finished = True
elif total - self.scaling > 0:
percent = ("{0:." + str(self.decimals) + "f}").\
format(100 * ((iteration - self.scaling) / float(total - self.scaling)))
filled_length = int(self.length * (iteration - self.scaling) // (total - self.scaling))
bar = self.fill * filled_length + '-' * (self.length - filled_length)
print('%s [%s] %s%% %s' % (self.prefix, bar, percent, self.suffix), end='\r')
def reinit_progressbar(self, scaling=0, reinit_msg=""):
"""Reinitialize new round of progress bar.
Parameters
----------
scaling : int, optional
Integer used to scale current and total iterations of the progress bar
reinit_msg : str, optional
            Message printed before restarting an empty progress bar on a new line
"""
self.scaling = scaling
self.finished = False
print(reinit_msg)
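# Minimal usage sketch of the progress bar:
#   pbar = ProgressBar(prefix='Progress:', suffix='complete', length=50)
#   for it in range(1, 101):
#       pbar.update_progressbar(it, 100)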
|
|
from django.db import models
from django.http import HttpResponse
from django.template import Template, Context
from urllib import request
from urllib.error import HTTPError
from urllib.parse import urlencode
from django.utils import timezone
import json
from django.contrib.auth.models import User, Group
from datetime import timedelta
from django.conf import settings
import sys
from datetime import date
from datetime import timedelta
from csv import reader as csvreader
from time import sleep
from pprint import pprint
# This gets the location for the image files for the Stock model.
def get_upload_location(instance, filename):
return instance.symbol
class StockAPIError(Exception):
pass
class TradeError(Exception):
pass
def userJSON(user):
retval = {}
retval['id'] = user.id
retval['username'] = user.username
retval['players'] = [p.toShortJSON() for p in Player.objects.filter(user=user)]
return retval
def userShortJSON(user):
retval = {}
retval['id'] = user.id
retval['username'] = user.username
retval['players'] = [p.pk for p in Player.objects.filter(user=user)]
return retval
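# For example (a sketch with made-up values), userShortJSON(user) returns something like
#   {'id': 3, 'username': 'alice', 'players': [7, 12]}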
class RemoteStockData:
"""
An object that holds the data received when a stock is updated
from the web API.
@field symbol The symbol of the stock
@field price The newest price available of the stock
@field change The last available change of the stock
@field name The name of the company
"""
def __init__(self, symbol, name, price, change):
self.symbol = symbol
self.name = name
self.price = price
self.change = change
def apply(self, stockObj = None):
if not stockObj:
stockObj = Stock.objects.get(symbol=self.symbol)
stockObj.last_price = stockObj.price
stockObj.price = self.price
stockObj.change = self.change
if stockObj.company_name == "":
stockObj.company_name = self.name
stockObj.symbol = stockObj.symbol.upper()
def __str__(self):
return "{} at {}".format(self.symbol, self.price)
def __repr__(self):
return str(self)
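# Usage sketch (hypothetical values): RemoteStockData('AAPL', 'Apple Inc.', 150.25, 1.10).apply()
# looks up Stock(symbol='AAPL') and copies the fetched name, price and change onto it; the caller
# is still responsible for calling save() on the Stock afterwards.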
class Stock(models.Model):
company_name = models.CharField(max_length=50, default="", blank=True)
symbol = models.CharField(max_length=4)
# This 20 minute delta is there so that the update() method will actually get a new price the first time
# it's called.
last_updated = models.DateTimeField(default=timezone.now() - timedelta(minutes=20))
image = models.ImageField(upload_to=get_upload_location, blank=True, default=settings.MEDIA_URL + "default")
price = models.DecimalField(max_digits=6, decimal_places=2, default=0)
change = models.DecimalField(max_digits=6, decimal_places=2, default=0)
last_price = models.DecimalField(max_digits=6, decimal_places=2, default=0)
def __str__(self):
return "{} ({})".format(self.company_name, self.symbol)
def has_current_price(self):
return not timezone.now() - self.last_updated > timedelta(minutes=15)
def update(self):
if not self.has_current_price():
price = Stock.remote_load_price(self.symbol)
price.apply(self)
self.last_updated = timezone.now()
self.save()
# The database normalizes the input to two decimal places and makes
# sure that the negative and positive work on the dashboard, so I
# reload it here. With any luck, it's fast, but who knows.
self.refresh_from_db()
score = self.get_score()
# Apply points to owners
for i in Player.objects.filter(stocks__pk=self.pk):
i.points += score
i.save()
def force_update(self):
self.last_updated -= timedelta(minutes=30)
self.update()
def get_price(self):
self.update()
return self.price
def get_change(self):
self.update()
return self.change
def get_score(self):
if self.last_price == 0:
return 0
return (self.price * ((self.price - self.last_price) / self.last_price)) * 100
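    # A worked example (with made-up prices): if a stock moved from last_price=100.00 to
    # price=102.00, get_score() returns 102 * ((102 - 100) / 100) * 100 = 204 points.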
def format_for_json(self):
return {"symbol": self.symbol, "name": self.company_name}
def toJSON(self):
retval = {}
retval['id'] = self.pk
retval['companyName'] = self.company_name
retval['symbol'] = self.symbol
retval['lastUpdated'] = self.last_updated
retval['price'] = float(self.price)
retval['change'] = float(self.change)
retval['stockSuggestions'] = [s.toShortJSON() for s in StockSuggestion.objects.filter(stock=self)]
return retval
def toShortJSON(self):
retval = {}
retval['id'] = self.pk
retval['companyName'] = self.company_name
retval['symbol'] = self.symbol
retval['lastUpdated'] = self.last_updated
retval['price'] = float(self.price)
retval['change'] = float(self.change)
retval['stockSuggestions'] = [s.pk for s in StockSuggestion.objects.filter(stock=self)]
return retval
@staticmethod
def getAPIKey():
from os import environ
return environ["ALPHAVANTAGE_KEY"]
@staticmethod
def remote_load_price(symbol):
"""
Given a symbol as a string, returns a RemoteStockData object with the given symbol's
name, price, and last change.
"""
# This is the dumbest thing I've ever heard of
SYMBOL_KEY = "symbol"
PRICE_KEY = "latestPrice"
URL = "https://api.iextrading.com/1.0/stock/market/batch?symbols={symbol}&types=quote"
url = URL.format(symbol=symbol)
try:
response = request.urlopen(url).read().decode("UTF-8")
except HTTPError as e:
print("Got an HTTPError; was {}".format(e))
print("URL was {}".format(url))
return Stock.remote_load_price(symbol)
try:
data = json.loads(response)
quote = data[symbol.upper()]["quote"]
except json.JSONDecodeError:
print("It's angry now")
sleep(1)
return Stock.remote_load_price(symbol)
except KeyError:
print("There's no stock by that name. Troubling")
# TODO: If you should ever get around to automatically eliminating non-existent stocks, this would be
# a good place to detect that.
return RemoteStockData(symbol, Stock.nameFromSymbol(symbol), Stock.objects.get(symbol=symbol).price, 0)
assert(quote[SYMBOL_KEY] == symbol)
price = float(quote[PRICE_KEY])
change = price - Stock.getYesterdaysPrice(symbol)
name = Stock.nameFromSymbol(symbol)
return RemoteStockData(quote[SYMBOL_KEY], name, price, change)
@staticmethod
def getYesterdaysPrice(symbol):
CLOSE_PRICE_KEY = "close"
url = "https://api.iextrading.com/1.0/stock/{}/chart/1m".format(symbol)
response = request.urlopen(url).read().decode("UTF-8")
jsonObj = json.loads(response)
todaysDate = date.today()
isAWeekday = lambda x: x.weekday() <= 4
if isAWeekday(todaysDate):
lastQuote = jsonObj[-2]
else:
lastQuote = jsonObj[-1]
return float(lastQuote[CLOSE_PRICE_KEY])
@staticmethod
def nameFromSymbol(symbol):
FILE_PATH = "data/fullCompanyList.csv"
with open(FILE_PATH, "r") as f:
companies = csvreader(f)
for company in companies:
if company[0] == symbol:
return company[1]
return None
class Floor(models.Model):
OPEN = "open"
CLOSED = "closed"
PERMISSIVE = "permissive"
PERMISSIVENESS_CHOICES = (
(OPEN, "Open"),
(CLOSED, "Closed"),
(PERMISSIVE, "Permissive")
)
name = models.CharField(max_length=35, unique=True)
stocks = models.ManyToManyField(Stock)
permissiveness = models.CharField(max_length=15, choices=PERMISSIVENESS_CHOICES, default=PERMISSIVE)
owner = models.ForeignKey(User, null=True)
    # By passing the model for this as a string, we can set it dynamically and
    # thus get around the fact that we haven't actually defined that class yet (it's below)
floorPlayer = models.ForeignKey("Player", related_name="FloorPlayer", null=True)
public = models.BooleanField(default=True)
num_stocks = models.IntegerField(default=10)
def save(self, *args, **kwargs):
super(Floor, self).save(*args, **kwargs)
if self.floorPlayer == None:
floorUser = User.objects.get(groups__name__exact="Floor")
newFloorPlayer = Player.objects.create(user=floorUser, floor=self)
newFloorPlayer.save()
self.floorPlayer = newFloorPlayer
super(Floor, self).save(*args, **kwargs)
if self.owner and not Player.objects.filter(user=self.owner, floor=self).exists():
# We also need to make a player on the floor for the owner the first time
newPlayer = Player(user=self.owner, floor=self)
newPlayer.save()
super(Floor, self).save(*args, **kwargs)
def __str__(self):
return self.name
def leaders(self):
return Player.objects.filter(floor=self).exclude(user__groups__name__exact="Floor").order_by("-points")
def _render_board(self, player=None, leaderboard=False, stockboard=False, links=False):
"""
The output of this function used to be all over the place over and over, so I consolidated it here.
NB The output from this needs to be surrounded by `<table>` tags.
"""
TEMPLATE_STRING = """
{% load staticfiles %}
<tr>
{% if stockboard %}
<td style="width: 50%">
<table class="stockBoard">
{% for stock in stocks %}
<tr>
<td class="stock" id="{{ stock.symbol }}">
{% if links %}
<a class="noUnderline" href="{% url "trade" pkStock=stock.pk pkFloor=player.floor.pk %}">
{% endif %}
<span style="display: inline-block; float: left">{{ stock.symbol }}</span>
{% if links %}
</a>
{% endif %}
{% if stock.has_current_price %}
{% with change=stock.get_change %}
<span class="stockPrice {% if change > 0 %}green{% elif change == 0 %}blue{% else %}red{% endif %}">{% if change > 0 %}+{% endif %}{{ change }}</span>
{% endwith %}
{% else %}
<span class="loadingPrice stockPrice" id="{{ stock.symbol }}"><img class="loadingWheel" src="{% static "spinning-wheel.gif" %}" /></span>
{% endif %}
</td>
</tr>
{% endfor %}
</tr>
</table>
</td>
{% endif %}
{% if leaderboard %}
<td>
<table class="leaderBoard">
{% for competitor in leaders %}
<tr>
<td {% if forloop.last %}style="border-bottom: none;"{% endif %} class="playerLine" id="{{ competitor.player.pk }}" data-stocks="{{ competitor.stocks|join:"," }}">
<a class="noUnderline" href="{% url "userPage" pkUser=competitor.player.user.pk %}">
<span style="display: inline-block; float: left">{{ forloop.counter }}. {{ competitor.player.get_name }}
</span>
</a>
<span style="display: inline-block; float: right">{{ competitor.player.points }}
</span>
</td>
</tr>
{% endfor %}
</table>
</td>
{% endif %}
</tr>
<script src="{% url "stockBoardJavaScript" %}"></script>
"""
test = {p.pk : [s.symbol for s in p.stocks.all()] for p in self.leaders()}
tem = Template(TEMPLATE_STRING)
con = Context({"leaderboard" : leaderboard,
"stockboard" : stockboard,
"player": player,
"leaders": [{"player": p, "stocks": [s.symbol for s in p.stocks.all()]} for p in self.leaders()],
"stocks": self.stocks.all(),
"links": links})
return tem.render(con)
def render_leaderboard(self, player, links=False):
return self._render_board(player=player, leaderboard=True, links=links)
def render_stockboard(self, player, links=False):
return self._render_board(player=player, stockboard=True, links=links)
def render_both_boards(self, player, links=False):
return self._render_board(player=player, stockboard=True, leaderboard=True, links=links)
def to_json(self):
return {"stocks": ",".join(s.symbol for s in self.stocks.all()),
"name": self.name, "permissiveness": self.permissiveness, "pkOwner": self.owner.pk,
"pkFloorPlayer": self.floorPlayer.pk, "public": self.public, "num_stocks": self.num_stocks}
def toJSON(self):
retval = {}
retval['id'] = self.pk
retval['name'] = self.name
retval['permissiveness'] = self.permissiveness
retval['owner'] = userShortJSON(self.owner)
retval['floorPlayer'] = self.floorPlayer.toShortJSON()
retval['public'] = self.public
retval['numStocks'] = self.num_stocks
retval['stocks'] = [s.toShortJSON() for s in self.stocks.all()]
return retval
def toShortJSON(self):
retval = {}
retval['id'] = self.pk
retval['name'] = self.name
retval['permissiveness'] = self.permissiveness
retval['owner'] = self.owner.pk
retval['floorPlayer'] = self.floorPlayer.pk
retval['public'] = self.public
retval['numStocks'] = self.num_stocks
retval['stocks'] = [s.pk for s in self.stocks.all()]
return retval
class Player(models.Model):
"""
This model represents a specific player on a specific floor. The player account is represented by a Django `User`
object, which this references. Setting these as ForeignKeys as opposed to something else will cause this object to be
    deleted if its `User` object or its floor is deleted.
"""
user = models.ForeignKey(User)
floor = models.ForeignKey("Floor")
stocks = models.ManyToManyField(Stock, blank=True)
points = models.IntegerField(default=0)
def __str__(self):
return "{} on {}".format(str(self.user), str(self.floor))
def get_name(self):
"""
        It's possible that somebody doesn't have a username, so this falls back to the email
        address; it also prevents a bunch of ugly `player.user.username` calls.
"""
return self.user.username if self.user.username else self.user.email
def isFloor(self):
return "Floor" in [i.name for i in self.user.groups.all()]
def receivedTrades(self):
return Trade.objects.filter(recipient=self)
def sentTrades(self):
return Trade.objects.filter(sender=self)
def receivedRequests(self):
return StockSuggestion.objects.filter(floor__owner=self.user, floor=self.floor)
def numMessages(self):
num = self.receivedTrades().count()
if self.floor.owner == self.user and self.floor.permissiveness == 'permissive':
num += self.receivedRequests().count()
return num
def seesSuggestions(self):
"""
This returns a boolean telling whether this player needs a suggestions tab on his
dashboard tab.
"""
return self.floor.owner == self.user and self.floor.permissiveness == "permissive"
def get_floor_leaderboard(self):
return self.floor.render_leaderboard(self)
def get_floor_stockboard(self):
return self.floor.render_stockboard(self)
def get_both_floor_boards(self):
return self.floor.render_both_boards(self)
def get_floor_leaderboard_clickable(self):
return self.floor.render_leaderboard(self, links=True)
def get_floor_stockboard_clickable(self):
return self.floor.render_stockboard(self, links=True)
def get_both_floor_boards_clickable(self):
return self.floor.render_both_boards(self, links=True)
def get_users_owned_floors(self):
return Floor.objects.filter(owner=self.user)
def to_json(self):
return {"pkUser": self.user.pk, "pkFloor": self.floor.pk, "stocks": ",".join([s.symbol for s in self.stocks.all()]), "points": self.points}
def toJSON(self):
retval = {}
retval['id'] = self.pk
retval['user'] = userShortJSON(self.user)
retval['floor'] = self.floor.toShortJSON()
retval['stocks'] = [s.toShortJSON() for s in self.stocks.all()]
retval['points'] = self.points
retval['isFloor'] = self.isFloor()
retval['sentTrades'] = [t.toShortJSON() for t in Trade.objects.filter(sender=self)]
retval['receivedTrades'] = [t.toShortJSON() for t in Trade.objects.filter(recipient=self)]
retval['isFloorOwner'] = self.floor.owner == self.user
return retval
def toShortJSON(self):
retval = {}
retval['id'] = self.pk
retval['user'] = self.user.pk
retval['floor'] = self.floor.pk
retval['stocks'] = [s.pk for s in self.stocks.all()]
retval['points'] = self.points
retval['isFloor'] = self.isFloor()
retval['sentTrades'] = [t.pk for t in Trade.objects.filter(sender=self)]
retval['receivedTrades'] = [t.pk for t in Trade.objects.filter(recipient=self)]
retval['isFloorOwner'] = self.floor.owner == self.user
return retval
class Trade(models.Model):
recipient = models.ForeignKey(Player)
# recipientStocks and senderStocks are the stocks that those people have right now and will give away in the trade.
recipientStocks = models.ManyToManyField(Stock, related_name="receivingPlayerStocks")
floor = models.ForeignKey(Floor)
sender = models.ForeignKey(Player, related_name="sendingPlayer")
senderStocks = models.ManyToManyField(Stock)
date = models.DateTimeField(default=timezone.now)
def __str__(self):
return "Trade from {} to {} on {}".format(self.sender.user, self.recipient.user, self.floor)
def accept(self):
if not self.recipient.isFloor():
self.verify()
for s in [i for i in self.recipientStocks.all() if not StockSuggestion.objects.filter(floor=self.floor, stock=i)]:
self.recipient.stocks.remove(s)
self.sender.stocks.add(s)
for s in self.senderStocks.all():
self.sender.stocks.remove(s)
self.recipient.stocks.add(s)
self.sender.save()
self.recipient.save()
self.delete()
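    # Note on verify() below: every stock a player gives up must belong to that player and to the
    # floor, and neither side may end up holding more than floor.num_stocks. Trades whose recipient
    # is the floor player skip the per-stock ownership checks (as long as the floor is not closed)
    # and are accepted automatically at the end of verify(); trades sent *by* the floor player
    # raise an error.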
def verify(self):
if not (self.recipient.isFloor() and not self.floor.permissiveness == "closed"):
for s in self.recipientStocks.all():
if not s in self.recipient.stocks.all():
raise RuntimeError("One of the recipient stocks ({}) doesn't belong to the recipient ({})".format(s, self.recipient.user))
if not s in self.floor.stocks.all():
raise RuntimeError("One of the recipient stocks ({}) doesn't belong to the floor ({})".format(s, self.floor))
for s in self.senderStocks.all():
if not s in self.sender.stocks.all():
raise RuntimeError("One of the sender stocks ({}) doesn't belong to the sender ({})".format(s, self.recipient.user))
if not s in self.floor.stocks.all():
raise RuntimeError("One of the sender stocks ({}) doesn't belong to the floor ({})".format(s, self.floor))
if self.recipient.stocks.all().count() + self.senderStocks.all().count() - self.recipientStocks.all().count() > self.floor.num_stocks and not self.recipient.isFloor():
raise TradeError("{} will have too many stocks if this trade goes through".format(self.recipient.get_name()))
if self.sender.stocks.all().count() + self.recipientStocks.all().count() - self.senderStocks.all().count() > self.floor.num_stocks:
raise TradeError("{} will have too many stocks if this trade goes through".format(self.sender.get_name()))
if self.recipient.isFloor():
self.accept()
elif self.sender.isFloor():
raise RuntimeError("The floor sent a trade. This isn't good at all.")
def toFormDict(self):
d = {"other_user": self.sender.get_name(),
"user_stocks": ",".join(i.symbol for i in self.recipientStocks.all()),
"other_stocks": ",".join(i.symbol for i in self.senderStocks.all())}
return d
def toJSON(self):
retval = {}
retval['id'] = self.pk
retval['recipientPlayer'] = self.recipient.toShortJSON()
retval['recipientStocks'] = [s.toShortJSON() for s in self.recipientStocks.all()]
retval['senderPlayer'] = self.sender.toShortJSON()
retval['senderStocks'] = [s.toShortJSON() for s in self.senderStocks.all()]
retval['floor'] = self.floor.toShortJSON()
retval['date'] = self.date.isoformat()
return retval
def toShortJSON(self):
retval = {}
retval['id'] = self.pk
retval['recipientPlayer'] = self.recipient.pk
retval['recipientStocks'] = [s.pk for s in self.recipientStocks.all()]
retval['senderPlayer'] = self.sender.pk
retval['senderStocks'] = [s.pk for s in self.senderStocks.all()]
retval['floor'] = self.floor.pk
retval['date'] = self.date.isoformat()
return retval
class StockSuggestion(models.Model):
"""
This is what holds someone's request for a stock to be added to a permissive floor.
"""
stock = models.ForeignKey(Stock)
requesting_player = models.ForeignKey(Player)
floor = models.ForeignKey(Floor)
date = models.DateTimeField(auto_now_add=True)
def accept(self):
if not self.stock in self.floor.stocks.all():
self.floor.stocks.add(self.stock)
self.floor.save()
        # Give the stock to the floor player if the person who originally wanted it already has too many.
if self.requesting_player.stocks.all().count() + 1 > self.floor.num_stocks:
self.floor.floorPlayer.stocks.add(self.stock)
self.floor.floorPlayer.save()
else:
self.requesting_player.stocks.add(self.stock)
self.requesting_player.save()
self.delete()
def isValid(self):
if self.stock in self.floor.stocks.all():
return False
if not self.requesting_player.floor == self.floor:
return False
return True
def __str__(self):
return "{} wants {} added to {}".format(self.requesting_player.get_name(), self.stock, self.floor)
def toJSON(self):
retval = {}
retval['id'] = self.pk
retval['stock'] = self.stock.toShortJSON()
retval['requestingPlayer'] = self.requesting_player.toShortJSON()
retval['floor'] = self.floor.toShortJSON()
retval['date'] = self.date.isoformat()
return retval
def toShortJSON(self):
retval = {}
retval['id'] = self.pk
retval['stock'] = self.stock.pk
retval['requestingPlayer'] = self.requesting_player.pk
retval['floor'] = self.floor.pk
retval['date'] = self.date.isoformat()
return retval
|
|
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal, assert_approx_equal, assert_allclose)
import pytest
from pytest import raises as assert_raises
from scipy.special import xlogy
from scipy.stats.contingency import (margins, expected_freq,
chi2_contingency, association)
def test_margins():
a = np.array([1])
m = margins(a)
assert_equal(len(m), 1)
m0 = m[0]
assert_array_equal(m0, np.array([1]))
a = np.array([[1]])
m0, m1 = margins(a)
expected0 = np.array([[1]])
expected1 = np.array([[1]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
a = np.arange(12).reshape(2, 6)
m0, m1 = margins(a)
expected0 = np.array([[15], [51]])
expected1 = np.array([[6, 8, 10, 12, 14, 16]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
a = np.arange(24).reshape(2, 3, 4)
m0, m1, m2 = margins(a)
expected0 = np.array([[[66]], [[210]]])
expected1 = np.array([[[60], [92], [124]]])
expected2 = np.array([[[60, 66, 72, 78]]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
assert_array_equal(m2, expected2)
def test_expected_freq():
assert_array_equal(expected_freq([1]), np.array([1.0]))
observed = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]])
e = expected_freq(observed)
assert_array_equal(e, np.ones_like(observed))
observed = np.array([[10, 10, 20], [20, 20, 20]])
e = expected_freq(observed)
correct = np.array([[12., 12., 16.], [18., 18., 24.]])
assert_array_almost_equal(e, correct)
def test_chi2_contingency_trivial():
# Some very simple tests for chi2_contingency.
# A trivial case
obs = np.array([[1, 2], [1, 2]])
chi2, p, dof, expected = chi2_contingency(obs, correction=False)
assert_equal(chi2, 0.0)
assert_equal(p, 1.0)
assert_equal(dof, 1)
assert_array_equal(obs, expected)
# A *really* trivial case: 1-D data.
obs = np.array([1, 2, 3])
chi2, p, dof, expected = chi2_contingency(obs, correction=False)
assert_equal(chi2, 0.0)
assert_equal(p, 1.0)
assert_equal(dof, 0)
assert_array_equal(obs, expected)
def test_chi2_contingency_R():
# Some test cases that were computed independently, using R.
# Rcode = \
# """
# # Data vector.
# data <- c(
# 12, 34, 23, 4, 47, 11,
# 35, 31, 11, 34, 10, 18,
# 12, 32, 9, 18, 13, 19,
# 12, 12, 14, 9, 33, 25
# )
#
# # Create factor tags:r=rows, c=columns, t=tiers
# r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4")))
# c <- factor(gl(3, 1, 2*3*4, labels=c("c1", "c2", "c3")))
# t <- factor(gl(2, 3, 2*3*4, labels=c("t1", "t2")))
#
# # 3-way Chi squared test of independence
# s = summary(xtabs(data~r+c+t))
# print(s)
# """
# Routput = \
# """
# Call: xtabs(formula = data ~ r + c + t)
# Number of cases in table: 478
# Number of factors: 3
# Test for independence of all factors:
# Chisq = 102.17, df = 17, p-value = 3.514e-14
# """
obs = np.array(
[[[12, 34, 23],
[35, 31, 11],
[12, 32, 9],
[12, 12, 14]],
[[4, 47, 11],
[34, 10, 18],
[18, 13, 19],
[9, 33, 25]]])
chi2, p, dof, expected = chi2_contingency(obs)
assert_approx_equal(chi2, 102.17, significant=5)
assert_approx_equal(p, 3.514e-14, significant=4)
assert_equal(dof, 17)
# Rcode = \
# """
# # Data vector.
# data <- c(
# #
# 12, 17,
# 11, 16,
# #
# 11, 12,
# 15, 16,
# #
# 23, 15,
# 30, 22,
# #
# 14, 17,
# 15, 16
# )
#
# # Create factor tags:r=rows, c=columns, d=depths(?), t=tiers
# r <- factor(gl(2, 2, 2*2*2*2, labels=c("r1", "r2")))
# c <- factor(gl(2, 1, 2*2*2*2, labels=c("c1", "c2")))
# d <- factor(gl(2, 4, 2*2*2*2, labels=c("d1", "d2")))
# t <- factor(gl(2, 8, 2*2*2*2, labels=c("t1", "t2")))
#
# # 4-way Chi squared test of independence
# s = summary(xtabs(data~r+c+d+t))
# print(s)
# """
# Routput = \
# """
# Call: xtabs(formula = data ~ r + c + d + t)
# Number of cases in table: 262
# Number of factors: 4
# Test for independence of all factors:
# Chisq = 8.758, df = 11, p-value = 0.6442
# """
obs = np.array(
[[[[12, 17],
[11, 16]],
[[11, 12],
[15, 16]]],
[[[23, 15],
[30, 22]],
[[14, 17],
[15, 16]]]])
chi2, p, dof, expected = chi2_contingency(obs)
assert_approx_equal(chi2, 8.758, significant=4)
assert_approx_equal(p, 0.6442, significant=4)
assert_equal(dof, 11)
def test_chi2_contingency_g():
c = np.array([[15, 60], [15, 90]])
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=False)
assert_allclose(g, 2*xlogy(c, c/e).sum())
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=True)
c_corr = c + np.array([[-0.5, 0.5], [0.5, -0.5]])
assert_allclose(g, 2*xlogy(c_corr, c_corr/e).sum())
c = np.array([[10, 12, 10], [12, 10, 10]])
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood')
assert_allclose(g, 2*xlogy(c, c/e).sum())
def test_chi2_contingency_bad_args():
# Test that "bad" inputs raise a ValueError.
# Negative value in the array of observed frequencies.
obs = np.array([[-1, 10], [1, 2]])
assert_raises(ValueError, chi2_contingency, obs)
# The zeros in this will result in zeros in the array
# of expected frequencies.
obs = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, chi2_contingency, obs)
# A degenerate case: `observed` has size 0.
obs = np.empty((0, 8))
assert_raises(ValueError, chi2_contingency, obs)
def test_bad_association_args():
# Invalid Test Statistic
assert_raises(ValueError, association, [[1, 2], [3, 4]], "X")
# Invalid array shape
assert_raises(ValueError, association, [[[1, 2]], [[3, 4]]], "cramer")
# chi2_contingency exception
assert_raises(ValueError, association, [[-1, 10], [1, 2]], 'cramer')
# Invalid Array Item Data Type
assert_raises(ValueError, association, [[1, 2], ["dd", 4]], 'cramer')
@pytest.mark.parametrize('stat, expected',
[('cramer', 0.09222412010290792),
('tschuprow', 0.0775509319944633),
('pearson', 0.12932925727138758)])
def test_assoc(stat, expected):
# 2d Array
obs1 = np.array([[12, 13, 14, 15, 16],
[17, 16, 18, 19, 11],
[9, 15, 14, 12, 11]])
a = association(observed=obs1, method=stat)
assert_allclose(a, expected)
|
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import unittest
from pybuilder import extern
from pybuilder.core import (Project, Logger, init, INITIALIZER_ATTRIBUTE,
ENVIRONMENTS_ATTRIBUTE, task, description,
Dependency, RequirementsFile)
from pybuilder.errors import MissingPropertyException
from pybuilder.utils import np, jp
from test_utils import patch
_extern = extern
class ProjectTest(unittest.TestCase):
def setUp(self):
self.project = Project(basedir="/imaginary", name="Unittest")
@patch("pybuilder.core.basename", return_value="imaginary")
def test_should_pick_directory_name_for_project_name_when_name_is_not_given(self, os_path_basename):
project = Project(basedir="/imaginary")
self.assertEqual("imaginary", project.name)
os_path_basename.assert_called_with("/imaginary")
def test_get_property_should_return_default_value_when_property_is_not_set(self):
self.assertEqual("spam", self.project.get_property("spam", "spam"))
def test_get_property_should_return_property_value_when_property_is_set(self):
self.project.set_property("spam", "eggs")
self.assertEqual("eggs", self.project.get_property("spam", "spam"))
def test_has_property_should_return_false_when_property_is_not_set(self):
self.assertFalse(self.project.has_property("spam"))
def test_has_property_should_return_true_when_property_is_set(self):
self.project.set_property("spam", "eggs")
self.assertTrue(self.project.has_property("spam"))
def test_set_property_if_unset_should_set_property_when_property_is_not_set(self):
self.project.set_property_if_unset("spam", "spam")
self.assertEqual("spam", self.project.get_property("spam"))
def test_set_property_if_unset_should_not_set_property_when_property_is_already_set(self):
self.project.set_property("spam", "eggs")
self.project.set_property_if_unset("spam", "spam")
self.assertEqual("eggs", self.project.get_property("spam"))
def test_expand_should_raise_exception_when_property_is_not_set(self):
self.assertRaises(
MissingPropertyException, self.project.expand, "$spam")
def test_expand_should_return_expanded_string_when_property_is_set(self):
self.project.set_property("spam", "eggs")
self.assertEqual("eggs", self.project.expand("$spam"))
def test_expand_should_return_expanded_string_when_two_properties_are_found_and_set(self):
self.project.set_property("spam", "spam")
self.project.set_property("eggs", "eggs")
self.assertEqual(
"spam and eggs", self.project.expand("$spam and $eggs"))
def test_expand_should_expand_property_with_value_being_an_property_expression(self):
self.project.set_property("spam", "spam")
self.project.set_property("eggs", "$spam")
self.assertEqual("spam", self.project.expand("$eggs"))
def test_expand_should_raise_exception_when_first_expansion_leads_to_property_reference_and_property_is_undefined(
self):
self.project.set_property("eggs", "$spam")
self.assertRaises(
MissingPropertyException, self.project.expand, "$eggs")
def test_expand_path_should_return_expanded_path(self):
self.project.set_property("spam", "spam")
self.project.set_property("eggs", "eggs")
self.assertEqual(np(jp(self.project.basedir, "spam", "eggs")),
self.project.expand_path("$spam/$eggs"))
def test_expand_path_should_return_expanded_path_and_additional_parts_when_additional_parts_are_given(self):
self.project.set_property("spam", "spam")
self.project.set_property("eggs", "eggs")
self.assertEqual(np(jp(self.project.basedir, "spam", "eggs", "foo", "bar")),
self.project.expand_path("$spam/$eggs", "foo", "bar"))
    def test_should_raise_exception_when_getting_mandatory_property_and_property_is_not_found(self):
self.assertRaises(MissingPropertyException,
self.project.get_mandatory_property, "i_dont_exist")
    def test_should_return_property_value_when_getting_mandatory_property_and_property_exists(self):
self.project.set_property("spam", "spam")
self.assertEqual("spam", self.project.get_mandatory_property("spam"))
def test_should_add_runtime_dependency_with_name_only(self):
self.project.depends_on("spam")
self.assertEqual(1, len(self.project.dependencies))
self.assertEqual("spam", self.project.dependencies[0].name)
self.assertEqual(None, self.project.dependencies[0].version)
def test_should_add_dependency_with_name_and_version(self):
self.project.depends_on("spam", "0.7")
self.assertEqual(1, len(self.project.dependencies))
self.assertEqual("spam", self.project.dependencies[0].name)
self.assertEqual(">=0.7", self.project.dependencies[0].version)
def test_should_add_dependency_with_name_and_version_only_once(self):
self.project.depends_on("spam", "0.7")
self.project.depends_on("spam", "0.7")
self.assertEqual(1, len(self.project.dependencies))
self.assertEqual("spam", self.project.dependencies[0].name)
self.assertEqual(">=0.7", self.project.dependencies[0].version)
class ProjectManifestTests(unittest.TestCase):
def setUp(self):
self.project = Project(basedir="/imaginary", name="Unittest")
def test_should_raise_exception_when_given_glob_pattern_is_none(self):
self.assertRaises(ValueError, self.project._manifest_include, None)
self.assertRaises(ValueError, self.project._manifest_include_directory, None, ['*'])
def test_should_raise_exception_when_given_glob_pattern_is_empty_string(self):
empty_string = " \n"
self.assertRaises(
ValueError, self.project._manifest_include, empty_string)
self.assertRaises(
ValueError, self.project._manifest_include_directory, empty_string, ['*'])
self.assertRaises(
ValueError, self.project._manifest_include_directory, 'spam', [])
self.assertRaises(
ValueError, self.project._manifest_include_directory, 'spam', [empty_string])
def test_should_add_filename_to_list_of_included_files(self):
self.project._manifest_include("spam")
self.assertEqual(["spam"], self.project.manifest_included_files)
def test_should_add_filenames_in_correct_order_to_list_of_included_files(self):
self.project._manifest_include("spam")
self.project._manifest_include("egg")
self.project._manifest_include("yadt")
self.assertEqual(
["spam", "egg", "yadt"], self.project.manifest_included_files)
def test_should_add_directory_to_list_of_includes(self):
self.project._manifest_include_directory('yadt', ('egg', 'spam',))
self.assertEqual([('yadt', ('egg', 'spam',)), ],
self.project.manifest_included_directories)
def test_should_add_directories_in_correct_order_to_list_of_includes(self):
self.project._manifest_include_directory('spam', ('*',))
self.project._manifest_include_directory('egg', ('*',))
self.project._manifest_include_directory('yadt/spam', ('*',))
self.assertEqual([('spam', ('*',)),
('egg', ('*',)),
('yadt/spam', ('*',)),
],
self.project.manifest_included_directories)
class ProjectPackageDataTests(unittest.TestCase):
def setUp(self):
self.project = Project(basedir="/imaginary", name="Unittest")
def test_should_raise_exception_when_filename_not_given(self):
self.assertRaises(
ValueError, self.project.include_file, "my_package", None)
def test_should_raise_exception_when_filename_is_empty_string(self):
self.assertRaises(
ValueError, self.project.include_file, "eggs", "\t \n")
def test_should_raise_exception_when_package_path_not_given(self):
self.assertRaises(ValueError, self.project.include_directory, None, "spam")
def test_should_raise_exception_when_package_path_is_empty_string(self):
self.assertRaises(ValueError, self.project.include_directory, "\t \n", "spam")
def test_should_raise_exception_when_patterns_list_not_given(self):
self.assertRaises(ValueError, self.project.include_directory, "spam", None)
def test_should_raise_exception_when_patterns_list_is_empty_list(self):
self.assertRaises(ValueError, self.project.include_directory, "spam", ["\t \n"])
    def test_package_data_dictionary_should_be_empty(self):
self.assertEqual({}, self.project.package_data)
def test_should_add_filename_to_list_of_included_files_for_package_spam(self):
self.project.include_file("spam", "eggs")
self.assertEqual({"spam": ["eggs"]}, self.project.package_data)
def test_should_add_two_filenames_to_list_of_included_files_for_package_spam(self):
self.project.include_file("spam", "eggs")
self.project.include_file("spam", "ham")
self.assertEqual({"spam": ["eggs", "ham"]}, self.project.package_data)
def test_should_add_two_filenames_to_list_of_included_files_for_two_different_packages(self):
self.project.include_file("spam", "eggs")
self.project.include_file("monty", "ham")
self.assertEqual(
{"monty": ["ham"], "spam": ["eggs"]}, self.project.package_data)
def test_should_add_two_filenames_to_list_of_included_files_and_to_manifest(self):
self.project.include_file("spam", "eggs")
self.project.include_file("monty", "ham")
self.assertEqual(
{"monty": ["ham"], "spam": ["eggs"]}, self.project.package_data)
self.assertEqual(
[np("spam/eggs"), np("monty/ham")], self.project.manifest_included_files)
@patch("pybuilder.core.os.walk")
    def test_should_add_pattern_to_list_of_included_files_for_package_spam(self, walk):
walk.return_value = [[jp(self.project.basedir, "spam"),
("foo", "bar"),
("bacon.eggs", "bacon.noeggs")],
]
self.project.include_directory("spam", "*.eggs")
self.assertEqual({"spam": ["bacon.eggs"]}, self.project.package_data)
class ProjectDataFilesTests(unittest.TestCase):
def setUp(self):
self.project = Project(basedir="/imaginary", name="Unittest")
def test_should_return_empty_list_for_property_files_to_install(self):
self.assertEqual([], self.project.files_to_install)
def test_should_return_file_to_install(self):
self.project.install_file("destination", "filename")
self.assertEqual(
[("destination", ["filename"])], self.project.files_to_install)
def test_should_raise_exception_when_no_destination_given(self):
self.assertRaises(
ValueError, self.project.install_file, None, "Hello world.")
def test_should_raise_exception_when_no_filename_given(self):
self.assertRaises(
ValueError, self.project.install_file, "destination", None)
def test_should_raise_exception_when_filename_empty(self):
self.assertRaises(
ValueError, self.project.install_file, "destination", "\t \n")
def test_should_return_files_to_install_into_same_destination(self):
self.project.install_file("destination", "filename1")
self.project.install_file("destination", "filename2")
self.assertEqual(
[("destination", ["filename1", "filename2"])], self.project.files_to_install)
def test_should_return_files_to_install_into_different_destinations(self):
self.project.install_file("destination_a", "filename_a_1")
self.project.install_file("destination_a", "filename_a_2")
self.project.install_file("destination_b", "filename_b")
self.assertEqual([("destination_a", ["filename_a_1", "filename_a_2"]),
("destination_b", ["filename_b"])], self.project.files_to_install)
def test_should_return_files_to_install_into_different_destinations_and_add_them_to_manifest(self):
self.project.install_file("destination_a", "somepackage1/filename1")
self.project.install_file("destination_a", "somepackage2/filename2")
self.project.install_file("destination_b", "somepackage3/filename3")
self.assertEqual(
[("destination_a", ["somepackage1/filename1", "somepackage2/filename2"]),
("destination_b", ["somepackage3/filename3"])], self.project.files_to_install)
self.assertEqual(
["somepackage1/filename1", "somepackage2/filename2", "somepackage3/filename3"],
self.project.manifest_included_files)
class ProjectValidationTest(unittest.TestCase):
def setUp(self):
self.project = Project(basedir="/imaginary", name="Unittest")
def test_should_validate_empty_project(self):
validation_messages = self.project.validate()
self.assertFalse(validation_messages)
def test_should_not_validate_project_with_duplicate_dependency_but_different_versions(self):
self.project.depends_on('spam', version='1')
self.project.depends_on('spam', version='2')
validation_messages = self.project.validate()
self.assertTrue("Runtime dependency 'spam' has been defined multiple times." in validation_messages)
def test_should_not_validate_project_with_duplicate_dependency_when_version_is_given_for_one(self):
self.project.depends_on('spam')
self.project.depends_on('spam', version='2')
validation_messages = self.project.validate()
self.assertTrue("Runtime dependency 'spam' has been defined multiple times." in validation_messages)
def test_should_not_validate_project_with_duplicate_dependency_when_urls_are_different(self):
self.project.depends_on('spam', url='y')
self.project.depends_on('spam', url='x')
validation_messages = self.project.validate()
self.assertTrue(
"Runtime dependency 'spam' has been defined multiple times." in validation_messages)
def test_should_not_validate_project_with_duplicate_dependency_when_url_is_given_for_one(self):
self.project.depends_on('spam')
self.project.depends_on('spam', url='x')
validation_messages = self.project.validate()
self.assertTrue("Runtime dependency 'spam' has been defined multiple times." in validation_messages)
def test_should_not_validate_project_with_duplicate_dependency_for_more_than_two_times(self):
self.project.depends_on('spam', version='1')
self.project.depends_on('spam', version='2')
self.project.depends_on('spam', version='3')
validation_messages = self.project.validate()
self.assertTrue("Runtime dependency 'spam' has been defined multiple times." in validation_messages)
        self.assertEqual(len(validation_messages), 1)
def test_should_not_validate_project_with_duplicate_build_dependency_but_different_versions(self):
self.project.build_depends_on('spam', version='1')
self.project.build_depends_on('spam', version='2')
validation_messages = self.project.validate()
self.assertTrue(
"Build dependency 'spam' has been defined multiple times." in validation_messages)
def test_should_not_validate_project_with_duplicate_build_dependency_when_version_is_given_for_one(self):
self.project.build_depends_on('spam')
self.project.build_depends_on('spam', version='2')
validation_messages = self.project.validate()
self.assertTrue("Build dependency 'spam' has been defined multiple times." in validation_messages)
def test_should_not_validate_project_with_duplicate_build_dependency_when_urls_are_different(self):
self.project.build_depends_on('spam', url='y')
self.project.build_depends_on('spam', url='x')
validation_messages = self.project.validate()
self.assertTrue("Build dependency 'spam' has been defined multiple times." in validation_messages)
def test_should_not_validate_project_with_duplicate_build_dependency_when_url_is_given_for_one(self):
self.project.build_depends_on('spam')
self.project.build_depends_on('spam', url='x')
validation_messages = self.project.validate()
self.assertTrue("Build dependency 'spam' has been defined multiple times." in validation_messages)
def test_should_not_validate_project_with_duplicate_build_dependency_for_more_than_two_times(self):
self.project.build_depends_on('spam', version='1')
self.project.build_depends_on('spam', version='2')
self.project.build_depends_on('spam', version='3')
validation_messages = self.project.validate()
self.assertTrue(
"Build dependency 'spam' has been defined multiple times." in validation_messages)
        self.assertEqual(len(validation_messages), 1)
def test_should_not_validate_project_with_runtime_dependency_being_also_given_as_build_dependency(self):
self.project.depends_on('spam')
self.project.build_depends_on('spam')
validation_messages = self.project.validate()
self.assertTrue(
"Runtime dependency 'spam' has also been given as build dependency." in validation_messages)
        self.assertEqual(len(validation_messages), 1)
class LoggerTest(unittest.TestCase):
class LoggerMock(Logger):
def __init__(self, threshold):
super(LoggerTest.LoggerMock, self).__init__(threshold)
self._logged = []
def _do_log(self, level, message, *arguments):
self._logged.append((level, message, arguments))
def assert_not_logged(self, level, message, *arguments):
if (level, message, arguments) in self._logged:
raise AssertionError(
"Logged %s %s %s" % (level, message, arguments))
def assert_logged(self, level, message, *arguments):
if (level, message, arguments) not in self._logged:
raise AssertionError(
"Not logged %s %s %s" % (level, message, arguments))
def test_should_log_debug_message_without_arguments(self):
logger = LoggerTest.LoggerMock(Logger.DEBUG)
logger.debug("message")
logger.assert_logged(Logger.DEBUG, "message")
def test_should_log_debug_message_without_arguments_but_percent_sign(self):
logger = LoggerTest.LoggerMock(Logger.DEBUG)
logger.debug("message with %s")
logger.assert_logged(Logger.DEBUG, "message with %s")
def test_should_log_debug_message(self):
logger = LoggerTest.LoggerMock(Logger.DEBUG)
logger.debug("message", "argument one", "argument two")
logger.assert_logged(
Logger.DEBUG, "message", "argument one", "argument two")
def test_should_log_info_message(self):
logger = LoggerTest.LoggerMock(Logger.DEBUG)
logger.info("message", "argument one", "argument two")
logger.assert_logged(
Logger.INFO, "message", "argument one", "argument two")
def test_should_log_warn_message(self):
logger = LoggerTest.LoggerMock(Logger.DEBUG)
logger.warn("message", "argument one", "argument two")
logger.assert_logged(
Logger.WARN, "message", "argument one", "argument two")
def test_should_log_error_message(self):
logger = LoggerTest.LoggerMock(Logger.DEBUG)
logger.error("message", "argument one", "argument two")
logger.assert_logged(
Logger.ERROR, "message", "argument one", "argument two")
    def test_should_not_log_info_message_when_threshold_is_set_to_warn(self):
logger = LoggerTest.LoggerMock(Logger.WARN)
logger.info("message", "argument one", "argument two")
logger.assert_not_logged(
Logger.INFO, "message", "argument one", "argument two")
def is_callable(function_or_object):
return isinstance(function_or_object, types.FunctionType) or hasattr(function_or_object, "__call__")
class InitTest(unittest.TestCase):
def test_ensure_that_init_can_be_used_without_invocation_parenthesis(self):
@init
def fun():
pass
self.assertTrue(hasattr(fun, INITIALIZER_ATTRIBUTE))
self.assertTrue(is_callable(fun))
def test_ensure_that_init_can_be_used_with_invocation_parenthesis(self):
@init()
def fun():
pass
self.assertTrue(hasattr(fun, INITIALIZER_ATTRIBUTE))
self.assertTrue(is_callable(fun))
def test_ensure_that_init_can_be_used_with_named_arguments(self):
@init(environments="spam")
def fun():
pass
self.assertTrue(hasattr(fun, INITIALIZER_ATTRIBUTE))
self.assertTrue(hasattr(fun, ENVIRONMENTS_ATTRIBUTE))
self.assertTrue(getattr(fun, ENVIRONMENTS_ATTRIBUTE), ["spam"])
self.assertTrue(is_callable(fun))
class TaskTests(unittest.TestCase):
def test_should_name_task_when_no_description_is_used(self):
@task
def task_without_description():
pass
self.assertEqual(task_without_description._pybuilder_task, True)
self.assertEqual(task_without_description._pybuilder_name,
"task_without_description")
def test_should_name_task_when_decorator_called_with_nothing(self):
@task()
def another_task_without_description():
pass
self.assertEqual(another_task_without_description._pybuilder_task, True)
self.assertEqual(another_task_without_description._pybuilder_name,
"another_task_without_description")
def test_should_describe_task_when_description_decorator_is_used(self):
@task
@description("any-description")
def task_with_description():
pass
self.assertEqual(task_with_description._pybuilder_task, True)
self.assertEqual(task_with_description._pybuilder_description, "any-description")
def test_should_describe_named_task_when_description_decorator_is_used(self):
@task("any-task-name")
@description("any-description")
def task_with_description():
pass
self.assertEqual(task_with_description._pybuilder_task, True)
self.assertEqual(task_with_description._pybuilder_name, "any-task-name")
self.assertEqual(task_with_description._pybuilder_description, "any-description")
def test_should_describe_named_task_when_description_kwarg_is_used(self):
@task("any-task-name", description="any-description")
def task_with_description():
pass
self.assertEqual(task_with_description._pybuilder_task, True)
self.assertEqual(task_with_description._pybuilder_name, "any-task-name")
self.assertEqual(task_with_description._pybuilder_description, "any-description")
def test_should_describe_task_when_description_kwarg_is_used(self):
@task(description="any-description")
def task_with_description():
pass
self.assertEqual(task_with_description._pybuilder_task, True)
self.assertEqual(task_with_description._pybuilder_name, "task_with_description")
self.assertEqual(task_with_description._pybuilder_description, "any-description")
class RequirementsFileTests(unittest.TestCase):
def test_requirements_file_should_be_equal_to_itself(self):
requirements_file = RequirementsFile("requirements.txt")
self.assertTrue(requirements_file == requirements_file)
def test_requirements_file_should_not_be_unequal_to_itself(self):
requirements_file = RequirementsFile("requirements.txt")
self.assertFalse(requirements_file != requirements_file)
def test_requirements_file_should_not_be_equal_to_other_when_names_differ(self):
requirements_file = RequirementsFile("requirements.txt")
dev_requirements_file = RequirementsFile("requirements-dev.txt")
self.assertFalse(requirements_file == dev_requirements_file)
def test_requirements_file_should_be_unequal_to_other_when_names_differ(self):
requirements_file = RequirementsFile("requirements.txt")
dev_requirements_file = RequirementsFile("requirements-dev.txt")
self.assertTrue(requirements_file != dev_requirements_file)
def test_requirements_file_should_be_lesser_than_other_when_name_is_lesser(self):
requirements_file = RequirementsFile("requirements.txt")
dev_requirements_file = RequirementsFile("requirements-dev.txt")
self.assertTrue(requirements_file > dev_requirements_file)
class DependencyTests(unittest.TestCase):
    def test_dependency_should_be_equal_to_itself(self):
dependency = Dependency("foo")
self.assertTrue(dependency == dependency)
def test_dependency_should_not_be_unequal_to_itself(self):
dependency = Dependency("foo")
self.assertFalse(dependency != dependency)
def test_dependency_should_not_be_equal_to_other_when_names_differ(self):
dependency = Dependency("foo")
other_dependency = Dependency("foa")
self.assertFalse(dependency == other_dependency)
def test_dependency_should_be_unequal_to_other_when_names_differ(self):
dependency = Dependency("foo")
other_dependency = Dependency("foa")
self.assertTrue(dependency != other_dependency)
def test_dependency_should_be_lesser_than_other_when_name_is_lesser(self):
dependency = Dependency("foo")
other_dependency = Dependency("foa")
self.assertTrue(dependency > other_dependency)
class DependencyAndRequirementsFileTests(unittest.TestCase):
def test_requirements_file_should_not_be_equal_to_dependency(self):
dependency = Dependency("foo")
requirements = RequirementsFile("requirements.txt")
self.assertFalse(dependency == requirements)
def test_requirements_file_should_not_be_equal_to_dependency_even_when_name_matches(self):
dependency = Dependency("foo")
requirements = RequirementsFile("foo")
self.assertFalse(dependency == requirements)
def test_requirements_file_should_be_unequal_to_dependency(self):
dependency = Dependency("foo")
requirements = RequirementsFile("requirements.txt")
self.assertTrue(dependency != requirements)
def test_requirements_file_should_be_unequal_to_dependency_even_when_name_matches(self):
dependency = Dependency("foo")
requirements = RequirementsFile("foo")
self.assertTrue(dependency != requirements)
def test_requirements_should_always_be_greater_than_dependencies(self):
dependency = Dependency("foo")
requirements = RequirementsFile("requirements.txt")
self.assertTrue(requirements > dependency)
def test_requirements_should_always_be_greater_than_dependencies_even_when_name_matches(self):
dependency = Dependency("foo")
requirements = RequirementsFile("foo")
self.assertTrue(requirements > dependency)
|
|
from __future__ import print_function
import os
import unittest
from shutil import rmtree
import stat as stat_lib
from pysftpserver.server import (SSH2_FILEXFER_ATTR_ACMODTIME,
SSH2_FILEXFER_ATTR_PERMISSIONS,
SSH2_FILEXFER_ATTR_SIZE,
SSH2_FILEXFER_VERSION, SSH2_FXF_CREAT,
SSH2_FXF_EXCL, SSH2_FXF_READ, SSH2_FXF_WRITE,
SSH2_FXP_CLOSE, SSH2_FXP_FSETSTAT,
SSH2_FXP_FSTAT, SSH2_FXP_INIT, SSH2_FXP_LSTAT,
SSH2_FXP_MKDIR, SSH2_FXP_OPEN,
SSH2_FXP_OPENDIR, SSH2_FXP_READ,
SSH2_FXP_READDIR, SSH2_FXP_READLINK,
SSH2_FXP_REMOVE, SSH2_FXP_RENAME,
SSH2_FXP_RMDIR, SSH2_FXP_SETSTAT,
SSH2_FXP_STAT, SSH2_FXP_SYMLINK,
SSH2_FXP_WRITE, SFTPException, SFTPForbidden,
SFTPNotFound, SFTPServer)
from pysftpserver.tests.utils import (get_sftpdata, get_sftphandle,
get_sftpint, get_sftpname, get_sftpstat,
sftpcmd, sftpint, sftpint64, sftpstring,
t_path)
from pysftpserver.virtualchroot import SFTPServerVirtualChroot
class ServerTest(unittest.TestCase):
def setUp(self):
os.chdir(t_path())
self.home = 'home'
if not os.path.isdir(self.home):
os.mkdir(self.home)
self.server = SFTPServer(
SFTPServerVirtualChroot(self.home),
logfile=t_path('log'),
raise_on_error=True
)
def tearDown(self):
os.chdir(t_path())
rmtree(self.home)
def test_mkdir(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_MKDIR, sftpstring(b'foo'), sftpint(0))
self.server.process()
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_MKDIR, sftpstring(b'foo'), sftpint(0))
self.assertRaises(SFTPException, self.server.process)
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_RMDIR, sftpstring(b'foo')
)
self.server.process()
self.assertRaises(OSError, os.rmdir, 'foo')
def test_mkdir_forbidden(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_MKDIR, sftpstring(b'../foo'), sftpint(0))
self.assertRaises(SFTPForbidden, self.server.process)
self.server.input_queue = sftpcmd(
SSH2_FXP_MKDIR, sftpstring(b'/foo'), sftpint(0))
self.assertRaises(SFTPForbidden, self.server.process)
def test_open_already_existing(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT),
sftpint(0)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle)
)
self.server.process()
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT | SSH2_FXF_EXCL),
sftpint(0)
)
self.assertRaises(SFTPException, self.server.process)
os.unlink('services')
def test_stat(self):
with open("/etc/services") as f:
with open("services", 'a') as f_bis:
f_bis.write(f.read())
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_STAT,
sftpstring(b'services')
)
self.server.process()
stat = get_sftpstat(self.server.output_queue)
self.assertEqual(stat['size'], os.path.getsize("/etc/services"))
self.assertEqual(stat['uid'], os.getuid())
os.unlink('services')
def test_lstat(self):
os.symlink("foo", "link")
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_LSTAT,
sftpstring(b'link')
)
self.server.process()
stat = get_sftpstat(self.server.output_queue)
self.assertEqual(stat['size'], len("foo"))
self.assertEqual(stat['uid'], os.getuid())
os.unlink('link')
def test_fstat(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT),
sftpint(0)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_FSTAT,
sftpstring(handle)
)
self.server.process()
stat = get_sftpstat(self.server.output_queue)
self.assertEqual(stat['size'], 0)
self.assertEqual(stat['uid'], os.getuid())
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle)
)
self.server.process()
os.unlink('services')
def test_setstat(self):
atime = 1415626110
mtime = 1415626120
size = 10**2
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT | SSH2_FXF_WRITE),
sftpint(0)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
# reset output queue
self.server.output_queue = b''
etc_services = open('/etc/services', 'rb').read()
self.server.input_queue = sftpcmd(
SSH2_FXP_WRITE,
sftpstring(handle),
sftpint64(0),
sftpstring(etc_services)
)
self.server.process()
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_SETSTAT,
sftpstring(b'services'),
sftpint(
SSH2_FILEXFER_ATTR_SIZE |
SSH2_FILEXFER_ATTR_PERMISSIONS |
SSH2_FILEXFER_ATTR_ACMODTIME
),
            sftpint64(size),  # 100 bytes (size = 10**2)
sftpint(33152), # 0o100600
sftpint(atime),
sftpint(mtime)
)
self.server.process()
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle)
)
self.server.process()
self.assertEqual(
0o600,
stat_lib.S_IMODE(os.lstat('services').st_mode)
)
self.assertEqual(
atime,
os.lstat('services').st_atime
)
self.assertEqual(
mtime,
os.lstat('services').st_mtime
)
self.assertEqual(
size,
os.lstat('services').st_size
)
os.unlink('services')
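    # Informal note on the wire format exercised above (a reading of the test,
    # not a normative description): the attrs flag word selects which fields
    # follow the path string, here size (uint64), permissions (uint32) and
    # atime/mtime (two uint32 values), in that order.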
def test_fsetstat(self):
atime = 1415626110
mtime = 1415626120
size = 10**2
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT | SSH2_FXF_WRITE),
sftpint(0)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
# reset output queue
self.server.output_queue = b''
etc_services = open('/etc/services', 'rb').read()
self.server.input_queue = sftpcmd(
SSH2_FXP_WRITE,
sftpstring(handle),
sftpint64(0),
sftpstring(etc_services)
)
self.server.process()
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_FSETSTAT,
sftpstring(handle),
sftpint(
SSH2_FILEXFER_ATTR_SIZE |
SSH2_FILEXFER_ATTR_PERMISSIONS |
SSH2_FILEXFER_ATTR_ACMODTIME
),
            sftpint64(size),  # 100 bytes (size = 10**2)
sftpint(33152), # 0o100600
sftpint(atime),
sftpint(mtime)
)
self.server.process()
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle)
)
self.server.process()
self.assertEqual(
0o600,
stat_lib.S_IMODE(os.lstat('services').st_mode)
)
self.assertEqual(
atime,
os.lstat('services').st_atime
)
self.assertEqual(
mtime,
os.lstat('services').st_mtime
)
self.assertEqual(
size,
os.lstat('services').st_size
)
os.unlink('services')
def test_open_forbidden(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN, sftpstring(
b'/etc/services'), sftpint(SSH2_FXF_CREAT), sftpint(0)
)
self.assertRaises(SFTPForbidden, self.server.process)
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN, sftpstring(
b'../../foo'), sftpint(SSH2_FXF_CREAT), sftpint(0)
)
self.assertRaises(SFTPForbidden, self.server.process)
def test_read_subdir(self):
f = {b'.', b'..', b'bar'} # files inside foo
os.mkdir("foo")
foobar_path = os.path.join("foo", "bar")
with open(foobar_path, 'a') as stream:
print("foobar", file=stream)
# bar_size = os.lstat(foobar_path).st_size
self.server.input_queue = sftpcmd(
SSH2_FXP_OPENDIR,
sftpstring(b'foo')
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
l = set()
while (True):
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_READDIR,
sftpstring(handle),
)
try:
self.server.process()
filename = get_sftpname(self.server.output_queue)
l.add(filename)
            except Exception:  # end of the directory listing is signalled via an exception
break
self.assertEqual(l, f)
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle),
)
self.server.process()
rmtree("foo")
def test_remove(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT | SSH2_FXF_WRITE),
sftpint(SSH2_FILEXFER_ATTR_PERMISSIONS),
sftpint(0o644)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle)
)
self.server.process()
self.server.input_queue = sftpcmd(
SSH2_FXP_REMOVE,
sftpstring(b'services'),
sftpint(0)
)
self.server.process()
def test_rename(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT | SSH2_FXF_WRITE),
sftpint(SSH2_FILEXFER_ATTR_PERMISSIONS),
sftpint(0o644)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle),
)
self.server.process()
self.server.input_queue = sftpcmd(
SSH2_FXP_RENAME,
sftpstring(b'services'),
sftpstring(b'other_services'),
)
self.server.process()
self.assertIn('other_services', os.listdir('.'))
def test_remove_notfound(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_REMOVE,
sftpstring(b'services'),
sftpint(0)
)
self.assertRaises(SFTPNotFound, self.server.process)
def test_remove_forbidden(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_REMOVE,
sftpstring(b'/etc/services'),
sftpint(0)
)
self.assertRaises(SFTPForbidden, self.server.process)
def test_rename_forbidden(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_RENAME,
sftpstring(b'services'),
sftpstring(b'/etc/other_services'),
)
self.assertRaises(SFTPForbidden, self.server.process)
self.server.input_queue = sftpcmd(
SSH2_FXP_RENAME,
sftpstring(b'/etc/services'),
sftpstring(b'/etc/other_services'),
)
self.assertRaises(SFTPForbidden, self.server.process)
def test_mkdir_notfound(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_MKDIR, sftpstring(b'bad/ugly'), sftpint(0))
self.assertRaises(SFTPNotFound, self.server.process)
def test_readdir(self):
f = {b'.', b'..', b'foo', b'bar'}
os.mkdir("foo")
os.close(os.open("bar", os.O_CREAT))
self.server.input_queue = sftpcmd(
SSH2_FXP_OPENDIR,
sftpstring(b'.')
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
l = set()
while (True):
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_READDIR,
sftpstring(handle),
)
try:
self.server.process()
filename = get_sftpname(self.server.output_queue)
l.add(filename)
            except Exception:  # end of the directory listing is signalled via an exception
break
self.assertEqual(l, f)
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle),
)
self.server.process()
os.unlink("bar")
os.rmdir("foo")
def test_symlink(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_SYMLINK, sftpstring(b'bad/ugly'),
sftpstring(b'bad/ugliest'), sftpint(0))
self.assertRaises(SFTPNotFound, self.server.process)
self.server.input_queue = sftpcmd(
SSH2_FXP_SYMLINK, sftpstring(b'/bad/ugly'),
sftpstring(b'bad/ugliest'), sftpint(0))
self.assertRaises(SFTPForbidden, self.server.process)
self.server.input_queue = sftpcmd(
SSH2_FXP_SYMLINK, sftpstring(b'bad/ugly'),
sftpstring(b'/bad/ugliest'), sftpint(0))
self.assertRaises(SFTPForbidden, self.server.process)
self.server.input_queue = sftpcmd(
SSH2_FXP_SYMLINK, sftpstring(b'ugly'),
sftpstring(b'ugliest'), sftpint(0))
self.server.process()
self.assertIn('ugly', os.listdir('.'))
def test_readlink(self):
os.symlink("infound", "foo")
self.server.input_queue = sftpcmd(
SSH2_FXP_READLINK, sftpstring(b'foo'), sftpint(0))
self.server.process()
link = get_sftpname(self.server.output_queue)
self.assertEqual(link, b"infound")
def test_readdir_broken_symlink(self):
os.symlink("infound", "foo")
self.server.input_queue = sftpcmd(
SSH2_FXP_READLINK, sftpstring(b'foo'), sftpint(0))
self.server.process()
link = get_sftpname(self.server.output_queue)
self.assertEqual(link, b"infound")
self.server.output_queue = b''
f = {b'.', b'..', b'foo'}
self.server.input_queue = sftpcmd(
SSH2_FXP_OPENDIR,
sftpstring(b'.')
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
l = set()
while (True):
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_READDIR,
sftpstring(handle),
)
try:
self.server.process()
filename = get_sftpname(self.server.output_queue)
l.add(filename)
            except Exception:  # end of the directory listing is signalled via an exception
break
self.assertEqual(l, f)
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle),
)
self.server.process()
def test_init(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_INIT, sftpint(2), sftpint(0)
)
self.server.process()
version = get_sftpint(self.server.output_queue)
self.assertEqual(version, SSH2_FILEXFER_VERSION)
def test_rmdir_notfound(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_RMDIR, sftpstring(b'bad/ugly'), sftpint(0))
self.assertRaises(SFTPNotFound, self.server.process)
def test_copy_services(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT | SSH2_FXF_WRITE | SSH2_FXF_READ),
sftpint(SSH2_FILEXFER_ATTR_PERMISSIONS),
sftpint(0o644)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
# reset output queue
self.server.output_queue = b''
etc_services = open('/etc/services', 'rb').read()
etc_services_size = \
os.lstat('/etc/services').st_size # size of the whole file
self.server.input_queue = sftpcmd(
SSH2_FXP_WRITE,
sftpstring(handle),
sftpint64(0),
sftpstring(etc_services)
)
self.server.process()
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_READ,
sftpstring(handle),
sftpint64(0),
sftpint(
etc_services_size
)
)
self.server.process()
data = get_sftpdata(self.server.output_queue)
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_READ,
sftpstring(handle),
sftpint64(etc_services_size),
sftpint(1) # wait for the EOF
)
# EOF status is raised as an exception
self.assertRaises(SFTPException, self.server.process)
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle)
)
self.server.process()
self.assertEqual(
etc_services,
open('services', 'rb').read()
)
self.assertEqual(
etc_services,
data
)
self.assertEqual(
0o644,
stat_lib.S_IMODE(os.lstat('services').st_mode)
)
self.assertEqual(
etc_services_size,
os.lstat('services').st_size
)
os.unlink('services')
@classmethod
def tearDownClass(cls):
os.unlink(t_path("log")) # comment me to see the log!
rmtree(t_path("home"), ignore_errors=True)
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2018 TVB-HPC contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The model module describes neural mass models.
"""
from typing import List
import itertools
import numpy as np
import loopy as lp
import pymbolic as pm
from pymbolic.mapper.differentiator import DifferentiationMapper
from .base import BaseKernel
from .utils import simplify, vars, exprs
class BaseModel(BaseKernel):
"""
BaseModel parses attributes on subclasses, defining a networked SDE.
"""
dt = 1e-3
state = ''
input = ''
param = ''
drift = '',
diffs = '',
obsrv = '',
const = {}
auxex = []
limit = []
def __init__(self):
# TODO check dependencies etc
self.state_sym = vars(self.state)
self.input_sym = vars(self.input)
self.param_sym = vars(self.param)
self.drift_sym = exprs(self.drift)
self.diffs_sym = exprs(self.diffs)
self.obsrv_sym = exprs(self.obsrv)
@property
def indvars(self):
return np.r_[self.state_sym, self.input_sym, self.param_sym]
def partial(self, expr):
exprs = []
for var in self.indvars:
exprs.append(simplify(DifferentiationMapper(var)(expr)))
return np.array(exprs)
def prep_arrays(self, nnode: int) -> List[np.ndarray]:
"""
Prepare arrays for use with this model.
"""
dtype = np.float32
arrs: List[np.ndarray] = []
for key in 'state input param drift diffs obsrv'.split():
shape = nnode, len(getattr(self, key + '_sym'))
arrs.append(np.zeros(shape, dtype))
state = arrs[0]
for i, (lo, hi) in enumerate(self.limit):
state[:, i] = np.random.uniform(float(lo), float(hi),
size=(nnode, ))
param = arrs[2]
for i, psym in enumerate(self.param_sym):
param[:, i] = self.const[psym.name]
return arrs
    # Need to suspend the kernel domain, but we can't batch without an initial
    # domain; the real issue is that we want to spatialize parameters.
def kernel_domains(self) -> str:
return "{ [i_node]: 0 <= i_node < nnode }"
    # Another open issue is that we need to manage variable names.
def kernel_dtypes(self):
dtypes = {'nnode,i_time,ntime': np.uintc}
for key in 'state input param drift diffs obsrv'.split():
dtypes[key] = np.float32
return dtypes
def kernel_data(self):
data = super().kernel_data()
# loopy can't infer bound on first dim of obsrv
shape = pm.var('ntime'), pm.var('nnode'), len(self.obsrv_sym)
data[data.index('obsrv')] = lp.GlobalArg('obsrv', shape=shape)
return data
def kernel_isns(self):
return itertools.chain(
self._insn_constants(),
self._insn_unpack(),
self._insn_auxex(),
self._insn_store(),
)
def _insn_constants(self):
fmt = '<> {key} = {val}'
for key, val in self.const.items():
if key not in self.param:
yield fmt.format(key=key, val=val)
def _insn_unpack(self):
fmt = '<> {var} = {kind}[i_node, {i}]'
for kind in 'state input param'.split():
vars = getattr(self, kind + '_sym')
for i, var in enumerate(vars):
yield fmt.format(kind=kind, var=var.name, i=i)
def _insn_auxex(self):
fmt = '<> {lhs} = {rhs}'
for lhs, rhs in self.auxex:
yield fmt.format(lhs=lhs, rhs=rhs)
def _insn_store(self):
fmt = {
'drift': '{kind}[i_node, {i}] = {expr}',
'diffs': '{kind}[i_node, {i}] = {expr}',
'obsrv': '{kind}[i_time, i_node, {i}] = {expr}',
}
for kind in 'drift diffs obsrv'.split():
exprs = getattr(self, kind + '_sym')
for i, expr in enumerate(exprs):
yield fmt[kind].format(kind=kind, expr=str(expr), i=i)
def _wrap_limit(self, svar_idx):
# generate branchless wrap around
insn = (
'{x} = ({x} < {x0}) * ({x} + {dx}) + ({x} > {x1}) * ({x} - {dx}) '
' + ({x} >= {x0}) * ({x} <= {x1}) * {x}'
)
x0, x1 = self.limit[svar_idx]
yield insn.format(
x='state[i_node, %d]' % (svar_idx, ),
x0=x0,
x1=x1,
dx=x1 - x0,
)
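# Illustrative sketch (an assumption, not part of the original module) of the
# branchless wrap generated by _wrap_limit above: values below x0 are shifted
# up by dx, values above x1 are shifted down by dx, and in-range values pass
# through unchanged, e.g. wrapping 7.0 into (0, 2*pi) gives roughly 0.717.
def _example_branchless_wrap(x, x0, x1):
    """Hypothetical helper mirroring the generated instruction, for clarity."""
    dx = x1 - x0
    return (x < x0) * (x + dx) + (x > x1) * (x - dx) + (x >= x0) * (x <= x1) * x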
class _TestModel(BaseModel):
state = 'y1 y2'
input = 'i1'
param = 'a b c'
auxex = [('y1_3', 'y1 * y1 * y1')]
drift = '(y1 - y1_3/3 + y2 + e)*b', '(a - y1 + i1 + d)/b'
diffs = 'c', 'c'
obsrv = 'y1',
const = {'d': 3.0, 'e': -12.23904e-2, 'a': 1.05, 'b': 3.0, 'c': 1e-5}
limit = (-1, 1), (-1, 1)
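# A hedged usage sketch (hypothetical, not exercised anywhere in this module):
# subclasses such as _TestModel declare their symbols as strings, and
# prep_arrays allocates per-node buffers, seeding state from `limit` and
# parameters from `const`.
def _example_prep_arrays_sketch(nnode=8):
    model = _TestModel()
    state, _input, param, _drift, _diffs, _obsrv = model.prep_arrays(nnode)
    # state is sampled uniformly inside the declared limits; param holds the
    # constants for 'a', 'b' and 'c'; the remaining buffers start at zero.
    assert state.shape == (nnode, 2) and param.shape == (nnode, 3)
    return state, param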
class Kuramoto(BaseModel):
"Kuramoto model of phase synchronization."
state = 'theta'
limit = (0, 2 * np.pi),
input = 'I'
param = 'omega'
drift = 'omega + I',
diffs = 0,
obsrv = 'theta', 'sin(theta)'
const = {'omega': 1.0}
def _insn_store(self):
yield from self._wrap_limit(0)
yield from super()._insn_store()
class HMJE(BaseModel):
"Hindmarsh-Rose-Jirsa Epileptor model of seizure dynamics."
state = 'x1 y1 z x2 y2 g'
limit = (-2, 1), (20, 2), (2, 5), (-2, 0), (0, 2), (-1, 1)
input = 'c1 c2'
param = 'x0 Iext r'
drift = (
'tt * (y1 - z + Iext + Kvf * c1 + ('
' (x1 < 0)*(-a * x1 * x1 + b * x1)' # noqa: E131
'+ (x1 >= 0)*(slope - x2 + 0.6 * (z - 4)**2)' # noqa: E131
') * x1)',
'tt * (c - d * x1 * x1 - y1)',
'tt * (r * (4 * (x1 - x0) - z + Ks * c1))',
'tt * (-y2 + x2 - x2*x2*x2 + Iext2 + 2 * g - 0.3 * (z - 3.5) + Kf * c2)',
'tt * ((-y2 + (x2 >= (-0.25)) * (aa * (x2 + 0.25))) / tau)',
'tt * (-0.01 * (g - 0.1 * x1))'
)
diffs = 0, 0, 0, 0.0003, 0.0003, 0
obsrv = 'x1', 'x2', 'z', '-x1 + x2'
const = {'Iext2': 0.45, 'a': 1.0, 'b': 3.0, 'slope': 0.0, 'tt': 1.0, 'c':
1.0, 'd': 5.0, 'Kvf': 0.0, 'Ks': 0.0, 'Kf': 0.0, 'aa': 6.0, 'tau':
10.0, 'x0': -1.6, 'Iext': 3.1, 'r': 0.00035}
class RWW(BaseModel):
"Reduced Wong-Wang firing rate model."
state = 'S'
limit = (0, 1),
input = 'c'
param = 'w io'
const = {'a': 0.270, 'b': 0.108, 'd': 154.0, 'g': 0.641,
'ts': 100.0, 'J': 0.2609, 'w': 0.6, 'io': 0.33}
auxex = [
('x', 'w * J * S + io + J * c'),
('h', '(a * x - b) / (1 - exp(-d*(a*x - b)))')
]
drift = '- (S / ts) + (1 - S) * h * g',
diffs = 0.01,
obsrv = 'S',
class JansenRit(BaseModel):
"Jansen-Rit model of visual evoked potentials."
state = 'y0 y1 y2 y3 y4 y5'
limit = (-1, 1), (-500, 500), (-50, 50), (-6, 6), (-20, 20), (-500, 500)
const = {'A': 3.25, 'B': 22.0, 'a': 0.1, 'b': 0.05, 'v0': 5.52,
'nu_max': 0.0025, 'r': 0.56, 'J': 135.0, 'a_1': 1.0, 'a_2': 0.8,
'a_3': 0.25, 'a_4': 0.25, 'mu': 0.22}
input = 'lrc'
param = 'v0 r'
auxex = [
('sigm_y1_y2', '2 * nu_max / (1 + exp(r * (v0 - (y1 - y2))))'),
('sigm_y0_1', '2 * nu_max / (1 + exp(r * (v0 - (a_1 * J * y0))))'),
('sigm_y0_3', '2 * nu_max / (1 + exp(r * (v0 - (a_3 * J * y0))))'),
]
drift = (
'y3', 'y4', 'y5',
'A * a * sigm_y1_y2 - 2 * a * y3 - a**2 * y0',
'A * a * (mu + a_2 * J * sigm_y0_1 + lrc) - 2 * a * y4 - a**2 * y1',
'B * b * (a_4 * J * sigm_y0_3) - 2 * b * y5 - b**2 * y2'
)
diffs = 0, 0, 0, 0, 0, 0
obsrv = 'y0 - y1', # TODO check w/ Andreas
class Linear(BaseModel):
'Linear differential equation'
state = 'x'
limit = (-10, 10),
input = 'c'
param = 'lam'
const = {'lam': -1} # default value
drift = 'lam * x + c',
diffs = 1e-2,
obsrv = 'x',
class G2DO(BaseModel):
"Generic nonlinear 2-D (phase plane) oscillator."
state = 'W V'
limit = (-5, 5), (-5, 5)
input = 'c_0'
param = 'a'
const = {'tau': 1.0, 'I': 0.0, 'a': -2.0, 'b': -10.0, 'c': 0.0, 'd': 0.02,
'e': 3.0, 'f': 1.0, 'g': 0.0, 'alpha': 1.0, 'beta': 1.0,
'gamma': 1.0}
drift = (
'd * tau * (alpha*W - f*V**3 + e*V**2 + g*V + gamma*I + gamma*c_0)',
'd * (a + b*V + c*V**2 - beta*W) / tau'
)
diffs = 1e-3, 1e-3
obsrv = 'W', 'V'
|
|
import pytest, py
def exvalue():
return py.std.sys.exc_info()[1]
def f():
return 2
def test_assert():
try:
assert f() == 3
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_within_finally():
excinfo = py.test.raises(ZeroDivisionError, """
try:
1/0
finally:
i = 42
""")
s = excinfo.exconly()
assert py.std.re.search("division.+by zero", s) is not None
#def g():
# A.f()
#excinfo = getexcinfo(TypeError, g)
#msg = getmsg(excinfo)
#assert msg.find("must be called with A") != -1
def test_assert_multiline_1():
try:
assert (f() ==
3)
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_multiline_2():
try:
assert (f() == (4,
3)[-1])
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 ==')
def test_in():
try:
assert "hi" in [1, 2]
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 'hi' in")
def test_is():
try:
assert 1 is 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 is 2")
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_attrib():
class Foo(object):
b = 1
i = Foo()
try:
assert i.b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_attrib_inst():
class Foo(object):
b = 1
try:
assert Foo().b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
def test_len():
l = list(range(42))
try:
assert len(l) == 100
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 42 == 100")
assert "where 42 = len([" in s
def test_assert_keyword_arg():
def f(x=3):
return False
try:
assert f(x=5)
except AssertionError:
e = exvalue()
assert "x=5" in str(e)
# These tests should both fail, but should fail nicely...
class WeirdRepr:
def __repr__(self):
return '<WeirdRepr\nsecond line>'
def bug_test_assert_repr():
v = WeirdRepr()
try:
assert v == 1
except AssertionError:
e = exvalue()
assert str(e).find('WeirdRepr') != -1
assert str(e).find('second line') != -1
assert 0
def test_assert_non_string():
try:
assert 0, ['list']
except AssertionError:
e = exvalue()
assert str(e).find("list") != -1
def test_assert_implicit_multiline():
try:
x = [1,2,3]
assert x != [1,
2, 3]
except AssertionError:
e = exvalue()
assert str(e).find('assert [1, 2, 3] !=') != -1
def test_assert_with_brokenrepr_arg():
class BrokenRepr:
def __repr__(self): 0 / 0
e = AssertionError(BrokenRepr())
if e.msg.find("broken __repr__") == -1:
py.test.fail("broken __repr__ not handle correctly")
def test_multiple_statements_per_line():
try:
a = 1; assert a == 2
except AssertionError:
e = exvalue()
assert "assert 1 == 2" in str(e)
def test_power():
try:
assert 2**3 == 7
except AssertionError:
e = exvalue()
assert "assert (2 ** 3) == 7" in str(e)
class TestView:
def setup_class(cls):
cls.View = py.test.importorskip("py._code._assertionold").View
def test_class_dispatch(self):
### Use a custom class hierarchy with existing instances
class Picklable(self.View):
pass
class Simple(Picklable):
__view__ = object
def pickle(self):
return repr(self.__obj__)
class Seq(Picklable):
__view__ = list, tuple, dict
def pickle(self):
return ';'.join(
[Picklable(item).pickle() for item in self.__obj__])
class Dict(Seq):
__view__ = dict
def pickle(self):
return Seq.pickle(self) + '!' + Seq(self.values()).pickle()
assert Picklable(123).pickle() == '123'
assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4'
assert Picklable({1:2}).pickle() == '1!2'
def test_viewtype_class_hierarchy(self):
# Use a custom class hierarchy based on attributes of existing instances
class Operation:
"Existing class that I don't want to change."
def __init__(self, opname, *args):
self.opname = opname
self.args = args
existing = [Operation('+', 4, 5),
Operation('getitem', '', 'join'),
Operation('setattr', 'x', 'y', 3),
Operation('-', 12, 1)]
class PyOp(self.View):
def __viewkey__(self):
return self.opname
def generate(self):
return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args)))
class PyBinaryOp(PyOp):
__view__ = ('+', '-', '*', '/')
def generate(self):
return '%s %s %s' % (self.args[0], self.opname, self.args[1])
codelines = [PyOp(op).generate() for op in existing]
assert codelines == ["4 + 5", "getitem('', 'join')",
"setattr('x', 'y', 3)", "12 - 1"]
def test_underscore_api():
py.code._AssertionError
py.code._reinterpret_old # used by pypy
py.code._reinterpret
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_assert_customizable_reprcompare(monkeypatch):
util = pytest.importorskip("_pytest.assertion.util")
monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
try:
assert 3 == 4
except AssertionError:
e = exvalue()
s = str(e)
assert "hello" in s
def test_assert_long_source_1():
try:
assert len == [
(None, ['somet text', 'more text']),
]
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_long_source_2():
try:
assert(len == [
(None, ['somet text', 'more text']),
])
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_raise_alias(testdir):
testdir.makepyfile("""
import sys
EX = AssertionError
def test_hello():
raise EX("hello"
"multi"
"line")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello*",
"*raise EX*",
"*1 failed*",
])
@pytest.mark.skipif("sys.version_info < (2,5)")
def test_assert_raise_subclass():
class SomeEx(AssertionError):
def __init__(self, *args):
super(SomeEx, self).__init__()
try:
raise SomeEx("hello")
except AssertionError as e:
s = str(e)
assert 're-run' not in s
assert 'could not determine' in s
def test_assert_raises_in_nonzero_of_object_pytest_issue10():
class A(object):
def __nonzero__(self):
raise ValueError(42)
def __lt__(self, other):
return A()
def __repr__(self):
return "<MY42 object>"
def myany(x):
return True
try:
assert not(myany(A() < 0))
except AssertionError:
e = exvalue()
s = str(e)
assert "<MY42 object> < 0" in s
|
|
"""Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from scipy.stats import rankdata
from ..utils import assert_all_finite
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array, check_X_y
from ..utils.multiclass import type_of_target
from ..utils.extmath import stable_cumsum
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from ..preprocessing import LabelBinarizer
from .base import _average_binary_score
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
if isinstance(area, np.memmap):
# Reductions such as .sum used internally in np.trapz do not return a
# scalar by default for numpy.memmap instances contrary to
# regular numpy.ndarray instances.
area = area.dtype.type(area)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels (either {0, 1} or {-1, 1}).
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
.. [2] `Stanford Information Retrieval book
<http://nlp.stanford.edu/IR-book/html/htmledition/
evaluation-of-ranked-retrieval-results-1.html>`_
.. [3] `The PASCAL Visual Object Classes (VOC) Challenge
<http://citeseerx.ist.psu.edu/viewdoc/
download?doi=10.1.1.157.5766&rep=rep1&type=pdf>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.83...
"""
def _binary_uninterpolated_average_precision(
y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
# Return the step function integral
# The following works because the last entry of precision is
# guaranteed to be 1, as returned by precision_recall_curve
return -np.sum(np.diff(recall) * np.array(precision)[:-1])
return _average_binary_score(_binary_uninterpolated_average_precision,
y_true, y_score, average,
sample_weight=sample_weight)
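# Worked illustration (added, not upstream): for the docstring example above,
# precision_recall_curve gives precision = [0.66..., 0.5, 1., 1.] and
# recall = [1., 0.5, 0.5, 0.], so the step integral is
# -((0.5 - 1) * 0.66... + (0.5 - 0.5) * 0.5 + (0. - 0.5) * 1.) ~= 0.83,
# matching the 0.83... value shown in the docstring.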
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels (either {0, 1} or {-1, 1}).
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
                                         sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int or str, default=None
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
assert_all_finite(y_true)
assert_all_finite(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.array_equal(classes, [0, 1]) or
np.array_equal(classes, [-1, 1]) or
np.array_equal(classes, [0]) or
np.array_equal(classes, [-1]) or
np.array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = stable_cumsum(y_true * weight)[threshold_idxs]
if sample_weight is not None:
fps = stable_cumsum(weight)[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
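# Illustrative example (added): for y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8], the helper above yields
# thresholds = [0.8, 0.4, 0.35, 0.1], tps = [1, 1, 2, 2] and fps = [0, 1, 1, 2],
# i.e. cumulative true/false positive counts when each distinct score is used
# as the decision threshold.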
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int or str, default=None
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int or str, default=None
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
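# Illustrative note (added): for the docstring example (pos_label=2),
# _binary_clf_curve returns fps = [0, 1, 1, 2] and tps = [1, 1, 2, 2]; dividing
# by fps[-1] = 2 and tps[-1] = 2 gives the fpr = [0., 0.5, 0.5, 1.] and
# tpr = [0.5, 0.5, 1., 1.] arrays shown in the docstring.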
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
# Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
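# Worked illustration (added): in the docstring example, sample 1 has one
# relevant label (score 0.75) ranked 2nd among its scores, giving 1/2, and
# sample 2 has one relevant label (score 0.1) ranked 3rd, giving 1/3; the mean
# (1/2 + 1/3) / 2 ~= 0.416... matches the documented value.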
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Note: Our implementation's score is 1 greater than the one given in
Tsoumakas et al., 2010. This extends it to handle the degenerate case
in which an instance has 0 true labels.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
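# Illustrative example (added): with y_true = [[1, 0, 0], [0, 0, 1]] and
# y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]], sample 1 needs the top 2 scores to
# cover its true label and sample 2 needs all 3, so the coverage error is
# (2 + 3) / 2 = 2.5.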
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
.. versionadded:: 0.17
A function *label_ranking_loss*
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = np.bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = np.bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
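# Illustrative example (added): with y_true = [[1, 0, 0], [0, 0, 1]] and
# y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]], sample 1 has 1 of its 2
# (true, false) label pairs incorrectly ordered (loss 0.5) and sample 2 has
# both pairs incorrectly ordered (loss 1.0), giving an average ranking loss
# of 0.75.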
def dcg_score(y_true, y_score, k=5):
"""Discounted cumulative gain (DCG) at rank K.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
score : float
References
----------
.. [1] `Wikipedia entry for the Discounted Cumulative Gain
<https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_
"""
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
gain = 2 ** y_true - 1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gain / discounts)
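# Note (added): the function above implements
# DCG@k = sum_i (2**rel_i - 1) / log2(i + 2) over the k highest-scoring items,
# with i counted from 0 in ranked order.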
def ndcg_score(y_true, y_score, k=5):
"""Normalized discounted cumulative gain (NDCG) at rank K.
Normalized Discounted Cumulative Gain (NDCG) measures the performance of a
recommendation system based on the graded relevance of the recommended
entities. It varies from 0.0 to 1.0, with 1.0 representing the ideal
ranking of the entities.
Parameters
----------
y_true : array, shape = [n_samples]
        Ground truth (true labels represented as integers).
y_score : array, shape = [n_samples, n_classes]
Predicted probabilities.
k : int
Rank.
Returns
-------
score : float
Examples
--------
>>> y_true = [1, 0, 2]
>>> y_score = [[0.15, 0.55, 0.2], [0.7, 0.2, 0.1], [0.06, 0.04, 0.9]]
>>> ndcg_score(y_true, y_score, k=2)
1.0
>>> y_score = [[0.9, 0.5, 0.8], [0.7, 0.2, 0.1], [0.06, 0.04, 0.9]]
>>> ndcg_score(y_true, y_score, k=2)
0.66666666666666663
References
----------
.. [1] `Kaggle entry for the Normalized Discounted Cumulative Gain
<https://www.kaggle.com/wiki/NormalizedDiscountedCumulativeGain>`_
"""
y_score, y_true = check_X_y(y_score, y_true)
# Make sure we use all the labels (max between the length and the higher
# number in the array)
lb = LabelBinarizer()
lb.fit(np.arange(max(np.max(y_true) + 1, len(y_true))))
binarized_y_true = lb.transform(y_true)
if binarized_y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different value ranges")
scores = []
# Iterate over each y_value_true and compute the DCG score
for y_value_true, y_value_score in zip(binarized_y_true, y_score):
actual = dcg_score(y_value_true, y_value_score, k)
best = dcg_score(y_value_true, y_value_true, k)
scores.append(actual / best)
return np.mean(scores)
|
|
import sys
import unittest
from pyramid import testing
from pyramid.config import Configurator
PY3 = sys.version_info[0] == 3
class Test_add_handler(unittest.TestCase):
def _makeOne(self, autocommit=True):
from pyramid.config import Configurator
from pyramid_handlers import add_handler
config = Configurator(autocommit=autocommit)
config.add_directive('add_handler', add_handler)
return config
def test_add_handler_action_in_route_pattern(self):
config = self._makeOne()
views = []
def dummy_add_view(**kw):
views.append(kw)
config.add_view = dummy_add_view
config.add_handler('name', '/:action', DummyHandler)
self._assertRoute(config, 'name', '/:action', 0)
self.assertEqual(len(views), 2)
view = views[0]
preds = view['custom_predicates']
self.assertEqual(len(preds), 1)
pred = preds[0]
request = testing.DummyRequest()
self.assertEqual(pred(None, request), False)
request.matchdict = {'action':'action1'}
self.assertEqual(pred(None, request), True)
self.assertEqual(view['route_name'], 'name')
self.assertEqual(view['attr'], 'action1')
self.assertEqual(view['view'], DummyHandler)
view = views[1]
preds = view['custom_predicates']
self.assertEqual(len(preds), 1)
pred = preds[0]
request = testing.DummyRequest()
self.assertEqual(pred(None, request), False)
request.matchdict = {'action':'action2'}
self.assertEqual(pred(None, request), True)
self.assertEqual(view['route_name'], 'name')
self.assertEqual(view['attr'], 'action2')
self.assertEqual(view['view'], DummyHandler)
def test_add_handler_action_in_route_pattern_with_xformer(self):
config = self._makeOne()
def x(name):
return name.upper()
config.registry.settings['pyramid_handlers.method_name_xformer'] = x
views = []
def dummy_add_view(**kw):
views.append(kw)
config.add_view = dummy_add_view
config.add_handler('name', '/:action', DummyHandler)
self._assertRoute(config, 'name', '/:action', 0)
self.assertEqual(len(views), 2)
view = views[0]
preds = view['custom_predicates']
self.assertEqual(len(preds), 1)
pred = preds[0]
request = testing.DummyRequest()
self.assertEqual(pred(None, request), False)
request.matchdict = {'action':'ACTION1'}
self.assertEqual(pred(None, request), True)
self.assertEqual(view['route_name'], 'name')
self.assertEqual(view['attr'], 'action1')
self.assertEqual(view['view'], DummyHandler)
view = views[1]
preds = view['custom_predicates']
self.assertEqual(len(preds), 1)
pred = preds[0]
request = testing.DummyRequest()
self.assertEqual(pred(None, request), False)
request.matchdict = {'action':'ACTION2'}
self.assertEqual(pred(None, request), True)
self.assertEqual(view['route_name'], 'name')
self.assertEqual(view['attr'], 'action2')
self.assertEqual(view['view'], DummyHandler)
def test_add_handler_with_view_overridden_autoexpose_None(self):
config = self._makeOne()
views = []
def dummy_add_view(**kw):
views.append(kw) # pragma: no cover
config.add_view = dummy_add_view
class MyView(DummyHandler):
__autoexpose__ = None
config.add_handler('name', '/:action', MyView)
self._assertRoute(config, 'name', '/:action', 0)
self.assertEqual(len(views), 0)
def test_add_handler_with_view_overridden_autoexpose_broken_regex1(self):
from pyramid.exceptions import ConfigurationError
config = self._makeOne()
def dummy_add_view(**kw):
""" """
config.add_view = dummy_add_view
class MyView(DummyHandler):
__autoexpose__ = 1
self.assertRaises(ConfigurationError, config.add_handler,
'name', '/{action}', MyView)
def test_add_handler_with_view_overridden_autoexpose_broken_regex2(self):
from pyramid.exceptions import ConfigurationError
config = self._makeOne()
def dummy_add_view(**kw):
""" """
config.add_view = dummy_add_view
class MyView(DummyHandler):
__autoexpose__ = 'a\\'
self.assertRaises(ConfigurationError, config.add_handler,
'name', '/{action}', MyView)
def test_add_handler_with_view_method_has_expose_config(self):
config = self._makeOne()
views = []
def dummy_add_view(**kw):
views.append(kw)
config.add_view = dummy_add_view
class MyView(object):
def action(self): # pragma: no cover
return 'response'
action.__exposed__ = [{'custom_predicates':(1,)}]
config.add_handler('name', '/:action', MyView)
self._assertRoute(config, 'name', '/:action', 0)
self.assertEqual(len(views), 1)
view = views[0]
preds = view['custom_predicates']
self.assertEqual(len(preds), 2)
self.assertEqual(view['route_name'], 'name')
self.assertEqual(view['attr'], 'action')
self.assertEqual(view['view'], MyView)
def test_add_handler_with_view_method_has_expose_config_with_action(self):
config = self._makeOne()
views = []
def dummy_add_view(**kw):
views.append(kw)
config.add_view = dummy_add_view
class MyView(object):
def action(self): # pragma: no cover
return 'response'
action.__exposed__ = [{'name':'action3000'}]
config.add_handler('name', '/:action', MyView)
self._assertRoute(config, 'name', '/:action', 0)
self.assertEqual(len(views), 1)
view = views[0]
preds = view['custom_predicates']
self.assertEqual(len(preds), 1)
pred = preds[0]
request = testing.DummyRequest()
self.assertEqual(pred(None, request), False)
request.matchdict = {'action':'action3000'}
self.assertEqual(pred(None, request), True)
self.assertEqual(view['route_name'], 'name')
self.assertEqual(view['attr'], 'action')
self.assertEqual(view['view'], MyView)
def test_add_handler_with_view_method_has_expose_config_with_action_regex(
self):
config = self._makeOne()
views = []
def dummy_add_view(**kw):
views.append(kw)
config.add_view = dummy_add_view
class MyView(object):
def action(self): # pragma: no cover
return 'response'
action.__exposed__ = [{'name':'^action3000$'}]
config.add_handler('name', '/:action', MyView)
self._assertRoute(config, 'name', '/:action', 0)
self.assertEqual(len(views), 1)
view = views[0]
preds = view['custom_predicates']
self.assertEqual(len(preds), 1)
pred = preds[0]
request = testing.DummyRequest()
self.assertEqual(pred(None, request), False)
request.matchdict = {'action':'action3000'}
self.assertEqual(pred(None, request), True)
self.assertEqual(view['route_name'], 'name')
self.assertEqual(view['attr'], 'action')
self.assertEqual(view['view'], MyView)
def test_add_handler_with_action_decorator(self):
config = self._makeOne()
views = []
def dummy_add_view(**kw):
views.append(kw)
config.add_view = dummy_add_view
class MyHandler(object):
@classmethod
def __action_decorator__(cls, fn): # pragma: no cover
return fn
def action(self): # pragma: no cover
return 'response'
config.add_handler('name', '/{action}', MyHandler)
self.assertEqual(len(views), 1)
self.assertEqual(views[0]['decorator'], MyHandler.__action_decorator__)
def test_add_handler_with_action_decorator_fail_on_instancemethod(self):
config = self._makeOne()
class MyHandler(object):
def __action_decorator__(self, fn): # pragma: no cover
return fn
def action(self): # pragma: no cover
return 'response'
self.assertRaises(TypeError, config.add_handler,
'name', '/{action}', MyHandler)
def test_add_handler_doesnt_mutate_expose_dict(self):
config = self._makeOne()
views = []
def dummy_add_view(**kw):
views.append(kw)
config.add_view = dummy_add_view
exposed = [{'name':'^action3000$'}]
class MyView(object):
def action(self): # pragma: no cover
return 'response'
action.__exposed__ = exposed
config.add_handler('name', '/{action}', MyView)
self.assertEqual(exposed[0], {'name':'^action3000$'}) # not mutated
def test_add_handler_with_action_and_action_in_path(self):
from pyramid.exceptions import ConfigurationError
config = self._makeOne()
self.assertRaises(ConfigurationError, config.add_handler,
'name', '/{action}', DummyHandler, action='abc')
def test_add_handler_with_explicit_action(self):
config = self._makeOne()
class DummyHandler(object):
def index(self): pass
index.__exposed__ = [{'a':'1'}]
views = []
def dummy_add_view(**kw):
views.append(kw)
config.add_view = dummy_add_view
config.add_handler('name', '/abc', DummyHandler, action='index')
self.assertEqual(len(views), 1)
view = views[0]
self.assertEqual(view['a'], '1')
self.assertEqual(view['attr'], 'index')
self.assertEqual(view['route_name'], 'name')
self.assertEqual(view['view'], DummyHandler)
def test_add_handler_with_implicit_action(self):
config = self._makeOne()
class DummyHandler(object):
def __call__(self): pass
__call__.__exposed__ = [{'a':'1'}]
views = []
def dummy_add_view(**kw):
views.append(kw)
config.add_view = dummy_add_view
config.add_handler('name', '/abc', DummyHandler)
self.assertEqual(len(views), 1)
view = views[0]
self.assertEqual(view['a'], '1')
self.assertEqual(view['attr'], None)
self.assertEqual(view['route_name'], 'name')
self.assertEqual(view['view'], DummyHandler)
def test_add_handler_with_multiple_action(self):
config = self._makeOne()
class DummyHandler(object):
def index(self): pass
def create(self): pass
create.__exposed__ = [{'name': 'index'}]
views = []
def dummy_add_view(**kw):
views.append(kw)
config.add_view = dummy_add_view
config.add_handler('name', '/abc', DummyHandler, action='index')
self.assertEqual(len(views), 2)
view = views[0]
self.assertEqual(view['attr'], 'create')
self.assertEqual(view['route_name'], 'name')
self.assertEqual(view['view'], DummyHandler)
view = views[1]
self.assertEqual(view['attr'], 'index')
def test_add_handler_string(self):
views = []
config = self._makeOne()
def dummy_add_view(**kw):
views.append(kw)
class DummyHandler(object):
def one(self): pass
config.add_view = dummy_add_view
config.add_handler('name', '/abc', DummyHandler)
self.assertEqual(len(views), 1)
view = views[0]
self.assertEqual(view['view'], DummyHandler)
def test_add_handler_pattern_None_no_previous_route(self):
from pyramid.exceptions import ConfigurationError
config = self._makeOne()
self.assertRaises(ConfigurationError, config.add_handler,
'name', None, 'pyramid')
def test_add_handler_pattern_None_with_previous_route(self):
from pyramid.exceptions import ConfigurationError
config = self._makeOne()
config.add_route('name', ':def')
self.assertRaises(ConfigurationError, config.add_handler,
'name', None, 'pyramid')
def test_add_handler_explicit_action_lacking(self):
config = self._makeOne()
views = []
def dummy_add_view(**kw): views.append(kw)
class DummyHandler(object):
def one(self): pass
config.add_view = dummy_add_view # shouldn't be called
config.add_handler('name', ':def', DummyHandler, action='two')
self.assertEqual(len(views), 0)
def test_add_handler_explicit_action_and_extra_exposed(self):
config = self._makeOne()
views = []
def dummy_add_view(**kw): views.append(kw)
class DummyHandler(object):
def two(self): pass
two.__exposed__ = [{'name':'one'}]
config.add_view = dummy_add_view # shouldn't be called
config.add_handler('name', ':def', DummyHandler, action='two')
self.assertEqual(len(views), 1)
view = views[0]
self.assertEqual(view['view'], DummyHandler)
def test_add_handler_with_view_permission_and_action_in_path(self):
from pyramid_handlers import action
config = self._makeOne()
views = []
def dummy_add_view(**kw):
views.append(kw) # pragma: no cover
config.add_view = dummy_add_view
class MyView(DummyHandler):
@action(permission='different_perm')
def action_with_non_default_permission(self): # pragma: no cover
return 'My permission is different!'
config.add_handler('name', '/{action}', MyView, view_permission='perm')
self._assertRoute(config, 'name', '/{action}', 0)
self.assertEqual(len(views), 3)
for view in views:
self.assertTrue('permission' in view)
if view['attr'] == 'action_with_non_default_permission':
self.assertEqual(view['permission'], 'different_perm')
else:
self.assertEqual(view['permission'], 'perm')
def test_add_handler_with_view_permission_and_action_as_kwarg(self):
from pyramid_handlers import action
config = self._makeOne()
views = []
def dummy_add_view(**kw):
views.append(kw) # pragma: no cover
config.add_view = dummy_add_view
class MyView(DummyHandler):
def index(self): # pragma: no cover
return 'Index'
@action(name='index', permission='different_perm')
def index2(self): # pragma: no cover
return 'Index with different permission.'
config.add_handler('name', '/', MyView, action='index',
view_permission='perm')
self._assertRoute(config, 'name', '/', 0)
self.assertEqual(len(views), 2)
for view in views:
self.assertTrue('permission' in view)
if view['attr'] == 'index':
self.assertEqual(view['permission'], 'perm')
elif view['attr'] == 'index2':
self.assertEqual(view['permission'], 'different_perm')
def _assertRoute(self, config, name, path, num_predicates=0):
from pyramid.interfaces import IRoutesMapper
mapper = config.registry.getUtility(IRoutesMapper)
routes = mapper.get_routes()
route = routes[0]
self.assertEqual(len(routes), 1)
self.assertEqual(route.name, name)
self.assertEqual(route.path, path)
self.assertEqual(len(routes[0].predicates), num_predicates)
return route
def test_conflict_add_handler(self):
class AHandler(object):
def aview(self): pass
config = self._makeOne(autocommit=False)
config.add_handler('h1', '/h1', handler=AHandler)
config.add_handler('h1', '/h1', handler=AHandler)
try:
config.commit()
except Exception as why:
c = list(self._conflictFunctions(why))
self.assertEqual(c[0], 'test_conflict_add_handler')
self.assertEqual(c[1], 'test_conflict_add_handler')
self.assertEqual(c[2], 'test_conflict_add_handler')
self.assertEqual(c[3], 'test_conflict_add_handler')
else: # pragma: no cover
raise AssertionError
def _conflictFunctions(self, e):
conflicts = e._conflicts.values()
for conflict in conflicts:
for confinst in conflict:
try:
# pyramid 1.2
yield confinst[2]
except TypeError:
yield confinst.function
class TestActionPredicate(unittest.TestCase):
def _getTargetClass(self):
from pyramid_handlers import ActionPredicate
return ActionPredicate
def _makeOne(self, action='myaction'):
return self._getTargetClass()(action)
def test_bad_action_regex_string(self):
from pyramid.exceptions import ConfigurationError
cls = self._getTargetClass()
self.assertRaises(ConfigurationError, cls, '[a-z')
def test_bad_action_regex_None(self):
from pyramid.exceptions import ConfigurationError
cls = self._getTargetClass()
self.assertRaises(ConfigurationError, cls, None)
def test___call__no_matchdict(self):
pred = self._makeOne()
request = testing.DummyRequest()
self.assertEqual(pred(None, request), False)
def test___call__no_action_in_matchdict(self):
pred = self._makeOne()
request = testing.DummyRequest()
request.matchdict = {}
self.assertEqual(pred(None, request), False)
def test___call__action_does_not_match(self):
pred = self._makeOne()
request = testing.DummyRequest()
request.matchdict = {'action':'notmyaction'}
self.assertEqual(pred(None, request), False)
def test___call__action_matches(self):
pred = self._makeOne()
request = testing.DummyRequest()
request.matchdict = {'action':'myaction'}
self.assertEqual(pred(None, request), True)
def test___call___matchdict_is_None(self):
pred = self._makeOne()
request = testing.DummyRequest()
request.matchdict = None
self.assertEqual(pred(None, request), False)
def test___hash__(self):
pred1 = self._makeOne()
pred2 = self._makeOne()
pred3 = self._makeOne(action='notthesame')
self.assertEqual(hash(pred1), hash(pred2))
self.assertNotEqual(hash(pred1), hash(pred3))
self.assertNotEqual(hash(pred2), hash(pred3))
class Test_action(unittest.TestCase):
def _makeOne(self, **kw):
from pyramid_handlers import action
return action(**kw)
def test_call_no_previous__exposed__(self):
inst = self._makeOne(a=1, b=2)
def wrapped():
""" """
result = inst(wrapped)
self.assertTrue(result is wrapped)
self.assertEqual(result.__exposed__, [{'a':1, 'b':2}])
def test_call_with_previous__exposed__(self):
inst = self._makeOne(a=1, b=2)
def wrapped():
""" """
wrapped.__exposed__ = [None]
result = inst(wrapped)
self.assertTrue(result is wrapped)
self.assertEqual(result.__exposed__, [None, {'a':1, 'b':2}])
if not PY3:
class TestHandlerDirective(unittest.TestCase):
def setUp(self):
self.config = testing.setUp(autocommit=False)
_ctx = self.config._ctx
if _ctx is None: # pragma: no cover ; will never be true under 1.2a5+
self.config._ctx = self.config._make_context()
def tearDown(self):
testing.tearDown()
def _callFUT(self, *arg, **kw):
from pyramid_handlers.zcml import handler
return handler(*arg, **kw)
def test_it(self):
from pyramid_handlers import action
from zope.interface import Interface
from pyramid.interfaces import IView
from pyramid.interfaces import IViewClassifier
from pyramid.interfaces import IRouteRequest
reg = self.config.registry
context = DummyZCMLContext(self.config)
class Handler(object): # pragma: no cover
def __init__(self, request):
self.request = request
            @action(renderer='json')
def one(self):
return 'OK'
            @action(renderer='json')
def two(self):
return 'OK'
self._callFUT(context, 'name', '/:action', Handler)
actions = extract_actions(context.actions)
_execute_actions(actions)
request_type = reg.getUtility(IRouteRequest, 'name')
wrapped = reg.adapters.lookup(
(IViewClassifier, request_type, Interface), IView, name='')
self.assertTrue(wrapped)
def test_pattern_is_None(self):
from pyramid.exceptions import ConfigurationError
context = self.config._ctx
class Handler(object):
pass
self.assertRaises(ConfigurationError, self._callFUT,
context, 'name', None, Handler)
class Test_includeme(unittest.TestCase):
def test_it(self):
from pyramid.config import Configurator
from pyramid_handlers import add_handler
from pyramid_handlers import includeme
c = Configurator(autocommit=True)
c.include(includeme)
self.assertTrue(c.add_handler.__func__.__docobj__ is add_handler)
class DummyHandler(object): # pragma: no cover
def __init__(self, request):
self.request = request
def action1(self):
return 'response 1'
def action2(self):
return 'response 2'
try:
from pyramid.config import expand_action
dict_actions = True
except ImportError: # pragma: no cover
from zope.configuration.config import expand_action
dict_actions = False
if dict_actions:
    # This Pyramid release represents configurator actions as dictionaries,
    # so they can be consumed directly.
def extract_actions(native):
return native
else:
    # Older Pyramid releases use tuple-based actions that must be expanded.
def extract_actions(native): # pragma: no cover
L = []
for action in native:
(discriminator, callable, args, kw, includepath, info, order
) = expand_action(*action)
d = {}
d['discriminator'] = discriminator
d['callable'] = callable
d['args'] = args
d['kw'] = kw
d['order'] = order
L.append(d)
return L
def _execute_actions(actions):
for action in sorted(actions, key=lambda x: x['order']):
if 'callable' in action:
if action['callable']:
action['callable']()
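# Note (added): DummyZCMLContext below stands in for the ZCML directive context
# that pyramid_handlers.zcml.handler expects, copying the attributes a real
# context carries (registry, package, autocommit, actions) from a test
# Configurator.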
class DummyZCMLContext(object):
config_class = Configurator
introspection = False
def __init__(self, config):
if hasattr(config, '_make_context'): # pragma: no cover
# 1.0, 1.1 b/c
config._ctx = config._make_context()
self.registry = config.registry
self.package = config.package
self.autocommit = config.autocommit
self.route_prefix = getattr(config, 'route_prefix', None)
self.basepath = getattr(config, 'basepath', None)
self.includepath = getattr(config, 'includepath', ())
self.info = getattr(config, 'info', '')
self.actions = config._ctx.actions
self._ctx = config._ctx
def action(self, *arg, **kw): # pragma: no cover
self._ctx.action(*arg, **kw)
|
|
from itertools import combinations as combinations
from operator import attrgetter
import Player as P
###
class PokerPool(P.Player):
'''Derived class for pool of common cards'''
max_cards = 5
def __init__(self, name):
P.Player.__init__(self, name)
self.hand.max_cards = self.max_cards
###
class PokerHand():
'''Class for finding best hand with pool'''
max_cards = 5
#_____________________________
def __init__(self, hand, pool):
self.hand = hand.cards
self.pool = pool.cards
self.score = 0
#_____________________________
def is_tie(self, score, rank_cards, kicker_cards):
'''
Returns true if score is same, rank cards are identical,
and kicker cards are identical
'''
if score != self.score:
return False
if rank_cards != self.rank_cards:
return False
if kicker_cards != self.kicker_cards:
return False
return True
#_______________________
def is_better(self, score, rank_cards, kicker_cards):
'''Returns true if input score, rank, kicker
is better than current hand
'''
if score > self.score:
return True
elif score == self.score:
# Better rank of hand (e.g. KK vs QQ) FIXME be careful about two or more rank cards, order
if rank_cards > self.rank_cards:
return True
# Better kickers (e.g. KK, Ace High vs KK, J high)
elif rank_cards == self.rank_cards and kicker_cards > self.kicker_cards:
return True
# Current hand is better
return False
#__________________
def get_score(self):
my_poker = Poker()
card_pool = self.hand + self.pool
hands = list(combinations(card_pool, self.max_cards))
for h in hands:
i_s, i_rc, i_kc = my_poker.eval_hand(h)
if self.is_better(i_s, i_rc, i_kc):
self.update_hand(i_s, h, i_rc, i_kc)
#_______________________________
def update_hand(self, s, fh, rc, kc):
self.score = s
self.rank_cards = rc
self.kicker_cards = kc
final_hand = P.Hand()
for c in fh:
final_hand.add_card(c)
self.final_hand = final_hand
###
class Poker:
'''Class to evaluate Poker hand'''
    def __init__(self):
        # Card values and suits are populated per-hand in eval_hand()
        self.v = []
        self.s = []
def values(self, cards):
'''Returns sorted values'''
#return sorted([c.value for c in cards], reverse=True)
return [c.value for c in cards]
def suits(self, cards):
'''Returns suits'''
return [c.suit for c in cards]
def n_kind(self, n, values):
'''Returns n-of-a-kind value if exists'''
return set( v for v in values if values.count(v) >= n)
def is_straight(self, values):
'''Returns straight, and ace-low'''
        ace_low = len(set(values)) == 5 and values[0]-values[-1] == 12 and values[1] == 5
straight = (len(set(values)) == 5 and values[0]-values[-1] == 4) or ace_low
return straight, ace_low
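    # Illustrative note (added): with descending values and an ace encoded as
    # 14 (implied by the 14 - 2 == 12 check above), [14, 5, 4, 3, 2] is an
    # ace-low straight and [9, 8, 7, 6, 5] an ordinary one.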
def is_flush(self, suits):
'''Returns true if all same suit'''
return len(set(suits)) == 1
#______________________________________
    def straight_flush(self, cards, rc, kc):
        st, al = self.is_straight(self.v)
        fl = self.is_flush(self.s)
        if not (st and fl):
            return False
        # Royal flush
        if self.v[-1] == 10:
            sc = 10
        # Ace-low straight flush
        elif al:
            sc = 9
            rc.add_card(cards[1])
        # Other straight flush
        else:
            sc = 9
            rc.add_card(cards[0])
        return sc, rc, kc
#______________________________________
def four_of_a_kind(self, cards, rc, kc):
if self.k_4:
sc = 8
for c in cards:
if c.value in self.k_4: rc.add_card(c)
else: kc.add_card(c)
return sc, rc, kc
return False
#__________________________________
def full_house(self, cards, rc, kc):
if self.k_3 and self.k_2 and len(self.k_2-self.k_3) > 0:
sc = 7
for c in cards:
if c.value in self.k_3: rc.add_card(c)
for c in cards:
if c.value in (self.k_2 - self.k_3): rc.add_card(c)
return sc, rc, kc
return False
#______________________________
def flush(self, cards, rc, kc):
if self.is_flush(self.s):
sc = 6
for c in cards:
rc.add_card(c)
return sc, rc, kc
return False
#________________________________
def straight(self, cards, rc, kc):
st, al = self.is_straight(self.v)
if st:
sc = 5
            rc.add_card(cards[1] if al else cards[0])
return sc, rc, kc
return False
#_______________________________________
def three_of_a_kind(self, cards, rc, kc):
if self.k_3:
sc = 4
for c in cards:
if c.value in self.k_3: rc.add_card(c)
else: kc.add_card(c)
return sc, rc, kc
return False
#________________________________
def pair(self, cards, rc, kc):
# Two pair
if len(self.k_2) > 1:
sc = 3
for c in cards:
if c.value == max(self.k_2): rc.add_card(c)
elif c.value not in self.k_2: kc.add_card(c)
for c in cards:
if c.value == min(self.k_2): rc.add_card(c)
# Pair
elif self.k_2:
sc = 2
for c in cards:
if c.value in self.k_2: rc.add_card(c)
else: kc.add_card(c)
# High card
else:
sc = 1
for c in cards:
if c.value == self.v[0]: rc.add_card(c)
else: kc.add_card(c)
return sc, rc, kc
#_________________________
def eval_hand(self, cards):
poker_hands = [self.straight_flush,self.four_of_a_kind,self.full_house,
self.flush,self.straight,self.three_of_a_kind,self.pair]
        s_cards = sorted(cards, key=attrgetter('value', 'suit'), reverse=True)
self.v = self.values(s_cards)
self.s = self.suits(s_cards)
self.k_4 = self.n_kind(4, self.v)
self.k_3 = self.n_kind(3, self.v)
self.k_2 = self.n_kind(2, self.v)
rank_cards = P.Hand()
kicker_cards = P.Hand()
for ranker in poker_hands:
rank = ranker(s_cards, rank_cards, kicker_cards)
if rank: break
return rank[0], rank[1], rank[2]
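# Usage sketch (added, hedged): PokerHand.get_score() above enumerates every
# 5-card combination of the hand plus the pool and keeps the best one; e.g.
# with a 2-card hand (an assumption here, since hand size is set by the Player
# class) and the 5-card pool, that is C(7, 5) = 21 calls to eval_hand().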
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
In-memory persistent stub for the Python datastore API. Gets, queries,
and searches are implemented as in-memory scans over all entities.
Stores entities across sessions as pickled proto bufs in a single file. On
startup, all entities are read from the file and loaded into memory. On
every Put(), the file is wiped and all entities are written from scratch.
Clients can also manually Read() and Write() the file themselves.
"""
import collections
import logging
import os
import struct
import sys
import tempfile
import threading
import weakref
import cPickle as pickle
from google.appengine.api import apiproxy_stub
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_stub_util
from google.appengine.runtime import apiproxy_errors
from google.net.proto import ProtocolBuffer
from google.appengine.datastore import entity_pb
datastore_pb.Query.__hash__ = lambda self: hash(self.Encode())
class _StoredEntity(object):
"""Simple wrapper around an entity stored by the stub.
Public properties:
protobuf: Native protobuf Python object, entity_pb.EntityProto.
encoded_protobuf: Encoded binary representation of above protobuf.
"""
def __init__(self, entity):
"""Create a _StoredEntity object and store an entity.
Args:
entity: entity_pb.EntityProto to store.
"""
self.protobuf = entity
self.encoded_protobuf = entity.Encode()
class KindPseudoKind(object):
"""Pseudo-kind for schema queries.
Provides a Query method to perform the actual query.
Public properties:
name: the pseudo-kind name
"""
name = '__kind__'
def Query(self, entities, query, filters, orders):
"""Perform a query on this pseudo-kind.
Args:
entities: all the app's entities.
query: the original datastore_pb.Query.
filters: the filters from query.
orders: the orders from query.
Returns:
(results, remaining_filters, remaining_orders)
results is a list of entity_pb.EntityProto
remaining_filters and remaining_orders are the filters and orders that
should be applied in memory
"""
kind_range = datastore_stub_util.ParseKindQuery(query, filters, orders)
app_namespace_str = datastore_types.EncodeAppIdNamespace(
query.app(), query.name_space())
kinds = []
for app_namespace, kind in entities:
if app_namespace != app_namespace_str: continue
kind = kind.decode('utf-8')
if not kind_range.Contains(kind): continue
kinds.append(datastore.Entity(self.name, name=kind, _app=query.app(),
namespace=query.name_space())._ToPb())
return (kinds, [], [])
class PropertyPseudoKind(object):
"""Pseudo-kind for schema queries.
Provides a Query method to perform the actual query.
Public properties:
name: the pseudo-kind name
"""
name = '__property__'
def __init__(self, filestub):
"""Constructor.
Initializes a __property__ pseudo-kind definition.
Args:
filestub: the DatastoreFileStub instance being served by this
pseudo-kind.
"""
self.filestub = filestub
def Query(self, entities, query, filters, orders):
"""Perform a query on this pseudo-kind.
Args:
entities: all the app's entities.
query: the original datastore_pb.Query.
filters: the filters from query.
orders: the orders from query.
Returns:
(results, remaining_filters, remaining_orders)
results is a list of entity_pb.EntityProto
remaining_filters and remaining_orders are the filters and orders that
should be applied in memory
"""
property_range = datastore_stub_util.ParsePropertyQuery(query, filters,
orders)
keys_only = query.keys_only()
app_namespace_str = datastore_types.EncodeAppIdNamespace(
query.app(), query.name_space())
properties = []
if keys_only:
usekey = '__property__keys'
else:
usekey = '__property__'
for app_namespace, kind in entities:
if app_namespace != app_namespace_str: continue
app_kind = (app_namespace_str, kind)
kind = kind.decode('utf-8')
(start_cmp, end_cmp) = property_range.MapExtremes(
lambda extreme, inclusive, is_end: cmp(kind, extreme[0]))
if not((start_cmp is None or start_cmp >= 0) and
(end_cmp is None or end_cmp <= 0)):
continue
kind_properties = self.filestub._GetSchemaCache(app_kind, usekey)
if not kind_properties:
kind_properties = []
kind_key = datastore_types.Key.from_path(KindPseudoKind.name, kind,
_app=query.app(),
namespace=query.name_space())
props = collections.defaultdict(set)
for entity in entities[app_kind].values():
for prop in entity.protobuf.property_list():
prop_name = prop.name()
if (prop_name in
datastore_stub_util.GetInvisibleSpecialPropertyNames()):
continue
value_pb = prop.value()
props[prop_name].add(datastore_types.GetPropertyValueTag(value_pb))
for prop in sorted(props):
property_e = datastore.Entity(self.name, name=prop, parent=kind_key,
_app=query.app(),
namespace=query.name_space())
if not keys_only and props[prop]:
property_e['property_representation'] = [
datastore_stub_util._PROPERTY_TYPE_NAMES[tag]
for tag in sorted(props[prop])]
kind_properties.append(property_e._ToPb())
self.filestub._SetSchemaCache(app_kind, usekey, kind_properties)
def InQuery(property_e):
return property_range.Contains(
(kind, property_e.key().path().element_list()[-1].name()))
properties += filter(InQuery, kind_properties)
return (properties, [], [])
class NamespacePseudoKind(object):
"""Pseudo-kind for namespace queries.
Provides a Query method to perform the actual query.
Public properties:
name: the pseudo-kind name
"""
name = '__namespace__'
def Query(self, entities, query, filters, orders):
"""Perform a query on this pseudo-kind.
Args:
entities: all the app's entities.
query: the original datastore_pb.Query.
filters: the filters from query.
orders: the orders from query.
Returns:
(results, remaining_filters, remaining_orders)
results is a list of entity_pb.EntityProto
remaining_filters and remaining_orders are the filters and orders that
should be applied in memory
"""
namespace_range = datastore_stub_util.ParseNamespaceQuery(query, filters,
orders)
app_str = query.app()
namespaces = set()
for app_namespace, _ in entities:
(app_id, namespace) = datastore_types.DecodeAppIdNamespace(app_namespace)
if app_id == app_str and namespace_range.Contains(namespace):
namespaces.add(namespace)
namespace_entities = []
for namespace in namespaces:
if namespace:
namespace_e = datastore.Entity(self.name, name=namespace,
_app=query.app())
else:
namespace_e = datastore.Entity(self.name,
id=datastore_types._EMPTY_NAMESPACE_ID,
_app=query.app())
namespace_entities.append(namespace_e._ToPb())
return (namespace_entities, [], [])
class DatastoreFileStub(datastore_stub_util.BaseDatastore,
apiproxy_stub.APIProxyStub,
datastore_stub_util.DatastoreStub):
""" Persistent stub for the Python datastore API.
Stores all entities in memory, and persists them to a file as pickled
protocol buffers. A DatastoreFileStub instance handles a single app's data
and is backed by files on disk.
"""
def __init__(self,
app_id,
datastore_file,
history_file=None,
require_indexes=False,
service_name='datastore_v3',
trusted=False,
consistency_policy=None,
save_changes=True):
"""Constructor.
Initializes and loads the datastore from the backing files, if they exist.
Args:
app_id: string
datastore_file: string, stores all entities across sessions. Use None
not to use a file.
history_file: DEPRECATED. No-op.
require_indexes: bool, default False. If True, composite indexes must
exist in index.yaml for queries that need them.
service_name: Service name expected for all calls.
trusted: bool, default False. If True, this stub allows an app to
access the data of another app.
consistency_policy: The consistency policy to use or None to use the
default. Consistency policies can be found in
datastore_stub_util.*ConsistencyPolicy
save_changes: bool, default True. If this stub should modify
datastore_file when entities are changed.
"""
datastore_stub_util.BaseDatastore.__init__(self, require_indexes,
consistency_policy)
apiproxy_stub.APIProxyStub.__init__(self, service_name)
datastore_stub_util.DatastoreStub.__init__(self, weakref.proxy(self),
app_id, trusted)
self.__datastore_file = datastore_file
self.__save_changes = save_changes
self.__entities_by_kind = collections.defaultdict(dict)
self.__entities_by_group = collections.defaultdict(dict)
self.__entities_lock = threading.Lock()
self.__schema_cache = {}
self.__query_history = {}
self.__next_id = 1L
self.__id_lock = threading.Lock()
self.__file_lock = threading.Lock()
self._RegisterPseudoKind(KindPseudoKind())
self._RegisterPseudoKind(PropertyPseudoKind(weakref.proxy(self)))
self._RegisterPseudoKind(NamespacePseudoKind())
self.Read()
def Clear(self):
""" Clears the datastore by deleting all currently stored entities and
queries. """
self.__entities_lock.acquire()
try:
datastore_stub_util.BaseDatastore.Clear(self)
datastore_stub_util.DatastoreStub.Clear(self)
self.__entities_by_kind = collections.defaultdict(dict)
self.__entities_by_group = collections.defaultdict(dict)
self.__query_history = {}
self.__schema_cache = {}
finally:
self.__entities_lock.release()
def _GetEntityLocation(self, key):
"""Get keys to self.__entities_by_* from the given key.
Example usage:
app_kind, eg_k, k = self._GetEntityLocation(key)
self.__entities_by_kind[app_kind][k]
self.__entities_by_entity_group[eg_k][k]
Args:
key: entity_pb.Reference
Returns:
Tuple (by_kind key, by_entity_group key, entity key)
"""
app_ns = datastore_types.EncodeAppIdNamespace(key.app(), key.name_space())
kind = key.path().element_list()[-1].type()
entity_group = datastore_stub_util._GetEntityGroup(key)
eg_k = datastore_types.ReferenceToKeyValue(entity_group)
k = datastore_types.ReferenceToKeyValue(key)
return ((app_ns, kind), eg_k, k)
def _StoreEntity(self, entity, insert=False):
""" Store the given entity.
Any needed locking should be managed by the caller.
Args:
entity: The entity_pb.EntityProto to store.
      insert: If we should check for existence.
"""
app_kind, eg_k, k = self._GetEntityLocation(entity.key())
assert not insert or k not in self.__entities_by_kind[app_kind]
self.__entities_by_kind[app_kind][k] = _StoredEntity(entity)
self.__entities_by_group[eg_k][k] = entity
if app_kind in self.__schema_cache:
del self.__schema_cache[app_kind]
READ_PB_EXCEPTIONS = (ProtocolBuffer.ProtocolBufferDecodeError, LookupError,
TypeError, ValueError)
READ_ERROR_MSG = ('Data in %s is corrupt or a different version. '
'Try running with the --clear_datastore flag.\n%r')
READ_PY250_MSG = ('Are you using FloatProperty and/or GeoPtProperty? '
'Unfortunately loading float values from the datastore '
'file does not work with Python 2.5.0. '
'Please upgrade to a newer Python 2.5 release or use '
'the --clear_datastore flag.\n')
def Read(self):
""" Reads the datastore and history files into memory.
The in-memory query history is cleared, but the datastore is *not*
cleared; the entities in the files are merged into the entities in memory.
If you want them to overwrite the in-memory datastore, call Clear() before
calling Read().
If the datastore file contains an entity with the same app name, kind, and
key as an entity already in the datastore, the entity from the file
overwrites the entity in the datastore.
Also sets __next_id to one greater than the highest id allocated so far.
"""
if self.__datastore_file and self.__datastore_file != '/dev/null':
for encoded_entity in self.__ReadPickled(self.__datastore_file):
try:
entity = entity_pb.EntityProto(encoded_entity)
except self.READ_PB_EXCEPTIONS, e:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.INTERNAL_ERROR,
self.READ_ERROR_MSG % (self.__datastore_file, e))
except struct.error, e:
if (sys.version_info[0:3] == (2, 5, 0)
and e.message.startswith('unpack requires a string argument')):
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.INTERNAL_ERROR,
self.READ_PY250_MSG + self.READ_ERROR_MSG %
(self.__datastore_file, e))
else:
raise
self._StoreEntity(entity)
last_path = entity.key().path().element_list()[-1]
if last_path.has_id() and last_path.id() >= self.__next_id:
self.__next_id = last_path.id() + 1
def Write(self):
""" Writes out the datastore and history files. Be careful! If the files
already exist, this method overwrites them!
"""
self.__WriteDatastore()
def __WriteDatastore(self):
""" Writes out the datastore file. Be careful! If the file already exists,
this method overwrites it!
"""
if (self.__datastore_file and self.__datastore_file != '/dev/null' and
self.__save_changes):
encoded = []
for kind_dict in self.__entities_by_kind.values():
encoded.extend(entity.encoded_protobuf for entity in kind_dict.values())
self.__WritePickled(encoded, self.__datastore_file)
def __ReadPickled(self, filename):
"""Reads a pickled object from the given file and returns it.
"""
self.__file_lock.acquire()
try:
try:
if filename and filename != '/dev/null' and os.path.isfile(filename):
return pickle.load(open(filename, 'rb'))
else:
logging.warning('Could not read datastore data from %s', filename)
except (AttributeError, LookupError, ImportError, NameError, TypeError,
ValueError, struct.error, pickle.PickleError), e:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.INTERNAL_ERROR,
'Could not read data from %s. Try running with the '
'--clear_datastore flag. Cause:\n%r' % (filename, e))
finally:
self.__file_lock.release()
return []
def __WritePickled(self, obj, filename):
"""Pickles the object and writes it to the given file.
"""
if not filename or filename == '/dev/null' or not obj:
return
descriptor, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename))
tmpfile = os.fdopen(descriptor, 'wb')
pickler = pickle.Pickler(tmpfile, protocol=1)
pickler.fast = True
pickler.dump(obj)
tmpfile.close()
self.__file_lock.acquire()
try:
try:
os.rename(tmp_filename, filename)
except OSError:
try:
os.remove(filename)
except:
pass
os.rename(tmp_filename, filename)
finally:
self.__file_lock.release()
def MakeSyncCall(self, service, call, request, response):
""" The main RPC entry point. service must be 'datastore_v3'."""
self.assertPbIsInitialized(request)
super(DatastoreFileStub, self).MakeSyncCall(service,
call,
request,
response)
self.assertPbIsInitialized(response)
def assertPbIsInitialized(self, pb):
"""Raises an exception if the given PB is not initialized and valid."""
explanation = []
assert pb.IsInitialized(explanation), explanation
pb.Encode()
def QueryHistory(self):
"""Returns a dict that maps Query PBs to times they've been run.
"""
return dict((pb, times) for pb, times in self.__query_history.items()
if pb.app() == self._app_id)
def _GetSchemaCache(self, kind, usekey):
if kind in self.__schema_cache and usekey in self.__schema_cache[kind]:
return self.__schema_cache[kind][usekey]
else:
return None
def _SetSchemaCache(self, kind, usekey, value):
if kind not in self.__schema_cache:
self.__schema_cache[kind] = {}
self.__schema_cache[kind][usekey] = value
def _Put(self, entity, insert):
entity = datastore_stub_util.StoreEntity(entity)
self.__entities_lock.acquire()
try:
self._StoreEntity(entity, insert)
finally:
self.__entities_lock.release()
def _Get(self, key):
app_kind, _, k = self._GetEntityLocation(key)
try:
return datastore_stub_util.LoadEntity(
self.__entities_by_kind[app_kind][k].protobuf)
except KeyError:
pass
def _Delete(self, key):
app_kind, eg_k, k = self._GetEntityLocation(key)
self.__entities_lock.acquire()
try:
del self.__entities_by_kind[app_kind][k]
del self.__entities_by_group[eg_k][k]
if not self.__entities_by_kind[app_kind]:
del self.__entities_by_kind[app_kind]
if not self.__entities_by_group[eg_k]:
del self.__entities_by_group[eg_k]
del self.__schema_cache[app_kind]
except KeyError:
pass
finally:
self.__entities_lock.release()
def _GetEntitiesInEntityGroup(self, entity_group):
eg_k = datastore_types.ReferenceToKeyValue(entity_group)
return self.__entities_by_group[eg_k].copy()
def _GetQueryCursor(self, query, filters, orders):
app_id = query.app()
namespace = query.name_space()
pseudo_kind = None
if query.has_kind() and query.kind() in self._pseudo_kinds:
pseudo_kind = self._pseudo_kinds[query.kind()]
self.__entities_lock.acquire()
try:
app_ns = datastore_types.EncodeAppIdNamespace(app_id, namespace)
if pseudo_kind:
(results, filters, orders) = pseudo_kind.Query(self.__entities_by_kind,
query, filters, orders)
elif query.has_kind():
results = [entity.protobuf for entity in
self.__entities_by_kind[app_ns, query.kind()].values()]
else:
results = []
for (cur_app_ns, _), entities in self.__entities_by_kind.iteritems():
if cur_app_ns == app_ns:
results.extend(entity.protobuf for entity in entities.itervalues())
except KeyError:
results = []
finally:
self.__entities_lock.release()
return datastore_stub_util._ExecuteQuery(results, query, filters, orders)
def _AllocateIds(self, reference, size=1, max_id=None):
datastore_stub_util.Check(not (size and max_id),
'Both size and max cannot be set.')
self.__id_lock.acquire()
try:
start = self.__next_id
if size:
datastore_stub_util.Check(size > 0, 'Size must be greater than 0.')
self.__next_id += size
elif max_id:
datastore_stub_util.Check(max_id >=0,
'Max must be greater than or equal to 0.')
self.__next_id = max(self.__next_id, max_id + 1)
end = self.__next_id - 1
finally:
self.__id_lock.release()
return (start, end)
def _OnApply(self):
self.__WriteDatastore()
def _Dynamic_RunQuery(self, query, query_result):
super(DatastoreFileStub, self)._Dynamic_RunQuery(query, query_result)
clone = datastore_pb.Query()
clone.CopyFrom(query)
clone.clear_hint()
clone.clear_limit()
clone.clear_offset()
clone.clear_count()
if clone in self.__query_history:
self.__query_history[clone] += 1
else:
self.__query_history[clone] = 1
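# --- Illustrative sketch (not part of the original stub) ---------------------
# __WritePickled above uses the classic "write to a temp file, then rename it
# into place" pattern so that a crash mid-write never leaves a truncated
# datastore file behind. A minimal standalone version of that idea (helper name
# invented here; atomic-replace semantics assume a POSIX rename) looks like:
def _atomic_pickle_write_sketch(obj, filename):
  """Pickles obj to filename by way of a temp file in the same directory."""
  import os
  import pickle
  import tempfile
  descriptor, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename) or '.')
  tmp_file = os.fdopen(descriptor, 'wb')
  try:
    pickle.dump(obj, tmp_file, protocol=1)
  finally:
    tmp_file.close()
  # On POSIX this atomically replaces any existing file; the original code above
  # additionally falls back to remove-then-rename for platforms where it fails.
  os.rename(tmp_filename, filename)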
|
|
import os
import time
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import ray
from ray.util.sgd import TorchTrainer
from ray.util.sgd.utils import AverageMeterCollection
from ray.util.sgd.torch import TrainingOperator
import dgl
from dgl.data import RedditDataset
from dgl.nn.pytorch import GATConv
from torch.utils.data import DataLoader
from dgl.dataloading import NodeCollator
print("Current Path: " + os.getcwd())
torch.manual_seed(42)
# define the model class
class GAT(nn.Module):
def __init__(
self,
in_feats,
n_hidden,
n_classes,
n_layers,
n_heads,
activation,
feat_drop,
attn_drop,
negative_slope,
residual,
):
super().__init__()
self.n_layers = n_layers
self.activation = activation
self.n_hidden = n_hidden
self.n_heads = n_heads
self.n_classes = n_classes
self.convs = nn.ModuleList()
# input layer
self.convs.append(
GATConv(
(in_feats, in_feats),
n_hidden,
n_heads,
feat_drop,
attn_drop,
negative_slope,
residual,
self.activation,
)
)
# hidden layer
for _ in range(1, n_layers - 1):
# due to multi-head, the in_dim = num_hidden * num_heads
self.convs.append(
GATConv(
(n_hidden * n_heads, n_hidden * n_heads),
n_hidden,
n_heads,
feat_drop,
attn_drop,
negative_slope,
residual,
self.activation,
)
)
# output layer
self.convs.append(
GATConv(
(n_hidden * n_heads, n_hidden * n_heads),
n_classes,
n_heads,
feat_drop,
attn_drop,
negative_slope,
residual,
None,
)
)
def forward(self, blocks, x):
h = x
for i, (layer, block) in enumerate(zip(self.convs, blocks)):
h_dst = h[: block.number_of_dst_nodes()]
if i != len(self.convs) - 1:
h = layer(block, (h, h_dst)).flatten(1)
h = F.dropout(h, p=0.5, training=self.training)
else:
h = layer(block, (h, h_dst))
h = h.mean(1)
return h.log_softmax(dim=-1)
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (torch.argmax(pred, dim=1) == labels).float().sum() / len(pred)
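# Quick sanity check of compute_acc (values chosen only for illustration): with
# pred = torch.tensor([[0.1, 0.9], [0.8, 0.2]]) and labels = torch.tensor([1, 1]),
# the argmax predictions are [1, 0], only the first matches, so the result is
# tensor(0.5000).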
class CustomTrainingOperator(TrainingOperator):
def setup(self, config):
# load reddit data
data = RedditDataset()
g = data[0]
g.ndata["features"] = g.ndata["feat"]
g.ndata["labels"] = g.ndata["label"]
self.in_feats = g.ndata["features"].shape[1]
self.n_classes = data.num_classes
# add self loop,
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
# Create csr/coo/csc formats before launching training processes
g.create_formats_()
self.g = g
train_nid = torch.nonzero(g.ndata["train_mask"], as_tuple=True)[0]
val_nid = torch.nonzero(g.ndata["val_mask"], as_tuple=True)[0]
test_nid = torch.nonzero(g.ndata["test_mask"], as_tuple=True)[0]
self.train_nid = train_nid
self.val_nid = val_nid
self.test_nid = test_nid
# Create sampler
sampler = dgl.dataloading.MultiLayerNeighborSampler(
[int(fanout) for fanout in config["fan_out"].split(",")]
)
# Create PyTorch DataLoader for constructing blocks
collator = NodeCollator(g, train_nid, sampler)
train_dataloader = DataLoader(
collator.dataset,
collate_fn=collator.collate,
batch_size=config["batch_size"],
shuffle=False,
drop_last=False,
num_workers=config["sampling_num_workers"],
)
# Define model and optimizer, residual is set to True
model = GAT(
self.in_feats,
config["n_hidden"],
self.n_classes,
config["n_layers"],
config["n_heads"],
F.elu,
config["feat_drop"],
config["attn_drop"],
config["negative_slope"],
True,
)
self.convs = model.convs
# Define optimizer.
optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])
# Register model, optimizer, and loss.
self.model, self.optimizer = self.register(models=model, optimizers=optimizer)
# Register data loaders.
self.register_data(train_loader=train_dataloader)
def train_epoch(self, iterator, info):
meter_collection = AverageMeterCollection()
iter_tput = []
model = self.model
# for batch_idx,batch in enumerate(iterator):
for step, (input_nodes, seeds, blocks) in enumerate(iterator):
tic_step = time.time()
# do some train
optimizer = self.optimizer
device = 0
if self.use_gpu:
blocks = [block.int().to(device) for block in blocks]
batch_inputs = blocks[0].srcdata["features"]
batch_labels = blocks[-1].dstdata["labels"]
batch_pred = model(blocks, batch_inputs)
loss = F.nll_loss(batch_pred, batch_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iter_tput.append(len(seeds) / (time.time() - tic_step))
if step % 20 == 0:
acc = compute_acc(batch_pred, batch_labels)
gpu_mem_alloc = (
torch.cuda.max_memory_allocated() / 1000000
if torch.cuda.is_available()
else 0
)
print(
"Epoch {:05d} | Step {:05d} | Loss {:.4f} | "
"Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU "
"{:.1f} MB".format(
info["epoch_idx"] + 1,
step,
loss.item(),
acc.item(),
np.mean(iter_tput[3:]),
gpu_mem_alloc,
)
)
status = meter_collection.summary()
return status
def validate(self, validation_loader, info):
meter_collection = AverageMeterCollection()
model = self.model
n_layers = self.config["n_layers"]
n_hidden = self.config["n_hidden"]
n_heads = self.config["n_heads"]
batch_size = self.config["batch_size"]
num_workers = self.config["sampling_num_workers"]
g = self.g
train_nid = self.train_nid
val_nid = self.val_nid
test_nid = self.test_nid
device = 0
model.eval()
with torch.no_grad():
x = g.ndata["features"]
for i, layer in enumerate(self.convs):
if i < n_layers - 1:
y = torch.zeros(
g.number_of_nodes(),
n_hidden * n_heads
if i != len(self.convs) - 1
else self.n_classes,
)
else:
y = torch.zeros(
g.number_of_nodes(),
n_hidden if i != len(self.convs) - 1 else self.n_classes,
)
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
collator = NodeCollator(g, torch.arange(g.number_of_nodes()), sampler)
dataloader = DataLoader(
collator.dataset,
collate_fn=collator.collate,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers,
)
for input_nodes, output_nodes, blocks in dataloader:
block = blocks[0]
# print("block:",block)
block = block.int().to(device)
h = x[input_nodes].to(device)
h_dst = x[output_nodes].to(device)
if i != len(self.convs) - 1:
h = layer(block, (h, h_dst)).flatten(1)
else:
h = layer(block, (h, h_dst)).mean(1)
h = h.log_softmax(dim=-1)
y[output_nodes] = h.cpu()
x = y
pred = y
labels = g.ndata["labels"]
_, val_acc, test_acc = (
compute_acc(pred[train_nid], labels[train_nid]),
compute_acc(pred[val_nid], labels[val_nid]),
compute_acc(pred[test_nid], labels[test_nid]),
)
metrics = {
"num_samples": pred.size(0),
"val_acc": val_acc.item(),
"test_acc": test_acc.item(),
}
meter_collection.update(metrics, n=metrics.pop("num_samples", 1))
status = meter_collection.summary()
return status
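# --- Illustrative sketch (standalone, not used by the trainer above) ----------
# The NodeCollator + DataLoader pair built in setup() yields, per step, the
# sampled frontier nodes, the seed/output nodes of the batch, and the list of
# bipartite "blocks" consumed by GAT.forward. A toy graph makes that concrete;
# the graph and fan-outs here are invented for illustration only.
def _toy_sampling_sketch():
    g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
    sampler = dgl.dataloading.MultiLayerNeighborSampler([2, 2])
    collator = NodeCollator(g, torch.arange(g.number_of_nodes()), sampler)
    loader = DataLoader(
        collator.dataset, collate_fn=collator.collate, batch_size=2, shuffle=False
    )
    for input_nodes, output_nodes, blocks in loader:
        # input_nodes is a superset of output_nodes; blocks has one entry per
        # sampled layer, innermost layer last.
        return input_nodes, output_nodes, blocks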
def run(
num_workers,
use_gpu,
num_epochs,
lr,
batch_size,
n_hidden,
n_layers,
n_heads,
fan_out,
feat_drop,
attn_drop,
negative_slope,
sampling_num_workers,
):
trainer = TorchTrainer(
training_operator_cls=CustomTrainingOperator,
num_workers=num_workers,
use_gpu=use_gpu,
backend="nccl",
config={
"lr": lr,
"batch_size": batch_size,
"n_hidden": n_hidden,
"n_layers": n_layers,
"n_heads": n_heads,
"fan_out": fan_out,
"feat_drop": feat_drop,
"attn_drop": attn_drop,
"negative_slope": negative_slope,
"sampling_num_workers": sampling_num_workers,
},
)
for i in range(num_epochs):
trainer.train()
validation_results = trainer.validate()
trainer.shutdown()
print(validation_results)
print("success!")
# Use ray.init(address="auto") if running on a Ray cluster.
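# Example invocation (assuming this script is saved as train_reddit_gat.py;
# adjust the name to match your checkout):
#   python train_reddit_gat.py --num-workers 2 --num-epochs 2 --batch-size 1024
# To attach to an existing Ray cluster, pass --address, e.g. --address auto.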
if __name__ == "__main__":
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument("--num-workers", type=int, default=2)
argparser.add_argument("--use-gpu", type=bool, default=True)
argparser.add_argument("--num-epochs", type=int, default=2)
argparser.add_argument("--lr", type=float, default=0.001)
argparser.add_argument("--batch-size", type=int, default=1024)
argparser.add_argument("--n-hidden", type=int, default=128)
argparser.add_argument("--n-layers", type=int, default=2)
argparser.add_argument("--n-heads", type=int, default=4)
argparser.add_argument("--fan-out", type=str, default="10,25")
argparser.add_argument("--feat-drop", type=float, default=0.0)
argparser.add_argument("--attn-drop", type=float, default=0.0)
argparser.add_argument("--negative-slope", type=float, default=0.2)
argparser.add_argument(
"--sampling-num-workers",
type=int,
default=0,
help="Number of sampling processes. Use 0 for no extra process.",
)
argparser.add_argument(
"--address", required=False, type=str, help="The address to use for ray"
)
args = argparser.parse_args()
ray.init(address=args.address)
run(
num_workers=args.num_workers,
use_gpu=args.use_gpu,
num_epochs=args.num_epochs,
lr=args.lr,
batch_size=args.batch_size,
n_hidden=args.n_hidden,
n_layers=args.n_layers,
n_heads=args.n_heads,
fan_out=args.fan_out,
feat_drop=args.feat_drop,
attn_drop=args.attn_drop,
negative_slope=args.negative_slope,
sampling_num_workers=args.sampling_num_workers,
)
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import pretend
import pytest
from pyramid.location import lineage
from pyramid.security import Allow
from warehouse.packaging.models import ProjectFactory, Dependency, DependencyKind, File
from ...common.db.packaging import (
ProjectFactory as DBProjectFactory,
ReleaseFactory as DBReleaseFactory,
FileFactory as DBFileFactory,
RoleFactory as DBRoleFactory,
)
class TestRole:
def test_role_ordering(self, db_request):
project = DBProjectFactory.create()
owner_role = DBRoleFactory.create(project=project, role_name="Owner")
maintainer_role = DBRoleFactory.create(project=project, role_name="Maintainer")
assert max([maintainer_role, owner_role]) == owner_role
class TestProjectFactory:
@pytest.mark.parametrize(("name", "normalized"), [("foo", "foo"), ("Bar", "bar")])
def test_traversal_finds(self, db_request, name, normalized):
project = DBProjectFactory.create(name=name)
root = ProjectFactory(db_request)
assert root[normalized] == project
    def test_traversal_cant_find(self, db_request):
project = DBProjectFactory.create()
root = ProjectFactory(db_request)
with pytest.raises(KeyError):
root[project.name + "invalid"]
class TestProject:
def test_traversal_finds(self, db_request):
project = DBProjectFactory.create()
release = DBReleaseFactory.create(project=project)
assert project[release.version] == release
def test_traversal_finds_canonical_version(self, db_request):
project = DBProjectFactory.create()
release = DBReleaseFactory.create(version="1.0", project=project)
assert project["1.0.0"] == release
def test_traversal_finds_canonical_version_if_multiple(self, db_request):
project = DBProjectFactory.create()
release = DBReleaseFactory.create(version="1.0.0", project=project)
DBReleaseFactory.create(version="1.0", project=project)
assert project["1.0.0"] == release
def test_traversal_cant_find(self, db_request):
project = DBProjectFactory.create()
with pytest.raises(KeyError):
project["1.0"]
def test_traversal_cant_find_if_multiple(self, db_request):
project = DBProjectFactory.create()
DBReleaseFactory.create(version="1.0.0", project=project)
DBReleaseFactory.create(version="1.0", project=project)
with pytest.raises(KeyError):
project["1"]
def test_doc_url_doesnt_exist(self, db_request):
project = DBProjectFactory.create()
assert project.documentation_url is None
def test_doc_url(self, pyramid_config, db_request):
db_request.route_url = pretend.call_recorder(
lambda route, **kw: "/the/docs/url/"
)
project = DBProjectFactory.create(has_docs=True)
assert project.documentation_url == "/the/docs/url/"
assert db_request.route_url.calls == [
pretend.call("legacy.docs", project=project.name)
]
def test_acl(self, db_session):
project = DBProjectFactory.create()
owner1 = DBRoleFactory.create(project=project)
owner2 = DBRoleFactory.create(project=project)
maintainer1 = DBRoleFactory.create(project=project, role_name="Maintainer")
maintainer2 = DBRoleFactory.create(project=project, role_name="Maintainer")
acls = []
for location in lineage(project):
try:
acl = location.__acl__
except AttributeError:
continue
if acl and callable(acl):
acl = acl()
acls.extend(acl)
assert acls == [
(Allow, "group:admins", "admin"),
(Allow, str(owner1.user.id), ["manage:project", "upload"]),
(Allow, str(owner2.user.id), ["manage:project", "upload"]),
(Allow, str(maintainer1.user.id), ["upload"]),
(Allow, str(maintainer2.user.id), ["upload"]),
]
class TestRelease:
def test_has_meta_true_with_keywords(self, db_session):
release = DBReleaseFactory.create(keywords="foo, bar")
assert release.has_meta
def test_has_meta_true_with_author(self, db_session):
release = DBReleaseFactory.create(author="Batman")
assert release.has_meta
release = DBReleaseFactory.create(author_email="[email protected]")
assert release.has_meta
def test_has_meta_true_with_maintainer(self, db_session):
release = DBReleaseFactory.create(maintainer="Spiderman")
assert release.has_meta
release = DBReleaseFactory.create(maintainer_email="[email protected]")
assert release.has_meta
def test_has_meta_false(self, db_session):
release = DBReleaseFactory.create()
assert not release.has_meta
@pytest.mark.parametrize(
("home_page", "download_url", "project_urls", "expected"),
[
(None, None, [], OrderedDict()),
(
"https://example.com/home/",
None,
[],
OrderedDict([("Homepage", "https://example.com/home/")]),
),
(
None,
"https://example.com/download/",
[],
OrderedDict([("Download", "https://example.com/download/")]),
),
(
"https://example.com/home/",
"https://example.com/download/",
[],
OrderedDict(
[
("Homepage", "https://example.com/home/"),
("Download", "https://example.com/download/"),
]
),
),
(
None,
None,
["Source Code,https://example.com/source-code/"],
OrderedDict([("Source Code", "https://example.com/source-code/")]),
),
(
None,
None,
["Source Code, https://example.com/source-code/"],
OrderedDict([("Source Code", "https://example.com/source-code/")]),
),
(
"https://example.com/home/",
"https://example.com/download/",
["Source Code,https://example.com/source-code/"],
OrderedDict(
[
("Homepage", "https://example.com/home/"),
("Source Code", "https://example.com/source-code/"),
("Download", "https://example.com/download/"),
]
),
),
(
"https://example.com/home/",
"https://example.com/download/",
[
"Homepage,https://example.com/home2/",
"Source Code,https://example.com/source-code/",
],
OrderedDict(
[
("Homepage", "https://example.com/home2/"),
("Source Code", "https://example.com/source-code/"),
("Download", "https://example.com/download/"),
]
),
),
(
"https://example.com/home/",
"https://example.com/download/",
[
"Source Code,https://example.com/source-code/",
"Download,https://example.com/download2/",
],
OrderedDict(
[
("Homepage", "https://example.com/home/"),
("Source Code", "https://example.com/source-code/"),
("Download", "https://example.com/download2/"),
]
),
),
(
"https://example.com/home/",
"https://example.com/download/",
[
"Homepage,https://example.com/home2/",
"Source Code,https://example.com/source-code/",
"Download,https://example.com/download2/",
],
OrderedDict(
[
("Homepage", "https://example.com/home2/"),
("Source Code", "https://example.com/source-code/"),
("Download", "https://example.com/download2/"),
]
),
),
],
)
def test_urls(self, db_session, home_page, download_url, project_urls, expected):
release = DBReleaseFactory.create(
home_page=home_page, download_url=download_url
)
for urlspec in project_urls:
db_session.add(
Dependency(
release=release,
kind=DependencyKind.project_url.value,
specifier=urlspec,
)
)
# TODO: It'd be nice to test for the actual ordering here.
assert dict(release.urls) == dict(expected)
def test_acl(self, db_session):
project = DBProjectFactory.create()
owner1 = DBRoleFactory.create(project=project)
owner2 = DBRoleFactory.create(project=project)
maintainer1 = DBRoleFactory.create(project=project, role_name="Maintainer")
maintainer2 = DBRoleFactory.create(project=project, role_name="Maintainer")
release = DBReleaseFactory.create(project=project)
acls = []
for location in lineage(release):
try:
acl = location.__acl__
except AttributeError:
continue
if acl and callable(acl):
acl = acl()
acls.extend(acl)
assert acls == [
(Allow, "group:admins", "admin"),
(Allow, str(owner1.user.id), ["manage:project", "upload"]),
(Allow, str(owner2.user.id), ["manage:project", "upload"]),
(Allow, str(maintainer1.user.id), ["upload"]),
(Allow, str(maintainer2.user.id), ["upload"]),
]
@pytest.mark.parametrize(
("home_page", "expected"),
[
(None, None),
(
"https://github.com/pypa/warehouse",
"https://api.github.com/repos/pypa/warehouse",
),
(
"https://github.com/pypa/warehouse/",
"https://api.github.com/repos/pypa/warehouse",
),
(
"https://github.com/pypa/warehouse/tree/master",
"https://api.github.com/repos/pypa/warehouse",
),
(
"https://www.github.com/pypa/warehouse",
"https://api.github.com/repos/pypa/warehouse",
),
("https://github.com/pypa/", None),
("https://google.com/pypa/warehouse/tree/master", None),
("https://google.com", None),
("incorrect url", None),
],
)
def test_github_repo_info_url(self, db_session, home_page, expected):
release = DBReleaseFactory.create(home_page=home_page)
assert release.github_repo_info_url == expected
class TestFile:
def test_requires_python(self, db_session):
""" Attempt to write a File by setting requires_python directly,
which should fail to validate (it should only be set in Release).
"""
with pytest.raises(RuntimeError):
project = DBProjectFactory.create()
release = DBReleaseFactory.create(project=project)
DBFileFactory.create(
release=release,
filename="{}-{}.tar.gz".format(project.name, release.version),
python_version="source",
requires_python="1.0",
)
def test_compute_paths(self, db_session):
project = DBProjectFactory.create()
release = DBReleaseFactory.create(project=project)
rfile = DBFileFactory.create(
release=release,
filename="{}-{}.tar.gz".format(project.name, release.version),
python_version="source",
)
expected = "/".join(
[
rfile.blake2_256_digest[:2],
rfile.blake2_256_digest[2:4],
rfile.blake2_256_digest[4:],
rfile.filename,
]
)
assert rfile.path == expected
assert rfile.pgp_path == expected + ".asc"
def test_query_paths(self, db_session):
project = DBProjectFactory.create()
release = DBReleaseFactory.create(project=project)
rfile = DBFileFactory.create(
release=release,
filename="{}-{}.tar.gz".format(project.name, release.version),
python_version="source",
)
expected = "/".join(
[
rfile.blake2_256_digest[:2],
rfile.blake2_256_digest[2:4],
rfile.blake2_256_digest[4:],
rfile.filename,
]
)
results = (
db_session.query(File.path, File.pgp_path)
.filter(File.id == rfile.id)
.limit(1)
.one()
)
assert results == (expected, expected + ".asc")
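# Illustrative helper (hypothetical, mirrors the expectation built by the two
# path tests above): files are sharded by their blake2_256 digest as
# "<first 2 chars>/<next 2 chars>/<rest of digest>/<filename>".
def _expected_file_path(blake2_256_digest, filename):
    return "/".join(
        [
            blake2_256_digest[:2],
            blake2_256_digest[2:4],
            blake2_256_digest[4:],
            filename,
        ]
    )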
|
|
from ucloudclient import uexceptions
from ucloudclient.utils import base
class UnetManager(base.Manager):
'''
net manager class
'''
def eip_create(self, region, operator_name, bandwidth, charge_type=None,
quantity=None):
'''
create an eip
:param region:
:param operator_name:
:param bandwidth:
:param charge_type:
:param quantity:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'AllocateEIP'
body['OperatorName'] = operator_name
body['Bandwidth'] = bandwidth
if charge_type:
body['ChargeType'] = charge_type
if quantity:
body['Quantity'] = quantity
return self._get(body)
def eip_list(self, region, offset=None, limit=None):
'''
        list eips in the given region
        :param region:
        :param offset:
        :param limit:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = "DescribeEIP"
if offset:
body['Offset'] = offset
if limit:
body['Limit'] = limit
return self._get(body)
def eip_get(self, region, eipid):
'''
        query the eip with the given id
        :param region:
        :param eipid:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = "DescribeEIP"
body['EIPId'] = eipid
return self._get(body)
def eip_update(self, region, eipid, name=None, tag=None, remark=None):
'''
update an eip
:param region:
:param eipid:
:param name:
:param tag:
:param remark:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'UpdateEIPAttribute'
body['EIPId'] = eipid
        if not name and not tag and not remark:
raise uexceptions.BadParameters
if name:
body['Name'] = name
if tag:
body['Tag'] = tag
if remark:
            body['Remark'] = remark
return self._get(body)
def eip_release(self, region, eipid):
'''
release an eip
:param region:
:param eipid:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'ReleaseEIP'
body['EIPId'] = eipid
return self._get(body)
def eip_bind(self, region, eipid, resource_type, resourceid):
'''
bind ip to given resource
:param region:
:param eipid:
:param resource_type:
:param resourceid:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'BindEIP'
body['EIPId'] = eipid
body['ResourceType'] = resource_type
body['ResourceId'] = resourceid
return self._get(body)
def eip_unbind(self, region, eipid, resource_type, resourceid):
'''
unbind ip to given resource
:param region:
:param eipid:
:param resource_type:
:param resourceid:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'UnBindEIP'
body['EIPId'] = eipid
body['ResourceType'] = resource_type
body['ResourceId'] = resourceid
return self._get(body)
def eip_bandwidth_modify(self, region, eipid, bandwidth):
'''
modify bandwidth of a given eip
:param region:
:param eipid:
:param bandwidth:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'ModifyEIPBandwidth'
body['EIPId'] = eipid
body['Bandwidth'] = bandwidth
return self._get(body)
def eip_weight_modify(self, region, eipid, weight):
'''
modify weight of a given eip
:param region:
:param eipid:
:param weight:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'ModifyEIPWeight'
body['EIPId'] = eipid
body['Weight'] = weight
return self._get(body)
def eip_price_get(self, region, operator_name, bandwidth,
charge_type=None):
'''
get eip price
:param region:
:param operator_name:
:param bandwidth:
:param charge_type:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'GetEIPPrice'
body['OperatorName'] = operator_name
body['Bandwidth'] = bandwidth
if charge_type:
body['ChargeType'] = charge_type
return self._get(body)
def vip_allocate(self, region, count=None):
'''
allocate a vip
:param region:
:param count:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'AllocateVIP'
if count:
body['Count'] = count
return self._get(body)
def vip_get(self, region):
'''
list all vip
:param region:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'DescribeVIP'
return self._get(body)
def vip_release(self, region, vip):
'''
release a vip
:param region:
:param vip:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'ReleaseVIP'
body['VIP'] = vip
return self._get(body)
def sec_get(self, region, resourcetype=None, resourceid=None,
groupid=None):
'''
get security group info
:param region:
:param resourcetype:
:param resourceid:
:param groupid:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'DescribeSecurityGroup'
if resourcetype:
body['ResourceType'] = resourcetype
if resourceid:
body['ResourceId'] = resourceid
if groupid:
body['GroupId'] = groupid
return self._get(body)
def sec_reource_get(self, region, groupid=None):
'''
get resource attached to given security group
:param region:
:param groupid:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'DescribeSecurityGroupResource'
if groupid:
body['GroupId'] = groupid
return self._get(body)
def sec_creat(self, region, group_name, rules, description=None):
'''
create security group
:param region:
:param group_name:
        :param rules: list of rule strings; must be a list even if there is only one rule
:param description:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'CreateSecurityGroup'
body['GroupName'] = group_name
if rules:
for i in range(len(rules)):
body['Rule.' + str(i)] = rules[i]
if description:
body['Description'] = description
return self._get(body)
def sec_update(self, region, groupid, rules):
'''
update given security group
:param region:
:param groupid:
        :param rules: list of rule strings; must be a list even if there is only one rule
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'UpdateSecurityGroup'
body['GroupId'] = groupid
if rules:
for i in range(len(rules)):
body['Rule.' + str(i)] = rules[i]
return self._get(body)
def sec_grant(self, region, groupid, resource_type, resourceid):
'''
grant given security group to specified resource
:param region:
:param groupid:
:param resource_type:
:param resourceid:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'GrantSecurityGroup'
body['GroupId'] = groupid
body['ResourceType'] = resource_type
body['ResourceId'] = resourceid
return self._get(body)
def sec_delete(self, region, groupid):
'''
delete given security group
:param region:
        :param groupid:
:return:
'''
body = {}
body['Region'] = region
body['Action'] = 'DeleteSecurityGroup'
body['GroupId'] = groupid
return self._get(body)
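# Illustrative sketch (not part of the SDK): sec_creat/sec_update above expand
# the rules list into the indexed "Rule.N" keys expected by the API. The rule
# strings below are placeholders, not real UCloud rule syntax.
def _indexed_rules_sketch(rules):
    body = {}
    for i, rule in enumerate(rules):
        body['Rule.' + str(i)] = rule
    return body
# _indexed_rules_sketch(['rule-string-0', 'rule-string-1'])
# -> {'Rule.0': 'rule-string-0', 'Rule.1': 'rule-string-1'}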
|
|
####################################################################
# FILENAME: urldispatch.py
# PROJECT: Shiji API
# DESCRIPTION: Implements an alternative URL router for Twisted Web.
#
# * Handles routing to the appropriate API->version->call.
# * Allows for multiple versions of the same API to exist
# simultaneously and for the caller to select the
# version they want.
# $Id$
####################################################################
# (C)2015 DigiTar Inc.
# Licensed under the MIT License.
####################################################################
import re, sys, inspect, urllib, traceback
try:
import json
except ImportError:
import simplejson as json
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from twisted.internet import defer
from shiji import webapi, stats, testutil
API_VERSION_HEADER = "X-DigiTar-API-Version"
### Utility Functions
def get_version(request):
"""Locates and parses the API version headers if present."""
raw_version = request.getHeader(API_VERSION_HEADER)
if not raw_version and not request.args.has_key(API_VERSION_HEADER):
return {"api" : "",
"version" : "",
"mode" : ""}
elif not raw_version:
raw_version = request.args[API_VERSION_HEADER][0]
api = raw_version.split("-")[0].lower()
version = raw_version.split("-")[1].split("+")[0]
mode = raw_version.split("-")[1].split("+")[1].lower()
return {"api" : api,
"version" : version,
"mode" : mode}
### Classes
class URLMatchJSONResource(Resource):
"""Handles storage of URL matches."""
premature_finish = False
isLeaf = True
routes = None # Replace with regex pattern string to match at end of URL (e.g. r"route_call")
# For multiple routes pointing to this call use a list (e.g. [r"route1_call", r"route2_call"])
def __init__(self, request, url_matches, call_router=None):
request.setHeader("Content-Type", "application/json; charset=utf-8")
if call_router and \
hasattr(call_router, "version_router") and \
hasattr(call_router.version_router, "api_router"):
if hasattr(call_router.version_router.api_router, "cross_origin_domains") and \
call_router.version_router.api_router.cross_origin_domains:
request.setHeader("Access-Control-Allow-Origin",
call_router.version_router.api_router.cross_origin_domains)
request.setHeader("Access-Control-Allow-Credentials", "true")
if hasattr(call_router.version_router.api_router, "inhibit_http_caching") and \
call_router.version_router.api_router.inhibit_http_caching:
request.setHeader("Cache-Control", "no-cache")
request.setHeader("Pragma", "no-cache")
self.url_matches = url_matches
self.call_router = call_router
Resource.__init__(self)
def render(self, request):
"""
Override render to allow the passing of a deferred instead of NOT_DONE_YET.
"""
def cb_deferred_finish(result):
"Deferred has completed. Finish the request."
if isinstance(request, testutil.DummyRequest):
request._reset_body()
if not result:
result = ""
request.write(result)
request.finish()
def eb_failed(failure):
if isinstance(request, testutil.DummyRequest):
request._reset_body()
print failure.getTraceback()
request.write(str(webapi.UnexpectedServerError(request,
failure.getErrorMessage())))
request.finish()
try:
res = Resource.render(self, request)
except Exception, e:
if isinstance(request, testutil.DummyRequest):
request._reset_body()
tb_text = traceback.format_exc()
print tb_text
return str(webapi.UnexpectedServerError(request,tb_text))
if isinstance(res, defer.Deferred):
res.addCallback(cb_deferred_finish)
res.addErrback(eb_failed)
return NOT_DONE_YET
else:
return res
class CORSInterrogation(Resource):
"""
Returned for any OPTIONS request without API version headers.
"""
isLeaf = True
def __init__(self, request, api_router):
request.setHeader("Content-Type", "application/json; charset=utf-8")
if hasattr(api_router, "cross_origin_domains") and \
api_router.cross_origin_domains:
request.setHeader("Access-Control-Allow-Origin", api_router.cross_origin_domains)
Resource.__init__(self)
def render_OPTIONS(self, request):
allowed_verbs = ["PUT", "GET", "DELETE",
"POST", "HEAD", "TRACE",
"CONNECT", "PROPFIND", "PROPPATCH",
"MKCOL", "COPY", "MOVE",
"LOCK", "UNLOCK"]
print request.getHeader("Access-Control-Request-Headers")
if request.getHeader("Access-Control-Request-Headers"):
allowed_headers = request.getHeader("Access-Control-Request-Headers")
else:
allowed_headers = ",".join(request.getAllHeaders().keys())
request.setResponseCode(200)
request.setHeader("Access-Control-Allow-Methods", ",".join(allowed_verbs))
request.setHeader("Access-Control-Allow-Headers", allowed_headers)
request.setHeader("Access-Control-Allow-Credentials", "true")
return ""
class UnknownAPI(Resource):
"""
Returned for any unknown API.
"""
isLeaf = True
def render_GET(self, request):
print "UnknownAPI: Unknown API %s" % request.uri
request.setResponseCode(404)
request.setHeader("Content-Type", "application/json; charset=utf-8")
return str(webapi.UnknownAPIError(request, request.uri.split("/")[1]))
def render_POST(self, request):
return self.render_GET(request)
class UnknownCall(Resource):
"""
Returned for any unknown API call.
"""
isLeaf = True
def render_GET(self, request):
print "UnknownAPI: Unknown API call %s" % request.uri
request.setResponseCode(404)
request.setHeader("Content-Type", "application/json; charset=utf-8")
return str(webapi.UnknownAPICallError(request, request.uri.split("/")[-1]))
def render_POST(self, request):
return self.render_GET(request)
class UnknownVersion(Resource):
"""
Returned for any unknown API version.
"""
isLeaf = True
def render_GET(self, request):
version_header = request.getHeader(API_VERSION_HEADER) if request.getHeader(API_VERSION_HEADER) != None else ""
print ("UnknownVersion: %s is missing, invalid or specifies " % API_VERSION_HEADER) + \
"an API/version that does not exist. %s" % version_header
request.setResponseCode(406)
request.setHeader("Content-Type", "application/json; charset=utf-8")
return str(webapi.UnknownAPIVersionError(request, version_header))
def render_POST(self, request):
return self.render_GET(request)
class ListVersionsCall (URLMatchJSONResource):
"""
Returns a list of available versions for the current API.
"""
routes = r"list_versions"
def render_GET(self, request):
versions = sorted(self.call_router.version_router.get_version_map().keys())
webapi.write_json(request, {"all_versions" : versions,
"curr_version" : request.api_version })
class CallRouter(Resource):
"""
Dispatches the request to the appropriate API call handler based
on the first 'route' match against the URL.
** If no route match is made the verb is dispatched to the
unknown verb handler.
"""
def __init__(self, calls_module, version_router=None, auto_list_versions=False):
"""Sets up the twisted.web.Resource and loads the route map.
Arguments:
self - Reference to the object instance.
calls_module (module) - Module containing API classes. Each URLMatchJSONResource
                                    class will be introspected for its route.
auto_list_versions (boolean) - If True, add an API call 'list_versions' that will
list the available versions of the current API.
"""
self.version_router=version_router
self._createRouteMap(calls_module)
if auto_list_versions:
self.route_map.append((re.compile(r"list_versions$"), ListVersionsCall))
Resource.__init__(self)
def _createRouteMap(self, calls_module):
"""Introspects 'calls_module' to find URLMatchJSONResource classes and builds
the internal route map.
Arguments:
calls_module (module) - Module to introspect for routes.
Returns:
Nothing.
"""
self.route_map = []
for cur_class in inspect.getmembers(sys.modules[calls_module.__name__], inspect.isclass):
if issubclass(cur_class[1], URLMatchJSONResource):
if cur_class[1].routes != None:
if isinstance(cur_class[1].routes, (str, unicode)):
self.route_map.append( (re.compile(cur_class[1].routes + "$"), cur_class[1]) )
if isinstance(cur_class[1].routes, list):
for route in cur_class[1].routes:
self.route_map.append( (re.compile(route + "$"), cur_class[1]) )
def getChild(self, name, request):
"""
Dispatches based on the route table. Named groups are passed to the called
Resource object as a dictionary via the url_matches parameter.
"""
# If no API version is attached to the request, we were called directly.
# ...so NULL string the api_version so it doesn't break folks who expect it.
try:
api_mode = get_version(request)["mode"]
except IndexError:
return UnknownVersion()
        if api_mode not in ("test", "prod"):
return UnknownVersion()
else:
request.api_mode = api_mode
request.api_config = self.version_router.api_router.config
for route in self.route_map:
route_match = route[0].match("/".join(request.uri.split("?")[0].split("/")[2:]))
if route_match:
match_dict = route_match.groupdict()
for key in match_dict:
match_dict[key] = urllib.unquote(match_dict[key])
return route[1](request, url_matches=match_dict, call_router=self)
return UnknownCall()
class VersionRouter(Resource):
"""
Dispatches the request to the appropriate API version based
on the first match of X-DigiTar-API-Version header against
version map.
"""
def __init__(self, version_map, api_router=None):
"""Sets up the twisted.web.Resource and loads the version map.
Arguments:
self - Reference to the object instance
version_map (dictionary) - Dictionary of mapping tuples containing a version pattern to match, and
the module to handle that version:
{ "0.8" : (r"0.8", v0_8"),
"1.0" : (r"1.0", v1_0") }
"""
self.api_router = api_router
temp_version_map = {}
for version in version_map.keys():
temp_version_map[version]= (re.compile(version_map[version][0] + "$"), version_map[version][1])
self.version_map = temp_version_map
Resource.__init__(self)
def getChild(self, name, request):
"""
Dispatches based on the version table.
"""
try:
header_version = get_version(request)["version"]
except IndexError:
return UnknownVersion()
for version in sorted(self.version_map.keys()):
version_match = self.version_map[version][0].match(header_version)
if version_match:
request.api_version = version
self.version_map[version][1].call_router.version_router = self
return self.version_map[version][1].call_router.getChild(name, request)
return UnknownVersion()
def get_version_map(self):
"""Returns the current API's version map.
Arguments:
NONE
Returns:
version_map (dict) - Dictionary where the keys are the API version numbers and
the values are tuples of the format:
(version_regex, module_reference)
version_regex (raw string) - RegEx used to match against the version number
passed in the request.
module_reference (module literal) - Module containing API.
Ex: { "0.8" : (r"0.8", v0_8) }
"""
return self.version_map
class APIRouter(Resource):
"""
Dispatches the request to the appropriate 'API' module based
on the first 'route' match against the URL.
    ** If no route match is made the verb is dispatched to the
unknown verb handler.
"""
def __init__(self, route_map, config={}, cross_origin_domains=None, inhibit_http_caching=True):
"""Sets up the twisted.web.Resource and loads the route map.
Arguments:
self - Reference to the object instance.
route_map (list) - List of mapping tuples containing a URL regex string to match, and
the module to handle the API call. Module must implement
version_router member.
(r"^/example/auth/", AuthAPI)
config (dict) - Dictionary of optional configuration settings needed for your API.
"""
self.cross_origin_domains = cross_origin_domains
self.inhibit_http_caching = inhibit_http_caching
for i in range(len(route_map)):
route_map[i] = (re.compile(route_map[i][0] + "$"), route_map[i][1])
self.route_map = route_map
self.config = config
Resource.__init__(self)
def getChild(self, name, request):
"""
Dispatches based on the route table. Named groups are passed to the called
Resource object as a dictionary via the url_matches parameter.
"""
request.metrics = stats.metrics
if request.method.upper() == "OPTIONS":
return CORSInterrogation(request, api_router=self)
try:
header_version = get_version(request)
except IndexError:
return UnknownVersion()
for route in self.route_map:
route_match = route[0].match(name)
if route_match:
request.api_name = route[1].api_name.lower()
if header_version["api"] != request.api_name:
return UnknownVersion()
else:
route[1].version_router.api_router = self
return route[1].version_router
return UnknownAPI()
def get_route_map(self):
"""Returns the API map of API names to API modules."
Arguments:
None
Returns:
route_map (dict) - Dictionary where the keys are the API names and the values
are the API module literals.
Ex: { "utilities" : utils }
"""
return self.route_map
## DEPRECATED: Use CallRouter. Maintained for backwards compatibility.
class URLRouter(Resource):
"""
Dispatches the request to the appropriate 'API' handler based
on the first 'route' match against the URL.
    ** If no route match is made the verb is dispatched to the
unknown verb handler.
"""
def __init__(self, route_map):
"""Sets up the twisted.web.Resource and loads the route map.
Arguments:
self - Reference to the object instance.
route_map (list) - List of mapping tuples containing a URL regex string to match, and
the Resource object to handle the API call. ?P<arg_name> regex
                               groups are converted to elements in the argument dictionary passed
to the API call handler:
(r"^/example/auth/(?P<auth_call>.+)", AuthAPI)
"""
self.route_map = route_map
Resource.__init__(self)
def getChild(self, name, request):
"""
Dispatches based on the route table. Named groups are passed to the called
Resource object as a dictionary via the url_matches parameter.
"""
for route in self.route_map:
route_match = re.match(route[0], request.uri)
if route_match:
return route[1](request, url_matches=route_match.groupdict())
return UnknownAPI()
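# --- Illustrative sketch (hypothetical call class, not part of Shiji) ---------
# A call handler only needs a `routes` pattern and render_* methods;
# CallRouter._createRouteMap picks it up automatically when it introspects the
# calls module that defines it.
class _PingCallSketch(URLMatchJSONResource):
    routes = r"ping"
    def render_GET(self, request):
        return json.dumps({"pong" : True})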
|
|
from __future__ import division
import os
import time
import math
from glob import glob
import tensorflow as tf
import numpy as np
from six.moves import xrange
from ops import *
from utils import *
def conv_out_size_same(size, stride):
return int(math.ceil(float(size) / float(stride)))
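# Example (illustrative values): conv_out_size_same(28, 2) == 14 and
# conv_out_size_same(7, 2) == 4, i.e. the "same"-padded output size is
# ceil(size / stride); the generator/sampler below use it to derive s_h2/s_w2.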
class DCGAN(object):
def __init__(self, sess, input_height=28, input_width=28, crop=False,
batch_size=64, sample_num = 64, output_height=64, output_width=64, gf_dim=64, df_dim=64,
gfc_dim=1024, dfc_dim=1024, dataset_name='mnist',checkpoint_dir=None,sample_dir=None,
adversarial_path=None,ground_truth_path=None,test_path=None,save_path=None):
"""
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
      gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
"""
self.sess = sess
self.crop = crop
self.batch_size = batch_size
self.sample_num = sample_num
self.input_height = input_height
self.input_width = input_width
self.output_height = output_height
self.output_width = output_width
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
self.adversarial_path = adversarial_path
assert(self.adversarial_path != None)
self.ground_truth_path = ground_truth_path
assert(self.ground_truth_path != None)
if test_path != None:
self.test_path = test_path
else:
self.test_path = adversarial_path
if save_path != None:
self.save_path = save_path
else:
self.save_path = "./data/resAPE-GAN.npy"
# batch normalization : deals with poor initialization helps gradient flow
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
self.dataset_name = dataset_name
self.checkpoint_dir = checkpoint_dir
self.sample_dir = sample_dir
self.adv = np.load(self.adversarial_path)
if self.dataset_name == "mnist":
self.c_dim = 1
else:
self.c_dim = 3
self.grayscale = (self.c_dim == 1)
self.gt = np.load(self.ground_truth_path)
self.build_model()
def build_model(self):
if self.crop:
image_dims = [self.output_height, self.output_width, self.c_dim]
else:
image_dims = [self.input_height, self.input_width, self.c_dim]
self.gtInputs = tf.placeholder(
tf.float32, [self.batch_size] + image_dims, name='ground_truth_images')
self.advInputs = tf.placeholder(
tf.float32, [self.batch_size] + image_dims, name='adversarial_images')
self.z_sum = histogram_summary("z", self.advInputs)
self.G = self.generator(self.advInputs)
self.D, self.D_logits = self.discriminator(self.gtInputs, reuse=False)
self.sampler = self.sampler(self.advInputs)
self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)
self.d_sum = histogram_summary("d", self.D)
self.d__sum = histogram_summary("d_", self.D_)
self.G_sum = image_summary("G", self.G)
def sigmoid_cross_entropy_with_logits(x, y):
try:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)
except:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, targets=y)
self.d_loss_real = tf.reduce_mean(
sigmoid_cross_entropy_with_logits(self.D_logits, tf.ones_like(self.D)))
self.d_loss_fake = tf.reduce_mean(
sigmoid_cross_entropy_with_logits(self.D_logits_, tf.zeros_like(self.D_)))
self.g_loss_real = tf.reduce_mean(
sigmoid_cross_entropy_with_logits(self.D_logits_, tf.ones_like(self.D_)))
self.mse_loss = tf.sqrt(tf.reduce_mean(tf.pow(tf.subtract(self.gtInputs, self.G), 2)))
self.d_loss = self.d_loss_real + self.d_loss_fake
self.g_loss = 0.02 * self.g_loss_real + 0.9 * self.mse_loss
self.d_loss_real_sum = scalar_summary("d_loss_real", self.d_loss_real)
self.d_loss_fake_sum = scalar_summary("d_loss_fake", self.d_loss_fake)
self.g_loss_real_sum = scalar_summary("g_loss_real", self.g_loss_real)
self.mse_loss_sum = scalar_summary("mse_loss", self.mse_loss)
self.g_loss_sum = scalar_summary("g_loss", self.g_loss)
self.d_loss_sum = scalar_summary("d_loss", self.d_loss)
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
self.saver = tf.train.Saver()
def train(self, config):
d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
.minimize(self.d_loss, var_list=self.d_vars)
g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
.minimize(self.g_loss, var_list=self.g_vars)
try:
tf.global_variables_initializer().run()
except:
tf.initialize_all_variables().run()
self.g_sum = merge_summary([self.z_sum, self.d__sum,
self.G_sum, self.d_loss_fake_sum, self.g_loss_sum,self.g_loss_real_sum,self.mse_loss_sum])
self.d_sum = merge_summary(
[self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
self.writer = SummaryWriter("./logs", self.sess.graph)
sample_adv = self.adv[0:self.sample_num]
sample_gt = self.gt[0:self.sample_num]
counter = 1
start_time = time.time()
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
counter = checkpoint_counter
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
for epoch in xrange(config.epoch):
batch_idxs = min(len(self.adv), config.train_size) // config.batch_size
for idx in xrange(0, batch_idxs):
batch_adv = self.adv[idx*config.batch_size:(idx+1)*config.batch_size]
batch_gt = self.gt[idx*config.batch_size:(idx+1)*config.batch_size]
# Update D network
_, summary_str = self.sess.run([d_optim,self.d_sum],
feed_dict={
self.gtInputs: batch_gt,
self.advInputs: batch_adv
})
self.writer.add_summary(summary_str, counter)
# Update G network
_, summary_str = self.sess.run([g_optim,self.g_sum],
feed_dict={
self.gtInputs: batch_gt,
self.advInputs: batch_adv
})
self.writer.add_summary(summary_str, counter)
# Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
_, summary_str = self.sess.run([g_optim,self.g_sum],
feed_dict={
self.gtInputs: batch_gt,
self.advInputs: batch_adv
})
self.writer.add_summary(summary_str, counter)
errD_fake = self.d_loss_fake.eval({
self.advInputs: batch_adv,
})
errD_real = self.d_loss_real.eval({
self.gtInputs: batch_gt,
})
errG_real = self.g_loss_real.eval({
self.advInputs: batch_adv,
})
errMSE = self.mse_loss.eval({
self.gtInputs: batch_gt,
self.advInputs: batch_adv
})
counter += 1
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f, mse_loss: %.8f" \
% (epoch, idx, batch_idxs,
time.time() - start_time, errD_fake+errD_real, errG_real + errMSE, errMSE))
if np.mod(counter, 100) == 1:
samples, d_loss, g_loss = self.sess.run(
[self.sampler, self.d_loss, self.g_loss],
feed_dict={
self.gtInputs: sample_gt,
self.advInputs: sample_adv
}
)
save_images(samples, image_manifold_size(samples.shape[0]),
'./{}/train_{:02d}_{:04d}_reconstructed.png'.format(config.sample_dir, epoch, idx))
save_images(sample_adv, image_manifold_size(sample_adv.shape[0]),
'./{}/train_{:02d}_{:04d}_adv.png'.format(config.sample_dir, epoch, idx))
save_images(sample_gt, image_manifold_size(sample_gt.shape[0]),
'./{}/train_{:02d}_{:04d}_gt.png'.format(config.sample_dir, epoch, idx))
print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))
if np.mod(counter, 500) == 2:
self.save(config.checkpoint_dir, counter)
def discriminator(self, image, reuse=False):
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
h3 = linear(tf.reshape(h2, [self.batch_size, -1]), 1, 'd_h3_lin')
return tf.nn.sigmoid(h3), h3
def generator(self, z):
with tf.variable_scope("generator") as scope:
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
gconv1 = lrelu(self.g_bn0(conv2d(z, self.gf_dim, name='g_h0_conv')))
gconv2 = lrelu(self.g_bn1(conv2d(gconv1, self.gf_dim*2, name='g_h1_conv')))
self.h1, self.h1_w, self.h1_b = deconv2d(
gconv2, [self.batch_size, s_h2, s_w2, self.gf_dim], name='g_h2_deconv', with_w=True)
h1 = tf.nn.relu(self.g_bn2(self.h1))
h2, self.h2_w, self.h2_b = deconv2d(
h1, [self.batch_size, s_h, s_w, self.c_dim], name='g_h3_deconv', with_w=True)
return tf.nn.tanh(h2)
def sampler(self, z):
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
gconv1 = lrelu(self.g_bn0(conv2d(z, self.gf_dim, name='g_h0_conv')))
gconv2 = lrelu(self.g_bn1(conv2d(gconv1, self.gf_dim*2, name='g_h1_conv')))
self.h1, self.h1_w, self.h1_b = deconv2d(
gconv2, [self.batch_size, s_h2, s_w2, self.gf_dim], name='g_h2_deconv', with_w=True)
h1 = tf.nn.relu(self.g_bn2(self.h1))
h2, self.h2_w, self.h2_b = deconv2d(
h1, [self.batch_size, s_h, s_w, self.c_dim], name='g_h3_deconv', with_w=True)
return tf.nn.tanh(h2)
@property
def model_dir(self):
return "{}_{}_{}_{}".format(
self.dataset_name, self.batch_size,
self.output_height, self.output_width)
def save(self, checkpoint_dir, step):
model_name = "DCGAN.model"
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
import re
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
counter = int(next(re.finditer("(\d+)(?!.*\d)",ckpt_name)).group(0))
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DataFlowDebugSessionOperations(object):
"""DataFlowDebugSessionOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.artifacts.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_data_flow_debug_session_initial(
self,
request, # type: "models.CreateDataFlowDebugSessionRequest"
**kwargs # type: Any
):
# type: (...) -> Optional["models.CreateDataFlowDebugSessionResponse"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.CreateDataFlowDebugSessionResponse"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_data_flow_debug_session_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'CreateDataFlowDebugSessionRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CreateDataFlowDebugSessionResponse', pipeline_response)
if response.status_code == 202:
response_headers['location']=self._deserialize('str', response.headers.get('location'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_data_flow_debug_session_initial.metadata = {'url': '/createDataFlowDebugSession'} # type: ignore
def begin_create_data_flow_debug_session(
self,
request, # type: "models.CreateDataFlowDebugSessionRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.CreateDataFlowDebugSessionResponse"]
"""Creates a data flow debug session.
:param request: Data flow debug session definition.
:type request: ~azure.synapse.artifacts.models.CreateDataFlowDebugSessionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for LROBasePolling, False for no polling, or a
 polling object for your own polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either CreateDataFlowDebugSessionResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.synapse.artifacts.models.CreateDataFlowDebugSessionResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.CreateDataFlowDebugSessionResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_data_flow_debug_session_initial(
request=request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('CreateDataFlowDebugSessionResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = LROBasePolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_data_flow_debug_session.metadata = {'url': '/createDataFlowDebugSession'} # type: ignore
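# Hedged usage sketch (illustrative, not generated code): driving the LRO above
# from a client object. `client` stands for whatever exposes this operation
# group as `data_flow_debug_session`; that attribute name, and the request
# fields, are assumptions not confirmed by this file.
#
#   request = models.CreateDataFlowDebugSessionRequest(...)  # fill fields per the model definition
#   poller = client.data_flow_debug_session.begin_create_data_flow_debug_session(
#       request, polling=True)                  # polling=True -> LROBasePolling, as implemented above
#   session_info = poller.result()              # CreateDataFlowDebugSessionResponse
#   token = poller.continuation_token()         # can be replayed later via continuation_token=token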
def query_data_flow_debug_sessions_by_workspace(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.QueryDataFlowDebugSessionsResponse"]
"""Query all active data flow debug sessions.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either QueryDataFlowDebugSessionsResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.synapse.artifacts.models.QueryDataFlowDebugSessionsResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.QueryDataFlowDebugSessionsResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.query_data_flow_debug_sessions_by_workspace.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('QueryDataFlowDebugSessionsResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.CloudError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
query_data_flow_debug_sessions_by_workspace.metadata = {'url': '/queryDataFlowDebugSessions'} # type: ignore
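# Hedged usage sketch: the method above returns an ItemPaged iterator, so the
# active sessions can be consumed lazily. `client` is the same assumed object
# as in the earlier sketch; the attributes of each yielded item come from the
# QueryDataFlowDebugSessionsResponse model, not from this file.
#
#   for session in client.data_flow_debug_session.query_data_flow_debug_sessions_by_workspace():
#       ...  # each item is one entry of QueryDataFlowDebugSessionsResponse.value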
def add_data_flow(
self,
request, # type: "models.DataFlowDebugPackage"
**kwargs # type: Any
):
# type: (...) -> "models.AddDataFlowToDebugSessionResponse"
"""Add a data flow into debug session.
:param request: Data flow debug session definition with debug content.
:type request: ~azure.synapse.artifacts.models.DataFlowDebugPackage
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AddDataFlowToDebugSessionResponse, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.AddDataFlowToDebugSessionResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AddDataFlowToDebugSessionResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.add_data_flow.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'DataFlowDebugPackage')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AddDataFlowToDebugSessionResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add_data_flow.metadata = {'url': '/addDataFlowToDebugSession'} # type: ignore
def delete_data_flow_debug_session(
self,
request, # type: "models.DeleteDataFlowDebugSessionRequest"
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes a data flow debug session.
:param request: Data flow debug session definition for deletion.
:type request: ~azure.synapse.artifacts.models.DeleteDataFlowDebugSessionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.delete_data_flow_debug_session.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'DeleteDataFlowDebugSessionRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete_data_flow_debug_session.metadata = {'url': '/deleteDataFlowDebugSession'} # type: ignore
def _execute_command_initial(
self,
request, # type: "models.DataFlowDebugCommandRequest"
**kwargs # type: Any
):
# type: (...) -> Optional["models.DataFlowDebugCommandResponse"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.DataFlowDebugCommandResponse"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._execute_command_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'DataFlowDebugCommandRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DataFlowDebugCommandResponse', pipeline_response)
if response.status_code == 202:
response_headers['location']=self._deserialize('str', response.headers.get('location'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_execute_command_initial.metadata = {'url': '/executeDataFlowDebugCommand'} # type: ignore
def begin_execute_command(
self,
request, # type: "models.DataFlowDebugCommandRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.DataFlowDebugCommandResponse"]
"""Execute a data flow debug command.
:param request: Data flow debug command definition.
:type request: ~azure.synapse.artifacts.models.DataFlowDebugCommandRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for LROBasePolling, False for no polling, or a
 polling object for your own polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DataFlowDebugCommandResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.synapse.artifacts.models.DataFlowDebugCommandResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.DataFlowDebugCommandResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._execute_command_initial(
request=request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DataFlowDebugCommandResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = LROBasePolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_execute_command.metadata = {'url': '/executeDataFlowDebugCommand'} # type: ignore
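# Hedged end-to-end sketch (illustrative only): run a debug command against an
# existing session, then tear the session down. The request field names are
# assumptions; consult the model definitions for the exact shapes.
#
#   cmd = models.DataFlowDebugCommandRequest(...)           # references the session to debug
#   output = client.data_flow_debug_session.begin_execute_command(cmd, polling=True).result()
#   client.data_flow_debug_session.delete_data_flow_debug_session(
#       models.DeleteDataFlowDebugSessionRequest(...))      # frees the debug cluster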
|
|
"""Test the Logitech Harmony Hub config flow."""
from unittest.mock import AsyncMock, MagicMock, patch
import aiohttp
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.harmony.config_flow import CannotConnect
from homeassistant.components.harmony.const import DOMAIN, PREVIOUS_ACTIVE_ACTIVITY
from homeassistant.const import CONF_HOST, CONF_NAME
from tests.common import MockConfigEntry
def _get_mock_harmonyapi(connect=None, close=None):
harmonyapi_mock = MagicMock()
type(harmonyapi_mock).connect = AsyncMock(return_value=connect)
type(harmonyapi_mock).close = AsyncMock(return_value=close)
return harmonyapi_mock
async def test_user_form(hass):
"""Test we get the user form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
harmonyapi = _get_mock_harmonyapi(connect=True)
with patch(
"homeassistant.components.harmony.util.HarmonyAPI",
return_value=harmonyapi,
), patch(
"homeassistant.components.harmony.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.2.3.4", "name": "friend"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "friend"
assert result2["data"] == {"host": "1.2.3.4", "name": "friend"}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_ssdp(hass):
"""Test we get the form with ssdp source."""
with patch(
"homeassistant.components.harmony.config_flow.HubConnector.get_remote_id",
return_value=1234,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://192.168.1.12:8088/description",
upnp={
"friendlyName": "Harmony Hub",
},
),
)
assert result["type"] == "form"
assert result["step_id"] == "link"
assert result["errors"] == {}
assert result["description_placeholders"] == {
"host": "Harmony Hub",
"name": "192.168.1.12",
}
progress = hass.config_entries.flow.async_progress()
assert len(progress) == 1
assert progress[0]["flow_id"] == result["flow_id"]
assert progress[0]["context"]["confirm_only"] is True
harmonyapi = _get_mock_harmonyapi(connect=True)
with patch(
"homeassistant.components.harmony.util.HarmonyAPI",
return_value=harmonyapi,
), patch(
"homeassistant.components.harmony.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Harmony Hub"
assert result2["data"] == {"host": "192.168.1.12", "name": "Harmony Hub"}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_ssdp_fails_to_get_remote_id(hass):
"""Test we abort if we cannot get the remote id."""
with patch(
"homeassistant.components.harmony.config_flow.HubConnector.get_remote_id",
side_effect=aiohttp.ClientError,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://192.168.1.12:8088/description",
upnp={
"friendlyName": "Harmony Hub",
},
),
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_form_ssdp_aborts_before_checking_remoteid_if_host_known(hass):
"""Test we abort without connecting if the host is already known."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={"host": "2.2.2.2", "name": "any"},
)
config_entry.add_to_hass(hass)
config_entry_without_host = MockConfigEntry(
domain=DOMAIN,
data={"name": "other"},
)
config_entry_without_host.add_to_hass(hass)
harmonyapi = _get_mock_harmonyapi(connect=True)
with patch(
"homeassistant.components.harmony.util.HarmonyAPI",
return_value=harmonyapi,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://2.2.2.2:8088/description",
upnp={
"friendlyName": "Harmony Hub",
},
),
)
assert result["type"] == "abort"
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.harmony.util.HarmonyAPI",
side_effect=CannotConnect,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.2.3.4",
"name": "friend",
"activity": "Watch TV",
"delay_secs": 0.2,
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_options_flow(hass, mock_hc, mock_write_config):
"""Test config flow options."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="abcde12345",
data={CONF_HOST: "1.2.3.4", CONF_NAME: "Guest Room"},
options={"activity": "Watch TV", "delay_secs": 0.5},
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
await hass.async_block_till_done()
assert await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"activity": PREVIOUS_ACTIVE_ACTIVITY, "delay_secs": 0.4},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
"activity": PREVIOUS_ACTIVE_ACTIVITY,
"delay_secs": 0.4,
}
|
|
from .ns1_api_url import NS1_ENDPOINTS
class Ns1Url:
def __init__(self, api_endpoint, check):
self.check = check
self.api_endpoint = api_endpoint
# generate urls for QPS and usage statistics
# returns a dictionary mapping <metric key> -> [url, metric name, tags, metric type]
def get_stats_url_usage(self, key, val, networknames):
urlList = {}
query_string = ""
metric_name = "usage"
metric_zone = "usage.zone"
metric_record = "usage.record"
metric_type = "count"
query_string = "?period=1h&expand=false"
url = NS1_ENDPOINTS["qps.usage"].format(apiendpoint=self.api_endpoint, key=key, query=query_string)
# get account wide stats
tags = [""]
urlList[key] = [url, metric_name, tags, metric_type]
# if list of networks is supplied, query account-wide for each network as well
if networknames and len(networknames) > 0:
for k, v in networknames.items():
query_string = "?period=1h&expand=false&networks={networks}".format(networks=k)
url = NS1_ENDPOINTS["qps.usage"].format(apiendpoint=self.api_endpoint, key=key, query=query_string)
tags = ["network:{network}".format(network=v)]
urlList["{key}.{netid}".format(key=key, netid=k)] = [url, metric_name, tags, metric_type]
if not val:
return urlList
for zoneDict in val:
# zone is again dictionary, with zone name as key and records as list of objects
for domain, records in zoneDict.items():
# here, domain is zone name, records is list of records and record types
# if list of networks is supplied, query zone for each network as well
if networknames and len(networknames) > 0:
for k, v in networknames.items():
query_string = "?period=1h&expand=false&networks={networks}".format(networks=k)
url = NS1_ENDPOINTS["qps.usage.zone"].format(
apiendpoint=self.api_endpoint, key=key, domain=domain, query=query_string
)
tags = ["network:{network}".format(network=v), "zone:{zone}".format(zone=domain)]
urlkey = "{key}.{domain}.{netid}".format(key=key, domain=domain, netid=k)
urlList[urlkey] = [url, metric_name, tags, metric_type]
else:
query_string = "?period=1h&expand=false"
url = NS1_ENDPOINTS["qps.usage.zone"].format(
apiendpoint=self.api_endpoint, key=key, domain=domain, query=query_string
)
tags = ["zone:{zone}".format(zone=domain)]
urlList["{key}.{domain}".format(key=key, domain=domain)] = [url, metric_zone, tags, metric_type]
if not records:
# if records are not specified, get all records for the zone, then build url for each record
records = self.check.get_zone_records(domain)
# for each record, either specified or queried from zone
if records:
for rec in records:
for rname, rtype in rec.items():
if networknames and len(networknames) > 0:
for k, v in networknames.items():
query_string = "?period=1h&expand=false&networks={networks}".format(networks=k)
url = NS1_ENDPOINTS["qps.usage.record"].format(
apiendpoint=self.api_endpoint,
key=key,
domain=domain,
record=rname,
rectype=rtype,
query=query_string,
)
tags = [
"network:{network}".format(network=v),
"zone:{zone}".format(zone=domain),
"record:{record}".format(record=rname),
"type:{rectype}".format(rectype=rtype),
]
urlkey = "{key}.{record}.{rectype}.{netid}".format(
key=key, record=rname, rectype=rtype, netid=k
)
urlList[urlkey] = [url, metric_record, tags, metric_type]
else:
query_string = "?period=1h&expand=false"
url = NS1_ENDPOINTS["qps.usage.record"].format(
apiendpoint=self.api_endpoint,
key=key,
domain=domain,
record=rname,
rectype=rtype,
query=query_string,
)
tags = [
"zone:{zone}".format(zone=domain),
"record:{record}".format(record=rname),
"type:{rectype}".format(rectype=rtype),
]
urlkey = "{key}.{record}.{rectype}".format(key=key, record=rname, rectype=rtype)
urlList[urlkey] = [url, metric_record, tags, metric_type]
return urlList
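# Hedged example of the mapping built above (the concrete URLs come from
# NS1_ENDPOINTS, which lives in another module, so they are shown schematically;
# zone and record names are illustrative):
#
#   {
#       "usage":                   ["<account usage endpoint>", "usage", [""], "count"],
#       "usage.example.com":       ["<zone usage endpoint>", "usage.zone", ["zone:example.com"], "count"],
#       "usage.www.example.com.A": ["<record usage endpoint>", "usage.record",
#                                   ["zone:example.com", "record:www.example.com", "type:A"], "count"],
#   }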
# generate urls for QPS statistics
# returns a dictionary mapping <metric key> -> [url, metric name, tags, metric type]
def get_stats_url_qps(self, key, val):
urlList = {}
query_string = ""
metric_name = "qps"
metric_zone = "qps.zone"
metric_record = "qps.record"
metric_type = "gauge"
url = NS1_ENDPOINTS["qps.usage"].format(apiendpoint=self.api_endpoint, key="qps", query=query_string)
# get account wide stats
tags = [""]
urlList[key] = [url, metric_name, tags, metric_type]
if not val:
return urlList
for zoneDict in val:
# zone is again dictionary, with zone name as key and records as list of objects
for domain, records in zoneDict.items():
# here, domain is zone name, records is list of records and record types
url = NS1_ENDPOINTS["qps.usage.zone"].format(
apiendpoint=self.api_endpoint, key=key, domain=domain, query=query_string
)
tags = ["zone:{zone}".format(zone=domain)]
urlList["{key}.{domain}".format(key=key, domain=domain)] = [url, metric_zone, tags, metric_type]
if not records:
# if records are not specified, get all records for the zone, then build url for each record
records = self.check.get_zone_records(domain)
for rec in records:
for k, v in rec.items():
print("{k} = {v}".format(k=k, v=v))
if records:
for rec in records:
for rname, rtype in rec.items():
url = NS1_ENDPOINTS["qps.usage.record"].format(
apiendpoint=self.api_endpoint,
key=key,
domain=domain,
record=rname,
rectype=rtype,
query=query_string,
)
tags = [
"zone:{zone}".format(zone=domain),
"record:{record}".format(record=rname),
"type:{rectype}".format(rectype=rtype),
]
urlkey = "{key}.{record}.{rectype}".format(key=key, record=rname, rectype=rtype)
urlList[urlkey] = [url, metric_record, tags, metric_type]
# if the zone still has no records after the lookup above, there is nothing
# record-level to add; the zone-wide entry created earlier already covers it
return urlList
# generate url for DDI lease and lps statistics
def get_ddi_url(self, key, val, scopegroups):
urlList = {}
metric_lease = "leases"
metric_lps = "peak_lps"
metric_type_count = "count"
metric_type_gauge = "gauge"
# first get account-wide lease and lps stats
tags = ["scope_group:account_wide"]
url = NS1_ENDPOINTS["ddi.leases"].format(apiendpoint=self.api_endpoint)
urlList["leases"] = [url, metric_lease, tags, metric_type_count]
url = NS1_ENDPOINTS["ddi.lps"].format(apiendpoint=self.api_endpoint)
urlList["peak_lps"] = [url, metric_lps, tags, metric_type_gauge]
# if scope groups are specified, get stats for those requested
if not val:
return urlList
for scope_id in val:
if scope_id in scopegroups:
tags = ["scope_group:{scope_name}".format(scope_name=scopegroups[scope_id])]
url = NS1_ENDPOINTS["ddi.leases.scope"].format(apiendpoint=self.api_endpoint, scope_group_id=scope_id)
urlList["leases.{scope_group_id}".format(scope_group_id=scope_id)] = [
url,
metric_lease,
tags,
metric_type_count,
]
url = NS1_ENDPOINTS["ddi.lps.scope"].format(apiendpoint=self.api_endpoint, scope_group_id=scope_id)
urlList["peak_lps.{scope_group_id}".format(scope_group_id=scope_id)] = [
url,
metric_lps,
tags,
metric_type_gauge,
]
return urlList
def get_zone_info_url(self, key, val):
urlList = {}
if not val:
return urlList
for accDict in val:
for _, domainList in accDict.items():
metric_record = "ttl"
metric_type = "gauge"
if domainList:
for domain in domainList:
tags = ["record:{zone}".format(zone=domain)]
url = NS1_ENDPOINTS["ttl"].format(apiendpoint=self.api_endpoint, domain=domain)
urlList["{key}.ttl.{domain}".format(key=key, domain=domain)] = [
url,
metric_record,
tags,
metric_type,
]
return urlList
def get_plan_details_url(self, key, val):
urlList = {}
# get account plan limits
url = NS1_ENDPOINTS["billing"].format(apiendpoint=self.api_endpoint)
tags = [""]
metric_record = "billing"
metric_type = "gauge"
urlList["{key}.billing".format(key=key)] = [url, metric_record, tags, metric_type]
return urlList
def get_pulsar_by_record_url(self, val, query_params):
urlList = {}
query_string = "?period=1h&"
if query_params:
if "pulsar_geo" in query_params and query_params["pulsar_geo"] != "*":
query_string = query_string + "geo=" + query_params["pulsar_geo"] + "&"
if "pulsar_asn" in query_params and query_params["pulsar_asn"] != "*":
query_string = query_string + "asn=" + query_params["pulsar_asn"] + "&"
query_string = query_string[:-1]
for record in val:
for domain, rectype in record.items():
tags = ["record:{record}".format(record=domain)]
metric_type = "count"
metric_record = "pulsar.decisions.record"
# pulsar decisions for record
url = NS1_ENDPOINTS["pulsar.decisions.record"].format(
apiendpoint=self.api_endpoint, query=query_string + "&agg=jobid&record=" + domain + "_" + rectype
)
k = "pulsar.decisions.{rec_name}.{rec_type}".format(rec_name=domain, rec_type=rectype)
urlList[k] = [url, metric_record, tags, metric_type]
metric_record = "pulsar.routemap.hit.record"
# route map hits by record
url = NS1_ENDPOINTS["pulsar.routemap.hit.record"].format(
apiendpoint=self.api_endpoint, rec_name=domain, rec_type=rectype, query=query_string
)
k = "pulsar.routemap.hit.{rec_name}.{rec_type}".format(rec_name=domain, rec_type=rectype)
urlList[k] = [url, metric_record, tags, metric_type]
metric_record = "pulsar.routemap.miss.record"
# route map misses by record
url = NS1_ENDPOINTS["pulsar.routemap.miss.record"].format(
apiendpoint=self.api_endpoint, rec_name=domain, rec_type=rectype, query=query_string
)
k = "pulsar.routemap.miss.{rec_name}.{rec_type}".format(rec_name=domain, rec_type=rectype)
urlList[k] = [url, metric_record, tags, metric_type]
return urlList
def get_pulsar_by_app_url(self, val, pulsar_apps, query_params):
# pulsar by app
urlList = {}
metric_type = "gauge"
query_string = "?"
if query_params:
if "pulsar_period" in query_params:
query_string = query_string + "period=" + query_params["pulsar_period"] + "&"
if "pulsar_geo" in query_params and query_params["pulsar_geo"] != "*":
query_string = query_string + "geo=" + query_params["pulsar_geo"] + "&"
if "pulsar_asn" in query_params and query_params["pulsar_asn"] != "*":
query_string = query_string + "asn=" + query_params["pulsar_asn"] + "&"
query_string = query_string[:-1]
for app in val:
# apps[app["appid"]] = [app["name"],jobs]
for appid, v in app.items():
app_name = pulsar_apps[appid][0]
for job in pulsar_apps[appid][1]:
jobid = job["jobid"]
if jobid == v:
tags = [
"app:{pulsar_app_name}".format(pulsar_app_name=app_name),
"resource:{job_name}".format(job_name=job["name"]),
]
# pulsar aggregate performance data
url = NS1_ENDPOINTS["pulsar.performance"].format(
apiendpoint=self.api_endpoint, app_id=appid, job_id=jobid, query=query_string
)
metric_record = "pulsar.performance"
k = "pulsar.performance.{app_id}.{job_id}".format(app_id=appid, job_id=jobid)
urlList[k] = [url, metric_record, tags, metric_type]
# pulsar availability data
url = NS1_ENDPOINTS["pulsar.availability"].format(
apiendpoint=self.api_endpoint, app_id=appid, job_id=jobid, query=query_string
)
metric_record = "pulsar.availability"
k = "pulsar.availability.{app_id}.{job_id}".format(app_id=appid, job_id=jobid)
urlList[k] = [url, metric_record, tags, metric_type]
return urlList
def get_pulsar_url(self, query_params):
urlList = {}
query_string = "?"
# for "pulsar" group of endpoints, override settings and always use period = 1h
# to get properly summarized stats
query_string = query_string + "period=1h&"
if query_params:
if "pulsar_geo" in query_params and query_params["pulsar_geo"] != "*":
query_string = query_string + "geo=" + query_params["pulsar_geo"] + "&"
if "pulsar_asn" in query_params and query_params["pulsar_asn"] != "*":
query_string = query_string + "asn=" + query_params["pulsar_asn"] + "&"
query_string = query_string[:-1]
tags = [""]
metric_record = "pulsar.decisions"
keyname = "pulsar.decisions"
metric_type = "count"
# pulsar decisions account wide
url = NS1_ENDPOINTS[keyname].format(apiendpoint=self.api_endpoint, query=query_string + "&agg=jobid")
urlList[keyname] = [url, metric_record, tags, metric_type]
tags = [""]
# pulsar insufficient decision data for account
metric_record = "pulsar.decisions.insufficient"
keyname = "pulsar.decisions.insufficient"
url = NS1_ENDPOINTS[keyname].format(
apiendpoint=self.api_endpoint, query=query_string + "&agg=jobid&result=ERR_INSUF"
)
urlList[keyname] = [url, metric_record, tags, metric_type]
# pulsar all route map hits
metric_record = "pulsar.routemap.hit"
keyname = "pulsar.routemap.hit"
url = NS1_ENDPOINTS[keyname].format(apiendpoint=self.api_endpoint, query=query_string)
urlList[keyname] = [url, metric_record, tags, metric_type]
# pulsar all route map misses
metric_record = "pulsar.routemap.miss"
keyname = "pulsar.routemap.miss"
url = NS1_ENDPOINTS[keyname].format(apiendpoint=self.api_endpoint, query=query_string)
urlList[keyname] = [url, metric_record, tags, metric_type]
return urlList
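# Hedged usage sketch (illustrative, not part of the integration): how a check
# might combine the URL builders above. The API endpoint value and the shape of
# the `check` object (only get_zone_records() is needed here) are assumptions.
def _example_build_urls(check):
    urls = Ns1Url("https://api.nsone.net", check)
    to_fetch = {}
    to_fetch.update(urls.get_stats_url_qps("qps", [{"example.com": None}]))
    to_fetch.update(urls.get_pulsar_url({"pulsar_geo": "*", "pulsar_asn": "*"}))
    # every value is [url, metric name, tags, metric type], as built above
    for metric_key, (url, metric_name, tags, metric_type) in to_fetch.items():
        print("{}: {} ({}) tags={} url={}".format(metric_key, metric_name, metric_type, tags, url))
    return to_fetch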
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Support for creating Kay forms from Datastore data models.
Taken from google.appengine.ext.db.djangoforms
Terminology notes:
- forms: always refers to the Kay newforms subpackage
- field: always refers to a Kay forms.Field instance
- property: always refers to a db.Property instance
Mapping between properties and fields:
+====================+===================+==============+====================+
| Property subclass | Field subclass | datatype | widget; notes |
+====================+===================+==============+====================+
| StringProperty | TextField | unicode | Textarea |
| | | | if multiline |
+--------------------+-------------------+--------------+--------------------+
| TextProperty | TextField | unicode | Textarea |
+--------------------+-------------------+--------------+--------------------+
| BlobProperty | FileField | str | skipped in v0.96 |
+--------------------+-------------------+--------------+--------------------+
| DateTimeProperty | DateTimeField | datetime | skipped |
| | | | if auto_now[_add] |
+--------------------+-------------------+--------------+--------------------+
| DateProperty | DateField | date | ditto |
+--------------------+-------------------+--------------+--------------------+
| TimeProperty | TimeField | time | ditto |
+--------------------+-------------------+--------------+--------------------+
| IntegerProperty | IntegerField | int or long | |
+--------------------+-------------------+--------------+--------------------+
| FloatProperty | FloatField | float | CharField in v0.96 |
+--------------------+-------------------+--------------+--------------------+
| BooleanProperty | BooleanField | bool | |
+--------------------+-------------------+--------------+--------------------+
| UserProperty | TextField | users.User | |
+--------------------+-------------------+--------------+--------------------+
| StringListProperty | TextField | list of str | Textarea |
+--------------------+-------------------+--------------+--------------------+
| LinkProperty | TextField | str | |
+--------------------+-------------------+--------------+--------------------+
| ReferenceProperty | ModelField* | db.Model | |
+--------------------+-------------------+--------------+--------------------+
| _ReverseReferenceP.| None | <iterable> | always skipped |
+====================+===================+==============+====================+
"""
import itertools
import logging
from google.appengine.api import users
from google.appengine.ext import db
from kay import exceptions
from kay.utils import forms
from kay.utils import datastructures
from kay.i18n import lazy_gettext as _
from kay.exceptions import ImproperlyConfigured
from kay.models import NamedModel
def monkey_patch(name, bases, namespace):
"""A 'metaclass' for adding new methods to an existing class.
In this version, existing methods can't be overridden; this is by
design, to avoid accidents.
Usage example:
class PatchClass(TargetClass):
__metaclass__ = monkey_patch
def foo(self, ...): ...
def bar(self, ...): ...
This is equivalent to:
def foo(self, ...): ...
def bar(self, ...): ...
TargetClass.foo = foo
TargetClass.bar = bar
PatchClass = TargetClass
Note that PatchClass becomes an alias for TargetClass; by convention
it is recommended to give PatchClass the same name as TargetClass.
"""
assert len(bases) == 1, 'Exactly one base class is required'
base = bases[0]
for name, value in namespace.iteritems():
if name not in ('__metaclass__', '__module__'):
assert name not in base.__dict__, "Won't override attribute %r" % (name,)
setattr(base, name, value)
return base
class Property(db.Property):
__metaclass__ = monkey_patch
def get_form_field(self, form_class=forms.TextField, **kwargs):
"""Return a Django form field appropriate for this property.
Args:
form_class: a forms.Field subclass, default forms.TextField
Additional keyword arguments are passed to the form_class constructor,
with certain defaults:
required: self.required
label: prettified self.verbose_name, if not None
widget: a forms.Select instance if self.choices is non-empty
initial: self.default, if not None
Returns:
A fully configured instance of form_class, or None if no form
field should be generated for this property.
"""
defaults = {'required': self.required}
if self.verbose_name is None:
defaults['label'] = self.name.capitalize().replace('_', ' ')
else:
defaults['label'] = self.verbose_name
if self.choices:
choices = []
if not self.required or (self.default is None and
'initial' not in kwargs):
choices.append(('', '---------'))
for choice in self.choices:
choices.append((unicode(choice), unicode(choice)))
defaults['choices'] = choices
form_class = forms.ChoiceField
if self.default is not None:
defaults['default'] = self.default
defaults.update(kwargs)
return form_class(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
Override this to do a property- or field-specific type conversion.
Args:
instance: a db.Model instance
Returns:
The property's value extracted from the instance, possibly
converted to a type suitable for a form field; possibly None.
By default this returns the instance attribute's value unchanged.
"""
return getattr(instance, self.name)
def make_value_from_form(self, value):
"""Convert a form value to a property value.
Override this to do a property- or field-specific type conversion.
Args:
value: the cleaned value retrieved from the form field
Returns:
A value suitable for assignment to a model instance's property;
possibly None.
By default this converts the value to self.data_type if it
isn't already an instance of that type, except if the value is
empty, in which case we return None.
"""
if value in (None, ''):
return None
if not isinstance(value, self.data_type):
value = self.data_type(value)
return value
class UserProperty(db.Property):
"""This class exists solely to log a warning when it is used."""
def __init__(self, *args, **kwds):
logging.warn("Please don't use modelforms.UserProperty; "
"use db.UserProperty instead.")
super(UserProperty, self).__init__(*args, **kwds)
class EmailProperty(db.EmailProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.EmailField}
defaults.update(kwargs)
return super(EmailProperty, self).get_form_field(**defaults)
class StringProperty(db.StringProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a string property.
This sets the widget default to forms.Textarea if the property's
multiline attribute is set.
"""
defaults = {}
if self.multiline:
defaults['widget'] = forms.Textarea
defaults.update(kwargs)
return super(StringProperty, self).get_form_field(**defaults)
class TextProperty(db.TextProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a text property.
This sets the widget default to forms.Textarea.
"""
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextProperty, self).get_form_field(**defaults)
class BlobProperty(db.BlobProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a blob property.
"""
if not hasattr(forms, 'FileField'):
return None
defaults = {'form_class': forms.FileField}
defaults.update(kwargs)
return super(BlobProperty, self).get_form_field(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
There is no way to convert a Blob into an initial value for a file
upload, so we always return None.
"""
return None
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This extracts the content from the UploadedFile instance returned
by the FileField instance.
"""
if value.__class__.__name__ == 'UploadedFile':
return db.Blob(value.content)
return super(BlobProperty, self).make_value_from_form(value)
class DateTimeProperty(db.DateTimeProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a date-time property.
This defaults to a DateTimeField instance, except if auto_now or
auto_now_add is set, in which case None is returned, as such
'auto' fields should not be rendered as part of the form.
"""
if self.auto_now or self.auto_now_add:
return None
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeProperty, self).get_form_field(**defaults)
class DateProperty(db.DateProperty):
# TODO:
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a date property.
This defaults to a DateField instance, except if auto_now or
auto_now_add is set, in which case None is returned, as such
'auto' fields should not be rendered as part of the form.
"""
if self.auto_now or self.auto_now_add:
return None
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateProperty, self).get_form_field(**defaults)
class TimeProperty(db.TimeProperty):
# TODO:
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a time property.
This defaults to a TimeField instance, except if auto_now or
auto_now_add is set, in which case None is returned, as such
'auto' fields should not be rendered as part of the form.
"""
if self.auto_now or self.auto_now_add:
return None
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeProperty, self).get_form_field(**defaults)
class IntegerProperty(db.IntegerProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an integer property.
This defaults to an IntegerField instance.
"""
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerProperty, self).get_form_field(**defaults)
class FloatProperty(db.FloatProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an integer property.
This defaults to a FloatField instance when using Django 0.97 or
later. For 0.96 this defaults to the CharField class.
"""
defaults = {}
if hasattr(forms, 'FloatField'):
defaults['form_class'] = forms.FloatField
defaults.update(kwargs)
return super(FloatProperty, self).get_form_field(**defaults)
class BooleanProperty(db.BooleanProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a boolean property.
This defaults to a BooleanField.
"""
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanProperty, self).get_form_field(**defaults)
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This is needed to ensure that False is not replaced with None.
"""
if value is None:
return None
if isinstance(value, basestring) and value.lower() == 'false':
return False
return bool(value)
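# Hedged illustration of the rule above: make_value_from_form(None) returns
# None, make_value_from_form('false') returns False (case-insensitive), and any
# other value falls through to bool(), e.g. 'True' -> True and '' -> False.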
class StringListProperty(db.StringListProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a StringList property.
This defaults to a Textarea widget with a blank initial value.
"""
defaults = {'field': forms.TextField(), 'form_class': forms.LineSeparated,
'min_size': 0}
defaults.update(kwargs)
return super(StringListProperty, self).get_form_field(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
This joins a list of strings with newlines.
"""
value = super(StringListProperty, self).get_value_for_form(instance)
if not value:
return None
if isinstance(value, list):
value = '\n'.join(value)
return value
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This breaks the string into lines.
"""
if not value:
return []
if isinstance(value, basestring):
value = value.splitlines()
return value
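# Hedged illustration of the round trip above: get_value_for_form() turns
# ['ham', 'spam'] into the textarea value 'ham\nspam', and
# make_value_from_form('ham\nspam') splits it back into ['ham', 'spam']; an
# empty form value becomes [].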
class LinkProperty(db.LinkProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a URL property.
This defaults to a URLField instance.
"""
defaults = {'form_class': forms.TextField}
defaults.update(kwargs)
return super(LinkProperty, self).get_form_field(**defaults)
class _WrapIter(object):
"""Helper class whose iter() calls a given function to get an iterator."""
def __init__(self, function):
self._function = function
def __iter__(self):
return self._function()
class ReferenceProperty(db.ReferenceProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a reference property.
This defaults to a ModelChoiceField instance.
"""
defaults = {'form_class': forms.ModelField,
'model': self.reference_class}
defaults.update(kwargs)
return super(ReferenceProperty, self).get_form_field(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
This return the key object for the referenced object, or None.
"""
value = super(ReferenceProperty, self).get_value_for_form(instance)
if value is not None:
value = value.key()
return value
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This turns a key string or object into a model instance.
"""
if value:
if not isinstance(value, db.Model):
value = db.get(value)
return value
class _ReverseReferenceProperty(db._ReverseReferenceProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a reverse reference.
This always returns None, since reverse references are always
automatic.
"""
return None
def property_clean(prop, value):
"""Apply Property level validation to value.
Calls .make_value_from_form() and .validate() on the property and catches
exceptions generated by either. The exceptions are converted to
forms.ValidationError exceptions.
Args:
prop: The property to validate against.
value: The value to validate.
Raises:
forms.ValidationError if the value cannot be validated.
"""
if value is not None:
try:
prop.validate(prop.make_value_from_form(value))
except (db.BadValueError, ValueError), e:
raise forms.ValidationError(unicode(e))
class ModelFormOptions(object):
"""A simple class to hold internal options for a ModelForm class.
Instance attributes:
model: a db.Model class, or None
fields: list of field names to be defined, or None
exclude: list of field names to be skipped, or None
These instance attributes are copied from the 'Meta' class that is
usually present in a ModelForm class, and all default to None.
"""
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.help_texts = getattr(options, 'help_texts', {})
class ModelFormMetaclass(forms.FormMeta):
"""The metaclass for the ModelForm class defined below.
See the docs for ModelForm below for a usage example.
"""
bad_attr_names = ('data', 'errors', 'raw_data')
def __new__(cls, class_name, bases, attrs):
"""Constructor for a new ModelForm class instance.
The signature of this method is determined by Python internals.
"""
fields = sorted(((field_name, attrs.pop(field_name))
for field_name, obj in attrs.items()
if isinstance(obj, forms.Field)),
key=lambda obj: obj[1].creation_counter)
for base in bases[::-1]:
if hasattr(base, '_base_fields'):
fields = base._base_fields.items() + fields
declared_fields = datastructures.OrderedDict()
for field_name, obj in fields:
declared_fields[field_name] = obj
opts = ModelFormOptions(attrs.get('Meta', None))
attrs['_meta'] = opts
base_models = []
for base in bases:
base_opts = getattr(base, '_meta', None)
base_model = getattr(base_opts, 'model', None)
if base_model is not None:
base_models.append(base_model)
if len(base_models) > 1:
raise exceptions.ImproperlyConfigured(
"%s's base classes define more than one model." % class_name)
if opts.model is not None:
if base_models and base_models[0] is not opts.model:
raise exceptions.ImproperlyConfigured(
'%s defines a different model than its parent.' % class_name)
model_fields = datastructures.OrderedDict()
for name, prop in sorted(opts.model.properties().iteritems(),
key=lambda prop: prop[1].creation_counter):
if opts.fields and name not in opts.fields:
continue
if opts.exclude and name in opts.exclude:
continue
form_field = prop.get_form_field(
help_text=opts.help_texts.get(name, None))
if form_field is not None:
model_fields[name] = form_field
for bad_attr_name in ModelFormMetaclass.bad_attr_names:
if model_fields.has_key(bad_attr_name):
raise ImproperlyConfigured("When you use ModelForm, you can not"
" use these names as field names: %s"
% str(ModelFormMetaclass.bad_attr_names))
# Preserve order in model definition
original_ordered_names = model_fields.keys()
model_fields.update(declared_fields)
extra_index = len(original_ordered_names)
for name, field in model_fields.iteritems():
if name in original_ordered_names:
field._position_hint = original_ordered_names.index(name)
else:
field._position_hint = extra_index
extra_index += 1
attrs['_base_fields'] = model_fields
props = opts.model.properties()
for name, field in model_fields.iteritems():
prop = props.get(name)
if prop:
def check_for_property_field(form, value, prop=prop):
property_clean(prop, value)
return True
field.validators.append(check_for_property_field)
else:
attrs['_base_fields'] = declared_fields
# corresponds with form not rendered
# maybe i should handle this in forms.FormMeta
return super(ModelFormMetaclass, cls).__new__(cls,
class_name, bases, attrs)
class BaseModelForm(forms.Form):
"""Base class for ModelForm.
This overrides the forms.BaseForm constructor and adds a save() method.
This class does not have a special metaclass; the magic metaclass is
added by the subclass ModelForm.
"""
def __init__(self, instance=None, initial=None, **kwargs):
"""Constructor.
Args (all optional and defaulting to None):
data: dict of data values, typically from a POST request
initial: dict of initial values
instance: Model instance to be used for additional initial values
Except for initial and instance, these arguments are passed on to
the forms.BaseForm constructor unchanged, but only if not None.
Leave these as None to fall back to the forms.BaseForm defaults.
"""
opts = self._meta
self.instance = instance
object_data = {}
if instance is not None:
for name, prop in instance.properties().iteritems():
if opts.fields and name not in opts.fields:
continue
if opts.exclude and name in opts.exclude:
continue
object_data[name] = prop.get_value_for_form(instance)
if initial is not None:
object_data.update(initial)
kwargs['initial'] = object_data
kwargs = dict((name, value)
for name, value in kwargs.iteritems()
if value is not None)
super(BaseModelForm, self).__init__(**kwargs)
def save(self, commit=True, **kwargs):
"""Save this form's cleaned data into a model instance.
Args:
commit: optional bool, default True; if true, the model instance
is also saved to the datastore.
Returns:
A model instance. If a model instance was already associated
with this form instance (either passed to the constructor with
instance=... or by a previous save() call), that same instance
is updated and returned; if no instance was associated yet, one
is created by this call.
Raises:
ValueError if the data couldn't be validated.
"""
if not self.is_valid:
raise ValueError('Cannot save a non valid form')
opts = self._meta
instance = self.instance
if instance is None:
fail_message = 'created'
else:
fail_message = 'updated'
if self.errors:
raise ValueError("The %s could not be %s because the data didn't "
'validate.' % (opts.model.kind(), fail_message))
cleaned_data = self.data
converted_data = {}
propiter = itertools.chain(
opts.model.properties().iteritems(),
iter([('key_name', StringProperty(name='key_name'))])
)
for name, prop in propiter:
if cleaned_data.has_key(name):
value = cleaned_data.get(name)
if not value and prop.default is not None:
value = prop.default
converted_data[name] = prop.make_value_from_form(value)
try:
converted_data.update(kwargs)
if instance is None:
if issubclass(opts.model, NamedModel):
logging.debug("commit argument ignored.")
instance = opts.model.create_new_entity(**converted_data)
else:
instance = opts.model(**converted_data)
self.instance = instance
else:
for name, value in converted_data.iteritems():
if name == 'key_name':
continue
setattr(instance, name, value)
except db.BadValueError, err:
raise ValueError('The %s could not be %s (%s)' %
(opts.model.kind(), fail_message, err))
if commit:
instance.put()
return instance
class ModelForm(BaseModelForm):
"""A Django form tied to a Datastore model.
Note that this particular class just sets the metaclass; all other
functionality is defined in the base class, BaseModelForm, above.
Usage example:
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
# First, define a model class
class MyModel(db.Model):
foo = db.StringProperty()
bar = db.IntegerProperty(required=True, default=42)
# Now define a form class
class MyForm(djangoforms.ModelForm):
class Meta:
model = MyModel
You can now instantiate MyForm without arguments to create an
unbound form, or with data from a POST request to create a bound
form. You can also pass a model instance with the instance=...
keyword argument to create an unbound (!) form whose initial values
are taken from the instance. For bound forms, use the save() method
to return a model instance.
Like Django's own corresponding ModelForm class, the nested Meta
class can have two other attributes:
fields: if present and non-empty, a list of field names to be
included in the form; properties not listed here are
excluded from the form
exclude: if present and non-empty, a list of field names to be
excluded from the form
If exclude and fields are both non-empty, names occurring in both
  are excluded (i.e. exclude wins). By default all properties in the
  model have a corresponding form field defined.
It is also possible to define form fields explicitly. This gives
more control over the widget used, constraints, initial value, and
so on. Such form fields are not affected by the nested Meta class's
fields and exclude attributes.
If you define a form field named 'key_name' it will be treated
specially and will be used as the value for the key_name parameter
to the Model constructor. This allows you to create instances with
named keys. The 'key_name' field will be ignored when updating an
instance (although it will still be shown on the form).
"""
__metaclass__ = ModelFormMetaclass
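# A minimal usage sketch (illustrative only), assuming the App Engine `db`
# module imported above; _SketchModel/_SketchForm mirror the example in the
# ModelForm docstring, and `post_data` stands in for a dict of POST values.
class _SketchModel(db.Model):
  foo = db.StringProperty()
  bar = db.IntegerProperty(required=True, default=42)
class _SketchForm(ModelForm):
  class Meta:
    model = _SketchModel
def _modelform_usage_sketch(post_data, existing_entity=None):
  # A bound form: pass the POST data; pass instance= to edit an existing
  # entity instead of creating a new one.
  form = _SketchForm(data=post_data, instance=existing_entity)
  if form.is_valid():
    # save() converts the cleaned form values back into property values and,
    # with commit=True (the default), puts the entity in the datastore.
    return form.save()
  # On validation errors, render the form again; str(form) emits the HTML
  # table rows including the error messages.
  return str(form)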
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
GRADIENT_TESTS_DTYPES = (dtypes.float16, dtypes.float32, dtypes.float64)
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape([
functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1)
] + shape[-ndims + 1:])
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(shape[:ndims - 1] + [
functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1)
])
def _NumpyScatterNd(ref, indices, updates, op):
ixdim = indices.shape[-1]
num_updates = indices.size // ixdim
total_nd = len(ref.shape)
slice_size = 1
for i in range(ixdim, total_nd):
slice_size *= ref.shape[i]
flat_indices = _FlatInnerDims(indices)
flat_updates = updates.reshape((num_updates, slice_size))
output_flat = _FlatOuterDims(ref, ixdim + 1)
for ix_updates, ix_output in enumerate(flat_indices):
ix_output = tuple(ix_output)
output_flat[ix_output] = op(output_flat[ix_output],
flat_updates[ix_updates])
return output_flat.reshape(ref.shape)
def _NumpyUpdate(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
def _NumpyAdd(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
def _NumpySub(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
def _NumpyMul(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u)
def _NumpyDiv(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p / u)
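# A small worked example (a sketch, not one of the tests below) of what the
# numpy reference implementations above compute: two scalar updates scattered
# into a length-5 array via _NumpyUpdate.
def _NumpyScatterNdExample():
  ref = np.zeros([5], dtype=np.float32)
  indices = np.array([[1], [3]], dtype=np.int32)  # one index row per update
  updates = np.array([10., 20.], dtype=np.float32)
  result = _NumpyUpdate(ref, indices, updates)
  # result == [0., 10., 0., 20., 0.], mirroring what
  # state_ops.scatter_nd_update writes into a variable of shape [5]
  # (compare testSimple below).
  return result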
class StatefulScatterNdTest(test.TestCase):
def _VariableRankTest(self,
np_scatter,
tf_scatter,
vtype,
itype,
repeat_indices=False):
np.random.seed(8)
ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
with self.cached_session(use_gpu=True):
for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
num_updates = indices_shape[0]
ixdim = indices_shape[-1]
indexable_area_shape = ()
for i in range(ixdim):
indexable_area_shape += (ref_shape[i],)
all_indices = [
list(coord)
for coord, _ in np.ndenumerate(
np.empty(indexable_area_shape, vtype))
]
np.random.shuffle(all_indices)
indices = np.array(all_indices[:num_updates])
if num_updates > 1 and repeat_indices:
indices = indices[:num_updates // 2]
for _ in range(num_updates - num_updates // 2):
indices = np.append(
indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
np.random.shuffle(indices)
indices = _AsType(indices[:num_updates], itype)
updates_shape = (num_updates,)
for i in range(ixdim, len(ref_shape)):
updates_shape += (ref_shape[i],)
updates = _AsType(np.random.randn(*(updates_shape)), vtype)
ref = _AsType(np.random.randn(*(ref_shape)), vtype)
# Scatter via numpy
new = ref.copy()
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref_var = variables.VariableV1(ref)
ref_var.initializer.run()
tf_scatter(ref_var, indices, updates).eval()
# Compare
self.assertAllClose(new, self.evaluate(ref_var))
def _VariableRankTests(self, np_scatter, tf_scatter):
for vtype in (np.int32, np.float16, np.float32, np.float64, np.complex64,
np.complex128):
for itype in (np.int32, np.int64):
self._VariableRankTest(np_scatter, tf_scatter, vtype, itype)
def testSimple(self):
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
ref = variables.Variable([0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
sess.run(init)
result = sess.run(scatter)
self.assertAllClose(result, expected)
def testSimpleResource(self):
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
ref = resource_variable_ops.ResourceVariable(
[0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
sess.run(init)
sess.run(scatter)
self.assertAllClose(ref.eval(), expected)
def testSimple2(self):
indices = constant_op.constant([[1, 0], [1, 1]], dtype=dtypes.int32)
updates = constant_op.constant([11., 12.], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
sess.run(init)
result = sess.run(scatter)
self.assertAllClose(result, expected)
def testSimple3(self):
indices = constant_op.constant([[1]], dtype=dtypes.int32)
updates = constant_op.constant([[11., 12.]], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
sess.run(init)
result = sess.run(scatter)
self.assertAllClose(result, expected)
def testVariableRankUpdate(self):
self._VariableRankTests(_NumpyUpdate, state_ops.scatter_nd_update)
def testVariableRankAdd(self):
self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add)
def testVariableRankSub(self):
self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub)
# TODO(ebrevdo): Re-enable when we need ScatterNdMul.
# def testVariableRankMul(self):
# self._VariableRankTests(_NumpyMul, state_ops.scatter_nd_mul)
# TODO(ebrevdo): Re-enable when we need ScatterNdDiv.
# def testVariableRankDiv(self):
# self._VariableRankTests(_NumpyDiv, state_ops.scatter_nd_div)
def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter):
for vtype in (np.int32, np.float16, np.float32, np.float64):
for itype in (np.int32, np.int64):
self._VariableRankTest(
np_scatter, tf_scatter, vtype, itype, repeat_indices=True)
def testScatterRepeatIndices(self):
"""This tests scatter_add using indices that repeat."""
self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add)
self._ScatterRepeatIndicesTest(_NumpySub, state_ops.scatter_nd_sub)
# TODO(ebrevdo): Re-enable when we need ScatterNdMul and ScatterNdDiv.
# self._ScatterRepeatIndicesTest(_NumpyMul, state_ops.scatter_nd_mul)
# self._ScatterRepeatIndicesTest(_NumpyDiv, state_ops.scatter_nd_div)
# TODO(simister): Re-enable once binary size increase due to
# extra templating is back under control and this op is re-enabled
# def testBooleanScatterUpdate(self):
# with self.session(use_gpu=False) as session:
# var = tf.Variable([True, False])
# update0 = tf.scatter_nd_update(var, [[1]], [True])
# update1 = tf.scatter_nd_update(
# var, tf.constant(
# [[0]], dtype=tf.int64), [False])
# var.initializer.run()
# session.run([update0, update1])
# self.assertAllEqual([False, True], self.evaluate(var))
def testScatterOutOfRangeCpu(self):
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
with self.cached_session(use_gpu=False):
ref = variables.VariableV1(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([[2], [0], [5]])
op(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([[-1], [0], [5]])
with self.assertRaisesOpError(
r"indices\[0\] = \[-1\] does not index into shape \[6\]"):
op(ref, indices, updates).eval()
indices = np.array([[2], [0], [6]])
with self.assertRaisesOpError(
r"indices\[2\] = \[6\] does not index into shape \[6\]"):
op(ref, indices, updates).eval()
def testRank3ValidShape(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
self.assertAllEqual(
state_ops.scatter_nd_update(ref, indices,
updates).get_shape().as_list(), shape)
def testResVarInvalidOutputShape(self):
res = variables.Variable(
initial_value=lambda: array_ops.zeros(shape=[], dtype=dtypes.float32),
dtype=dtypes.float32)
with self.cached_session():
res.initializer.run()
with self.assertRaisesOpError("Output must be at least 1-D"):
state_ops.scatter_nd_update(res, [[0]], [0.22]).eval()
def testExtraIndicesDimensions(self):
indices = array_ops.zeros([1, 1, 2], dtypes.int32)
updates = array_ops.zeros([1, 1], dtypes.int32)
shape = np.array([2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
scatter_update = state_ops.scatter_nd_update(ref, indices, updates)
self.assertAllEqual(scatter_update.get_shape().as_list(), shape)
expected_result = np.zeros([2, 2], dtype=np.int32)
with self.cached_session():
ref.initializer.run()
self.assertAllEqual(expected_result, self.evaluate(scatter_update))
def testRank3InvalidShape1(self):
indices = array_ops.zeros([3, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, "The outer \\d+ dimensions of indices\\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
def testRank3InvalidShape2(self):
indices = array_ops.zeros([2, 2, 1], dtypes.int32)
updates = array_ops.zeros([2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, "The inner \\d+ dimensions of input\\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
def testConcurrentUpdates(self):
num_updates = 10000
update_values = np.random.rand(num_updates)
ref = variables.Variable(np.zeros([2, 2]), dtype=dtypes.float64)
indices = constant_op.constant([[0, 1]] * num_updates, dtype=dtypes.int32)
updates = constant_op.constant(update_values, dtype=dtypes.float64)
expected_result = np.zeros([2, 2], dtype=np.float64)
expected_result[0, 1] = np.sum(update_values)
scatter = state_ops.scatter_nd_add(ref, indices, updates)
init = variables.global_variables_initializer()
with session.Session() as sess:
sess.run(init)
result = sess.run(scatter)
assert np.allclose(result, expected_result)
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if not test.IsBuiltWithCuda():
return
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
      # We don't test the implementation; just test that there are no failures.
with self.cached_session(force_gpu=True):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
# Indices out of range should not fail.
indices = np.array([-1, 0, 5])
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
op(ref, indices, updates).eval()
class ScatterNdTest(test.TestCase):
non_aliasing_add_test = False
def scatter_nd(self, indices, updates, shape, input_=None):
del input_ # input_ is not used in scatter_nd
return array_ops.scatter_nd(indices, updates, shape)
@test_util.run_in_graph_and_eager_modes
def testBool(self):
indices = constant_op.constant(
[[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant(
[False, True, False, True], dtype=dtypes.bool)
expected = np.array(
[False, False, False, True, False, False, False, True])
scatter = self.scatter_nd(indices, updates, shape=(8,))
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
    # Same index is updated twice by the same value.
indices = constant_op.constant(
[[4], [3], [3], [7]], dtype=dtypes.int32)
updates = constant_op.constant(
[False, True, True, True], dtype=dtypes.bool)
expected = np.array([
False, False, False, True, False, False, False, True])
scatter = self.scatter_nd(indices, updates, shape=(8,))
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
@test_util.run_in_graph_and_eager_modes
def testInvalidShape(self):
# TODO(apassos) figure out how to unify these errors
with self.assertRaises(errors.InvalidArgumentError
if context.executing_eagerly() else ValueError):
array_ops.scatter_nd(indices=[0], # this should be indices=[[0]]
updates=[0.0],
shape=[1])
def testString(self):
indices = constant_op.constant([[4], [3], [1], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["four", "three", "one", "seven"],
dtype=dtypes.string)
expected = np.array([b"", b"one", b"", b"three", b"four",
b"", b"", b"seven"])
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = sess.run(scatter)
self.assertAllEqual(expected, result)
    # Same index is updated twice by the same value.
indices = constant_op.constant([[4], [3], [3], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["a", "b", "b", "c"],
dtype=dtypes.string)
expected = np.array([b"", b"", b"", b"bb", b"a", b"", b"", b"c"])
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = sess.run(scatter)
self.assertAllEqual(expected, result)
    # Same index is updated twice by different values.
indices = constant_op.constant([[4], [3], [3], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["a", "b", "c", "d"],
dtype=dtypes.string)
expected = [np.array([b"", b"", b"", b"bc", b"a", b"", b"", b"d"]),
np.array([b"", b"", b"", b"cb", b"a", b"", b"", b"d"])]
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = sess.run(scatter)
self.assertTrue(np.array_equal(result, expected[0]) or
np.array_equal(result, expected[1]))
def testRank3ValidShape(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
self.assertAllEqual(
self.scatter_nd(indices, updates, shape).get_shape().as_list(), shape)
def testExtraIndicesDimensions(self):
indices = array_ops.zeros([1, 1, 2], dtypes.int32)
updates = array_ops.zeros([1, 1], dtypes.int32)
shape = np.array([2, 2])
scatter = self.scatter_nd(indices, updates, shape)
self.assertAllEqual(scatter.get_shape().as_list(), shape)
expected_result = np.zeros([2, 2], dtype=np.int32)
with self.cached_session():
self.assertAllEqual(expected_result, self.evaluate(scatter))
def testUndefinedIndicesShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=None)
updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
shape = constant_op.constant([2, 2, 2], dtypes.int32)
self.scatter_nd(indices, updates, shape)
def testUndefinedUpdatesShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
updates = array_ops.placeholder(dtypes.int32, shape=None)
shape = constant_op.constant([2, 2, 2], dtypes.int32)
self.scatter_nd(indices, updates, shape)
def testUndefinedOutputShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
shape = array_ops.placeholder(dtypes.int32, shape=[None])
self.scatter_nd(indices, updates, shape)
def testEmptyOutputShape1(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.assertRaisesWithPredicateMatch(
ValueError, "Indices and updates specified for empty output shape"):
self.scatter_nd(indices, updates, shape)
def testEmptyOutputShape2(self):
indices = array_ops.placeholder(dtypes.int32, shape=None)
updates = array_ops.placeholder(dtypes.int32, shape=None)
shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.cached_session():
with self.assertRaisesOpError(
"Indices and updates specified for empty output"):
self.scatter_nd(indices, updates, shape).eval(feed_dict={
indices: np.zeros([2, 2, 2], dtype=np.int32),
updates: np.zeros([2, 2, 2], dtype=np.int32)
})
def testEmptyOutputShape3(self):
indices = array_ops.zeros([0], dtypes.int32)
updates = array_ops.zeros([0], dtypes.int32)
shape = constant_op.constant([0], dtypes.int32)
scatter = self.scatter_nd(indices, updates, shape)
with self.cached_session():
self.assertEqual(scatter.eval().size, 0)
def testRank3InvalidShape1(self):
indices = array_ops.zeros([3, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, "The outer \\d+ dimensions of indices\\.shape="):
self.scatter_nd(indices, updates, shape)
def testRank3InvalidShape2(self):
indices = array_ops.zeros([2, 2, 1], dtypes.int32)
updates = array_ops.zeros([2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, "The inner \\d+ dimensions of (input|output)\\.shape="):
self.scatter_nd(indices, updates, shape)
def testGradientsRank2ElementUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
updates = constant_op.constant([1, 4], dtype=dtype)
shape = constant_op.constant([2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([1, 4], dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[1, 2], [3, 4]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
def testGradientsRank2SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
updates = constant_op.constant([[3, 4], [1, 2]], dtype=dtype)
shape = constant_op.constant([2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[3, 4], [1, 2]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([[1, 2], [3, 4]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[3, 4], [1, 2]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
def testGradientsRank3SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
dtype=dtypes.int32)
updates = constant_op.constant([[[5, 7], [2, 4]], [[1, 3], [6, 8]]],
dtype=dtype)
shape = constant_op.constant([2, 2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([[[3, 4], [5, 6]], [[1, 2], [7, 8]]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
def testGradientsRank7SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant(
[[[[[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]]]],
dtype=dtypes.int32)
updates = constant_op.constant(
[[[[[[[5, 6], [2, 4]]]], [[[[1, 3], [6, 8]]]]]]], dtype=dtype)
shape = constant_op.constant([1, 1, 2, 1, 1, 2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant(
[[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array(
[[[[[[[3, 4], [5, 6]]]], [[[[1, 2], [7, 8]]]]]]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array(
[[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
  def testScatterNdRepeatedIndicesAdd(self):
indices = array_ops.zeros([100000, 1], dtypes.int32)
values = np.random.randn(100000)
shape = [1]
with self.cached_session():
val = self.scatter_nd(indices, values, shape).eval()
self.assertAllClose([np.sum(values)], val)
def testSmokeScatterNdBatch2DSliceDim2(self):
with self.cached_session():
indices = array_ops.zeros([3, 5, 2], dtype=dtypes.int32)
values = array_ops.zeros([3, 5, 7])
shape = [4, 6, 7]
self.scatter_nd(indices, values, shape).eval()
def testSmokeScatterNdBatch1DSliceDim2(self):
with self.cached_session():
indices = array_ops.zeros([0, 2], dtype=dtypes.int32)
values = array_ops.zeros([0, 7])
shape = [4, 6, 7]
self.scatter_nd(indices, values, shape).eval()
def testSmokeScatterNdBatch1DSliceDim3ShapeRank7(self):
with self.cached_session():
indices = array_ops.zeros([1, 3], dtype=dtypes.int32)
values = array_ops.zeros([1, 6, 7, 8, 9])
shape = [3, 4, 5, 6, 7, 8, 9]
self.scatter_nd(indices, values, shape).eval()
def testSmokeScatterNdBatch2DSliceDim3ShapeRank7(self):
with self.cached_session():
indices = array_ops.zeros([1, 2, 3], dtype=dtypes.int32)
values = array_ops.zeros([1, 2, 6, 7, 8, 9])
shape = [3, 4, 5, 6, 7, 8, 9]
self.scatter_nd(indices, values, shape).eval()
class ScatterNdNonAliasingAddTest(ScatterNdTest):
non_aliasing_add_test = True
def scatter_nd(self, indices, updates, shape, input_=None):
input_ = (input_ if input_ is not None else array_ops.zeros(
shape, dtype=updates.dtype))
return array_ops.scatter_nd_non_aliasing_add(input_, indices, updates)
def testString(self):
# Not supported yet.
pass
if __name__ == "__main__":
test.main()
|
|
# -*- test-case-name: calendarserver.provision.test.test_root -*-
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
__all__ = [
"RootResource",
]
try:
from twext.python.sacl import checkSACL
except ImportError:
# OS X Server SACLs not supported on this system, make SACL check a no-op
checkSACL = lambda *ignored: True
from twext.python.log import Logger
from twisted.cred.error import LoginFailed, UnauthorizedLogin
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twisted.python.reflect import namedClass
from twisted.web.error import Error as WebError
from twistedcaldav.cache import DisabledCache
from twistedcaldav.cache import MemcacheResponseCache, MemcacheChangeNotifier
from twistedcaldav.cache import _CachedResponseResource
from twistedcaldav.config import config
from twistedcaldav.directory.principal import DirectoryPrincipalResource
from twistedcaldav.extensions import DAVFile, CachingPropertyStore
from twistedcaldav.extensions import DirectoryPrincipalPropertySearchMixIn
from twistedcaldav.extensions import ReadOnlyResourceMixIn
from twistedcaldav.resource import CalDAVComplianceMixIn
from txdav.who.delegates import CachingDelegates
from txdav.who.wiki import DirectoryService as WikiDirectoryService
from txdav.who.wiki import uidForAuthToken
from txweb2 import responsecode
from txweb2.auth.wrapper import UnauthorizedResponse
from txweb2.dav.xattrprops import xattrPropertyStore
from txweb2.http import HTTPError, StatusResponse, RedirectResponse
log = Logger()
class RootResource(
ReadOnlyResourceMixIn, DirectoryPrincipalPropertySearchMixIn,
CalDAVComplianceMixIn, DAVFile
):
"""
    A special root resource that contains support for checking SACLs
    as well as adding response filters.
"""
useSacls = False
# Mapping of top-level resource paths to SACLs. If a request path
    # starts with any of these, then the list of SACLs is checked. If the
# request path does not start with any of these, then no SACLs are checked.
saclMap = {
"addressbooks": ("addressbook",),
"calendars": ("calendar",),
"directory": ("addressbook",),
"principals": ("addressbook", "calendar"),
"webcal": ("calendar",),
}
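    # For example, a request under /calendars/... has top-level segment
    # "calendars", so the "calendar" SACL is checked, while a request under
    # /principals/... checks both the "addressbook" and "calendar" SACLs.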
# If a top-level resource path starts with any of these, an unauthenticated
# request is redirected to the auth url (config.WebCalendarAuthPath)
authServiceMap = {
"webcal": True,
}
def __init__(self, path, *args, **kwargs):
super(RootResource, self).__init__(path, *args, **kwargs)
if config.EnableSACLs:
self.useSacls = True
self.contentFilters = []
if (
config.EnableResponseCache and
config.Memcached.Pools.Default.ClientEnabled
):
self.responseCache = MemcacheResponseCache(self.fp)
            # These class attributes need to be set up with our memcache
            # notifier
DirectoryPrincipalResource.cacheNotifierFactory = MemcacheChangeNotifier
CachingDelegates.cacheNotifier = MemcacheChangeNotifier(None, cacheHandle="PrincipalToken")
else:
self.responseCache = DisabledCache()
if config.ResponseCompression:
from txweb2.filter import gzip
self.contentFilters.append((gzip.gzipfilter, True))
def deadProperties(self):
if not hasattr(self, "_dead_properties"):
# Get the property store from super
deadProperties = (
namedClass(config.RootResourcePropStoreClass)(self)
)
# Wrap the property store in a memory store
if isinstance(deadProperties, xattrPropertyStore):
deadProperties = CachingPropertyStore(deadProperties)
self._dead_properties = deadProperties
return self._dead_properties
def defaultAccessControlList(self):
return succeed(config.RootResourceACL)
@inlineCallbacks
def checkSACL(self, request):
"""
Check SACLs against the current request
"""
topLevel = request.path.strip("/").split("/")[0]
saclServices = self.saclMap.get(topLevel, None)
if not saclServices:
returnValue(True)
try:
authnUser, authzUser = yield self.authenticate(request)
except Exception:
response = (yield UnauthorizedResponse.makeResponse(
request.credentialFactories,
request.remoteAddr
))
raise HTTPError(response)
# SACLs are enabled in the plist, but there may not actually
# be a SACL group assigned to this service. Let's see if
# unauthenticated users are allowed by calling CheckSACL
# with an empty string.
if authzUser is None:
for saclService in saclServices:
if checkSACL("", saclService):
# No group actually exists for this SACL, so allow
# unauthenticated access
returnValue(True)
# There is a SACL group for at least one of the SACLs, so no
# unauthenticated access
response = (yield UnauthorizedResponse.makeResponse(
request.credentialFactories,
request.remoteAddr
))
log.info("Unauthenticated user denied by SACLs")
raise HTTPError(response)
# Cache the authentication details
request.authnUser = authnUser
request.authzUser = authzUser
# Figure out the "username" from the davxml.Principal object
username = authzUser.record.shortNames[0]
access = False
for saclService in saclServices:
if checkSACL(username, saclService):
# Access is allowed
access = True
break
# Mark SACLs as having been checked so we can avoid doing it
# multiple times
request.checkedSACL = True
if access:
returnValue(True)
log.warn(
"User {user!r} is not enabled with the {sacl!r} SACL(s)",
user=username, sacl=saclServices
)
raise HTTPError(responsecode.FORBIDDEN)
@inlineCallbacks
def locateChild(self, request, segments):
for filter in self.contentFilters:
request.addResponseFilter(filter[0], atEnd=filter[1])
# Examine cookies for wiki auth token; if there, ask the paired wiki
# server for the corresponding record name. If that maps to a
        # principal, assign that to authnUser.
# Also, certain non-browser clients send along the wiki auth token
# sometimes, so we now also look for the presence of x-requested-with
# header that the webclient sends. However, in the case of a GET on
        # /webcal that header won't be sent, so we allow wiki auth
# for any path in the authServiceMap even if that header is missing.
allowWikiAuth = False
topLevel = request.path.strip("/").split("/")[0]
if self.authServiceMap.get(topLevel, False):
allowWikiAuth = True
if not hasattr(request, "checkedWiki"):
# Only do this once per request
request.checkedWiki = True
wikiConfig = config.Authentication.Wiki
cookies = request.headers.getHeader("cookie")
requestedWith = request.headers.hasHeader("x-requested-with")
if (
wikiConfig["Enabled"] and
(requestedWith or allowWikiAuth) and
cookies is not None
):
for cookie in cookies:
if cookie.name == wikiConfig["Cookie"]:
token = cookie.value
break
else:
token = None
if token is not None and token != "unauthenticated":
log.debug(
"Wiki sessionID cookie value: {token}", token=token
)
try:
uid = yield uidForAuthToken(token, wikiConfig["EndpointDescriptor"])
if uid == "unauthenticated":
uid = None
except WebError as w:
uid = None
                        # NOT_FOUND status means it's an unknown token
if int(w.status) == responsecode.NOT_FOUND:
log.debug(
"Unknown wiki token: {token}", token=token
)
else:
log.error(
"Failed to look up wiki token {token}: {msg}",
token=token,
msg=w.message
)
except Exception as e:
log.error(
"Failed to look up wiki token: {error}",
error=e
)
uid = None
if uid is not None:
log.debug(
"Wiki lookup returned uid: {uid}", uid=uid
)
principal = yield self.principalForUID(request, uid)
if principal:
log.debug(
"Wiki-authenticated principal {uid} "
"being assigned to authnUser and authzUser",
uid=uid
)
request.authzUser = request.authnUser = principal
if not hasattr(request, "authzUser") and config.WebCalendarAuthPath:
topLevel = request.path.strip("/").split("/")[0]
if self.authServiceMap.get(topLevel, False):
# We've not been authenticated and the auth service is enabled
# for this resource, so redirect.
# Use config.ServerHostName if no x-forwarded-host header,
# otherwise use the final hostname in x-forwarded-host.
host = request.headers.getRawHeaders(
"x-forwarded-host",
[config.ServerHostName]
)[-1].split(",")[-1].strip()
port = 443 if (config.EnableSSL or config.BehindTLSProxy) else 80
scheme = "https" if config.EnableSSL else "http"
response = RedirectResponse(
request.unparseURL(
host=host,
port=port,
scheme=scheme,
path=config.WebCalendarAuthPath,
querystring="redirect={}://{}{}".format(
scheme,
host,
request.path
)
),
temporary=True
)
raise HTTPError(response)
# We don't want the /inbox resource to pay attention to SACLs because
# we just want it to use the hard-coded ACL for the imip reply user.
# The /timezones resource is used by the wiki web calendar, so open
# up that resource.
if segments[0] in ("inbox", "timezones"):
request.checkedSACL = True
elif (
(
len(segments) > 2 and
segments[0] in ("calendars", "principals") and
(
segments[1] == "wikis" or
(
segments[1] == "__uids__" and
segments[2].startswith(WikiDirectoryService.uidPrefix)
)
)
)
):
# This is a wiki-related calendar resource. SACLs are not checked.
request.checkedSACL = True
            # The authzUser value is set to that of the wiki principal if
# not already set.
if not hasattr(request, "authzUser") and segments[2]:
wikiUid = None
if segments[1] == "wikis":
wikiUid = "{}{}".format(WikiDirectoryService.uidPrefix, segments[2])
else:
wikiUid = segments[2]
if wikiUid:
log.debug(
"Wiki principal {name} being assigned to authzUser",
name=wikiUid
)
request.authzUser = yield self.principalForUID(request, wikiUid)
elif (
self.useSacls and
not hasattr(request, "checkedSACL")
):
yield self.checkSACL(request)
if config.RejectClients:
#
# Filter out unsupported clients
#
agent = request.headers.getHeader("user-agent")
if agent is not None:
for reject in config.RejectClients:
if reject.search(agent) is not None:
log.info("Rejecting user-agent: {agent}", agent=agent)
raise HTTPError(StatusResponse(
responsecode.FORBIDDEN,
"Your client software ({}) is not allowed to "
"access this service."
.format(agent)
))
if not hasattr(request, "authnUser"):
try:
authnUser, authzUser = yield self.authenticate(request)
request.authnUser = authnUser
request.authzUser = authzUser
except (UnauthorizedLogin, LoginFailed):
response = yield UnauthorizedResponse.makeResponse(
request.credentialFactories,
request.remoteAddr
)
raise HTTPError(response)
if (
config.EnableResponseCache and
request.method == "PROPFIND" and
not getattr(request, "notInCache", False) and
len(segments) > 1
):
try:
if not getattr(request, "checkingCache", False):
request.checkingCache = True
response = yield self.responseCache.getResponseForRequest(
request
)
if response is None:
request.notInCache = True
raise KeyError("Not found in cache.")
returnValue((_CachedResponseResource(response), []))
except KeyError:
pass
child = yield super(RootResource, self).locateChild(
request, segments
)
returnValue(child)
@inlineCallbacks
def principalForUID(self, request, uid):
principal = None
directory = request.site.resource.getDirectory()
record = yield directory.recordWithUID(uid)
if record is not None:
username = record.shortNames[0]
log.debug(
"Wiki user record for user {user}: {record}",
user=username, record=record
)
for collection in self.principalCollections():
principal = yield collection.principalForRecord(record)
if principal is not None:
break
returnValue(principal)
def http_COPY(self, request):
return responsecode.FORBIDDEN
def http_MOVE(self, request):
return responsecode.FORBIDDEN
def http_DELETE(self, request):
return responsecode.FORBIDDEN
|
|
import unittest2 as unittest
import httplib
import uuid
import json
from xml.etree import ElementTree
class HttpTestCase(unittest.TestCase):
"""Performs generic HTTP request testing.
Defines a ``request`` method for use in test cases that makes
HTTP requests, and two new asserts:
* assertResponseSuccessful
* assertResponseStatus
"""
def request(self, host='127.0.0.1', port=80, method='GET', path='/',
headers=None, body=None, assert_status=None):
"""Perform request and fetch httplib.HTTPResponse from the server"""
# Initialize headers dictionary
headers = {} if not headers else headers
# Initialize a connection
connection = httplib.HTTPConnection(host, port, timeout=20)
# Perform the request
connection.request(method, path, body, headers)
        # Retrieve the response so we can go ahead and close the connection
response = connection.getresponse()
response.body = response.read()
# Close the connection
connection.close()
# Automatically assert HTTP status code
if assert_status:
self.assertResponseStatus(response, assert_status)
else:
self.assertResponseSuccessful(response)
# Contains the response headers, body, etc
return response
def assertResponseSuccessful(self, response):
"""Asserts that a status code lies inside the 2xx range
:param response: :py:class:`httplib.HTTPResponse` to be
verified to have a status code between 200 and 299.
example::
            >>> self.assertResponseSuccessful(response)
"""
self.assertTrue(response.status >= 200 and response.status <= 299,
'Status code %d is outside of the expected range (2xx)\n\n%s' %
(response.status, response.body))
def assertResponseStatus(self, response, assert_status):
"""Asserts a specific status code on the response
:param response: :py:class:`httplib.HTTPResponse`
:param assert_status: The specific ``status`` result expected
example::
>>> self.assertResponseStatus(response, 203)
"""
self.assertEqual(response.status, assert_status,
            'Status code %s is not %s, as expected\n\n%s' %
(response.status, assert_status, response.body))
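# A short illustrative sketch (not part of the original cases) of how the
# helpers above are meant to be used; the host, port and paths are
# placeholders, and the method names avoid the test_ prefix so the sketch is
# never collected by a test runner.
class ExampleHttpUsage(HttpTestCase):
    def verify_root_is_reachable(self):
        # With no assert_status given, request() asserts a 2xx response
        response = self.request(host='127.0.0.1', port=80, method='GET',
            path='/')
        # The body is read and attached before the connection is closed,
        # so it can still be inspected here
        self.assertTrue(response.body is not None)
    def verify_missing_path_returns_404(self):
        # Passing assert_status makes request() assert that exact code
        self.request(method='GET', path='/no-such-path', assert_status=404)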
class RestfulTestCase(HttpTestCase):
"""Performs restful HTTP request testing"""
def restful_request(self, headers=None, as_json=None, as_xml=None,
**kwargs):
"""Encodes and decodes (JSON & XML) HTTP requests and responses.
Dynamically encodes json or xml as request body if one is provided.
.. WARNING::
* Existing Content-Type header will be overwritten.
* If both as_json and as_xml are provided, as_xml is ignored.
* If either as_json or as_xml AND a body is provided, the body
is ignored.
        Dynamically sets a 'json' or 'xml' attribute on the response based
        on the detected response content type, and fails the current test
        case if the request is unsuccessful.
        response.json: standard python dictionary
        response.xml: xml.etree ElementTree element
"""
# Initialize headers dictionary
headers = {} if not headers else headers
        # Pull any explicit body out of kwargs so it can never be passed
        # twice to request() below
        body = kwargs.pop('body', None)
        # Attempt to encode JSON and XML automatically, if requested
        if as_json:
            body = RestfulTestCase._encode_json(as_json)
            headers['Content-Type'] = 'application/json'
        elif as_xml:
            body = as_xml
            headers['Content-Type'] = 'application/xml'
            # Assume the client wants xml back if it didn't specify
            if 'Accept' not in headers:
                headers['Accept'] = 'application/xml'
# Perform the HTTP request/response
response = self.request(headers=headers, body=body, **kwargs)
# Attempt to parse JSON and XML automatically, if detected
response = self._decode_response_body(response)
# Contains the decoded response as_json/as_xml, etc
return response
@staticmethod
def _encode_json(data):
"""Returns a JSON-encoded string of the given python dictionary
:param data: python object to be encoded into JSON
:returns: string of JSON encoded data
"""
return json.dumps(data)
def _decode_response_body(self, response):
"""Detects response body type, and attempts to decode it
:param response: :py:class:`httplib.HTTPResponse`
:returns: response object with additions:
        If the content type is application/json, the response will have an
        additional attribute ``json`` that will have the decoded JSON
        result (typically a dict).
        If the content type is application/xml, the response will have an
        additional attribute ``xml`` that will have an ElementTree
        result.
"""
        if response.body is not None and response.body.strip():
if 'application/json' in response.getheader('Content-Type', ''):
response.json = self._decode_json(response.body)
elif 'application/xml' in response.getheader('Content-Type', ''):
response.xml = self._decode_xml(response.body)
return response
@staticmethod
def _decode_json(json_str):
"""Returns a dict of the given JSON string"""
return json.loads(json_str)
@staticmethod
def _decode_xml(xml_str):
"""Returns an ElementTree of the given XML string"""
return ElementTree.XML(xml_str)
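# A sketch of the encode/decode round trip documented in restful_request()
# above; the path, port and payload are placeholders rather than real
# endpoints, and the method name avoids the test_ prefix so it is never
# collected by a test runner.
class ExampleRestfulUsage(RestfulTestCase):
    def create_example_widget(self):
        payload = {'widget': {'name': 'example'}}
        # as_json is serialized into the request body and Content-Type is
        # forced to application/json; assert_status is forwarded to request()
        response = self.restful_request(method='POST', path='/widgets',
            port=8080, as_json=payload, assert_status=201)
        # If the reply came back as application/json, the decoded dict is
        # available as response.json (response.xml for application/xml)
        return getattr(response, 'json', None)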
class ApiTestCase(RestfulTestCase):
"""Abstracts REST verbs & resources of the service & admin API."""
service_token = None
admin_token = None
def service_request(self, version='2.0', path='', port=5000, headers=None,
**kwargs):
"""Returns a request to the service API"""
# Initialize headers dictionary
headers = {} if not headers else headers
path = ApiTestCase._version_path(version, path)
if self.service_token:
headers['X-Auth-Token'] = self.service_token
elif self.admin_token:
headers['X-Auth-Token'] = self.admin_token
return self.restful_request(port=port, path=path, headers=headers,
**kwargs)
def admin_request(self, version='2.0', path='', port=35357, headers=None,
**kwargs):
"""Returns a request to the admin API"""
# Initialize headers dictionary
headers = {} if not headers else headers
path = ApiTestCase._version_path(version, path)
if self.admin_token:
headers['X-Auth-Token'] = self.admin_token
return self.restful_request(port=port, path=path, headers=headers,
**kwargs)
@staticmethod
def _version_path(version, path):
"""Prepend the given path with the API version.
An empty version results in no version being prepended."""
if version:
return '/v' + str(version) + str(path)
else:
return str(path)
def post_token(self, **kwargs):
"""POST /tokens"""
return self.service_request(method='POST', path='/tokens', **kwargs)
def get_token(self, token_id, **kwargs):
"""GET /tokens/{token_id}"""
return self.admin_request(method='GET',
path='/tokens/%s' % (token_id,), **kwargs)
def get_token_belongsto(self, token_id, tenant_id, **kwargs):
"""GET /tokens/{token_id}?belongsTo={tenant_id}"""
return self.admin_request(method='GET',
path='/tokens/%s?belongsTo=%s' % (token_id, tenant_id), **kwargs)
def check_token(self, token_id, **kwargs):
"""HEAD /tokens/{token_id}"""
return self.admin_request(method='HEAD',
path='/tokens/%s' % (token_id,), **kwargs)
def check_token_belongs_to(self, token_id, tenant_id, **kwargs):
"""HEAD /tokens/{token_id}?belongsTo={tenant_id}"""
return self.admin_request(method='HEAD',
path='/tokens/%s?belongsTo=%s' % (token_id, tenant_id), **kwargs)
def delete_token(self, token_id, **kwargs):
"""DELETE /tokens/{token_id}"""
return self.admin_request(method='DELETE',
path='/tokens/%s' % (token_id,), **kwargs)
def post_tenant(self, **kwargs):
"""POST /tenants"""
return self.admin_request(method='POST', path='/tenants', **kwargs)
def get_tenants(self, **kwargs):
"""GET /tenants"""
if 'request_type' in kwargs and \
kwargs.pop('request_type') == 'service':
return self.service_request(method='GET',
path='/tenants', **kwargs)
else:
return self.admin_request(method='GET', path='/tenants', **kwargs)
def get_tenant(self, tenant_id, **kwargs):
"""GET /tenants/{tenant_id}"""
return self.admin_request(method='GET',
path='/tenants/%s' % (tenant_id,), **kwargs)
def get_tenant_by_name(self, tenant_name, **kwargs):
"""GET /tenants?name=tenant_name"""
return self.admin_request(method='GET',
path='/tenants?name=%s' % (tenant_name,), **kwargs)
def post_tenant_for_update(self, tenant_id, **kwargs):
"""GET /tenants/{tenant_id}"""
return self.admin_request(method='POST',
path='/tenants/%s' % (tenant_id,), **kwargs)
def get_tenant_users(self, tenant_id, **kwargs):
"""GET /tenants/{tenant_id}/users"""
return self.admin_request(method='GET',
path='/tenants/%s/users' % (tenant_id,), **kwargs)
def get_tenant_users_by_role(self, tenant_id, role_id, **kwargs):
"""GET /tenants/{tenant_id}/users?roleId={roleId}"""
return self.admin_request(method='GET',
path='/tenants/%s/users?roleId=%s' % (\
tenant_id, role_id), **kwargs)
def delete_tenant(self, tenant_id, **kwargs):
"""DELETE /tenants/{tenant_id}"""
return self.admin_request(method='DELETE',
path='/tenants/%s' % (tenant_id,), **kwargs)
def post_user(self, **kwargs):
"""POST /users"""
return self.admin_request(method='POST', path='/users', **kwargs)
def get_users(self, **kwargs):
"""GET /users"""
return self.admin_request(method='GET', path='/users', **kwargs)
def get_user(self, user_id, **kwargs):
"""GET /users/{user_id}"""
return self.admin_request(method='GET',
path='/users/%s' % (user_id,), **kwargs)
def query_user(self, user_name, **kwargs):
"""GET /users?name={user_name}"""
return self.admin_request(method='GET',
path='/users?name=%s' % (user_name,), **kwargs)
def post_user_for_update(self, user_id, **kwargs):
"""POST /users/{user_id}"""
return self.admin_request(method='POST',
path='/users/%s' % (user_id,), **kwargs)
def put_user_password(self, user_id, **kwargs):
"""PUT /users/{user_id}/password"""
return self.admin_request(method='PUT',
path='/users/%s/password' % (user_id,), **kwargs)
def put_user_tenant(self, user_id, **kwargs):
"""PUT /users/{user_id}/tenant"""
return self.admin_request(method='PUT',
path='/users/%s/tenant' % (user_id,), **kwargs)
def put_user_enabled(self, user_id, **kwargs):
"""PUT /users/{user_id}/enabled"""
return self.admin_request(method='PUT',
path='/users/%s/enabled' % (user_id,), **kwargs)
def delete_user(self, user_id, **kwargs):
"""DELETE /users/{user_id}"""
return self.admin_request(method='DELETE',
path='/users/%s' % (user_id,), **kwargs)
def get_user_roles(self, user_id, **kwargs):
"""GET /users/{user_id}/roles"""
return self.admin_request(method='GET',
path='/users/%s/roles' % (user_id,), **kwargs)
    def put_user_role(self, user_id, role_id, tenant_id, **kwargs):
        """PUT /users/{user_id}/roles/OS-KSADM/{role_id}, or when tenant_id
        is given, PUT /tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}
        """
        if tenant_id is None:
            return self.admin_request(method='PUT',
                path='/users/%s/roles/OS-KSADM/%s' %
                (user_id, role_id), **kwargs)
        else:
            return self.admin_request(method='PUT',
                path='/tenants/%s/users/%s/roles/OS-KSADM/%s' %
                (tenant_id, user_id, role_id), **kwargs)
def delete_user_role(self, user_id, role_id, tenant_id, **kwargs):
"""DELETE /users/{user_id}/roles/{role_id}"""
if tenant_id is None:
return self.admin_request(method='DELETE',
path='/users/%s/roles/OS-KSADM/%s'
% (user_id, role_id), **kwargs)
else:
return self.admin_request(method='DELETE',
path='/tenants/%s/users/%s/roles/OS-KSADM/%s' %
(tenant_id, user_id, role_id), **kwargs)
def post_role(self, **kwargs):
"""POST /roles"""
return self.admin_request(method='POST',
path='/OS-KSADM/roles', **kwargs)
def get_roles(self, **kwargs):
"""GET /roles"""
return self.admin_request(method='GET',
path='/OS-KSADM/roles', **kwargs)
def get_role(self, role_id, **kwargs):
"""GET /roles/{role_id}"""
return self.admin_request(method='GET',
path='/OS-KSADM/roles/%s' % (role_id,), **kwargs)
def get_role_by_name(self, role_name, **kwargs):
"""GET /roles?name={role_name}"""
return self.admin_request(method='GET',
path='/OS-KSADM/roles?name=%s' % (role_name,), **kwargs)
def delete_role(self, role_id, **kwargs):
"""DELETE /roles/{role_id}"""
return self.admin_request(method='DELETE',
path='/OS-KSADM/roles/%s' % (role_id,), **kwargs)
def get_endpoint_templates(self, **kwargs):
"""GET /OS-KSCATALOG/endpointTemplates"""
return self.admin_request(method='GET',
path='/OS-KSCATALOG/endpointTemplates',
**kwargs)
def post_endpoint_template(self, **kwargs):
"""POST /OS-KSCATALOG/endpointTemplates"""
return self.admin_request(method='POST',
path='/OS-KSCATALOG/endpointTemplates',
**kwargs)
def put_endpoint_template(self, endpoint_template_id, **kwargs):
"""PUT /OS-KSCATALOG/endpointTemplates/{endpoint_template_id}"""
return self.admin_request(method='PUT',
path='/OS-KSCATALOG/endpointTemplates/%s'
% (endpoint_template_id,),
**kwargs)
def get_endpoint_template(self, endpoint_template_id, **kwargs):
"""GET /OS-KSCATALOG/endpointTemplates/{endpoint_template_id}"""
return self.admin_request(method='GET',
path='/OS-KSCATALOG/endpointTemplates/%s'
% (endpoint_template_id,),
**kwargs)
def delete_endpoint_template(self, endpoint_template_id, **kwargs):
"""DELETE /OS-KSCATALOG/endpointTemplates/{endpoint_template_id}"""
return self.admin_request(method='DELETE',
path='/OS-KSCATALOG/endpointTemplates/%s' %
(endpoint_template_id,),
**kwargs)
def get_tenant_endpoints(self, tenant_id, **kwargs):
"""GET /tenants/{tenant_id}/OS-KSCATALOG/endpoints"""
return self.admin_request(method='GET',
path='/tenants/%s/OS-KSCATALOG/endpoints' %
(tenant_id,),
**kwargs)
def post_tenant_endpoint(self, tenant_id, **kwargs):
"""POST /tenants/{tenant_id}/OS-KSCATALOG/endpoints"""
return self.admin_request(method='POST',
path='/tenants/%s/OS-KSCATALOG/endpoints' %
(tenant_id,), **kwargs)
def delete_tenant_endpoint(self, tenant_id, endpoint_id, **kwargs):
"""DELETE /tenants/{tenant_id}/OS-KSCATALOG/endpoints/{endpoint_id}"""
return self.admin_request(method='DELETE',
path='/tenants/%s/OS-KSCATALOG/endpoints/%s' %
(tenant_id, endpoint_id,),
**kwargs)
def get_token_endpoints(self, token_id, **kwargs):
"""GET /tokens/{token_id}/endpoints"""
return self.admin_request(method='GET',
path='/tokens/%s/endpoints' %
(token_id,),
**kwargs)
def post_service(self, **kwargs):
"""POST /services"""
return self.admin_request(method='POST',
path='/OS-KSADM/services', **kwargs)
def get_services(self, **kwargs):
"""GET /services"""
return self.admin_request(method='GET',
path='/OS-KSADM/services', **kwargs)
def get_service(self, service_id, **kwargs):
"""GET /services/{service_id}"""
return self.admin_request(method='GET',
path='/OS-KSADM/services/%s' % (service_id,), **kwargs)
def get_service_by_name(self, service_name, **kwargs):
"""GET /services?name={service_name}"""
return self.admin_request(method='GET',
path='/OS-KSADM/services?name=%s' % (service_name,), **kwargs)
def delete_service(self, service_id, **kwargs):
"""DELETE /services/{service_id}"""
return self.admin_request(method='DELETE',
path='/OS-KSADM/services/%s' % (service_id,), **kwargs)
def get_root(self, **kwargs):
"""GET /"""
return self.service_request(method='GET', path='/', **kwargs)
def get_extensions(self, **kwargs):
"""GET /extensions"""
return self.service_request(method='GET', path='/extensions', **kwargs)
def get_admin_guide(self, **kwargs):
"""GET /identityadminguide.pdf"""
return self.service_request(method='GET',
path='/identityadminguide.pdf', **kwargs)
def get_admin_wadl(self, **kwargs):
"""GET /identity-admin.wadl"""
return self.service_request(method='GET', path='/identity-admin.wadl',
**kwargs)
def get_common_ent(self, **kwargs):
"""GET /common.ent"""
return self.service_request(method='GET', path='/common.ent',
**kwargs)
def get_xsd(self, filename, **kwargs):
"""GET /xsd/{xsd}"""
return self.service_request(method='GET', path='/xsd/%s' % (filename,),
**kwargs)
def get_xsd_atom(self, filename, **kwargs):
"""GET /xsd/atom/{xsd}"""
return self.service_request(method='GET',
path='/xsd/atom/%s' % (filename,), **kwargs)
def get_xslt(self, filename, **kwargs):
"""GET /xslt/{file:.*}"""
return self.service_request(method='GET',
path='/xslt/%s' % (filename,), **kwargs)
def get_javascript(self, filename, **kwargs):
"""GET /js/{file:.*}"""
return self.service_request(method='GET', path='/js/%s' % (filename,),
**kwargs)
def get_style(self, filename, **kwargs):
"""GET /style/{file:.*}"""
return self.service_request(method='GET',
path='/style/%s' % (filename,), **kwargs)
def get_sample(self, filename, **kwargs):
"""GET /samples/{file:.*}"""
return self.service_request(method='GET',
path='/samples/%s' % (filename,), **kwargs)
def get_user_credentials(self, user_id, **kwargs):
"""GET /users/{user_id}/OS-KSADM/credentials"""
return self.admin_request(method='GET',
path='/users/%s/OS-KSADM/credentials' % (user_id,), **kwargs)
def get_user_credentials_by_type(self,
user_id, credentials_type, **kwargs):
"""GET /users/{user_id}/OS-KSADM/credentials/{credentials_type}"""
return self.admin_request(method='GET',
path='/users/%s/OS-KSADM/credentials/%s'\
% (user_id, credentials_type,), **kwargs)
def post_credentials(self, user_id, **kwargs):
"""POST /users/{user_id}/OS-KSADM/credentials"""
return self.admin_request(method='POST',
path='/users/%s/OS-KSADM/credentials' % (user_id,), **kwargs)
def post_credentials_by_type(self, user_id, credentials_type, **kwargs):
"""POST /users/{user_id}/OS-KSADM/credentials/{credentials_type}"""
return self.admin_request(method='POST',
path='/users/%s/OS-KSADM/credentials/%s' %\
(user_id, credentials_type), **kwargs)
def delete_user_credentials_by_type(self, user_id,\
credentials_type, **kwargs):
"""DELETE /users/{user_id}/OS-KSADM/credentials/{credentials_type}"""
return self.admin_request(method='DELETE',
path='/users/%s/OS-KSADM/credentials/%s' %\
(user_id, credentials_type,), **kwargs)
# Generates and returns a unique string
unique_str = lambda: str(uuid.uuid4())
# Generates and returns a unique email address
unique_email = lambda: str(unique_str() + '@openstack.org')
# Generates and returns a unique URL
unique_url = lambda: str('http://' + unique_str())
# Automatically populates optional string fields
optional_str = lambda x: x if x is not None else unique_str()
# Automatically populates optional email fields
optional_email = lambda x: x if x is not None else unique_email()
# Automatically populates optional url fields
optional_url = lambda x: x if x is not None else unique_url()
class FunctionalTestCase(ApiTestCase):
"""Abstracts functional CRUD of the identity API"""
service_token = None
admin_token = None
admin_user_id = None
admin_username = 'admin'
admin_password = 'secrete'
expired_admin_token = '000999'
disabled_admin_token = '999888777'
service_admin_token = '111222333444'
xmlns = 'http://docs.openstack.org/identity/api/v2.0'
xmlns_ksadm = 'http://docs.openstack.org/identity/api/ext/OS-KSADM/v1.0'
xmlns_kscatalog = "http://docs.openstack.org/identity/api/ext"\
+ "/OSKSCATALOG/v1.0"
def setUp(self):
"""Prepare keystone for system tests"""
# Authenticate as admin user to establish admin_token
access = self.authenticate(self.admin_username, self.admin_password).\
json['access']
self.admin_token = access['token']['id']
self.admin_user_id = access['user']['id']
def authenticate(self, user_name=None, user_password=None, tenant_id=None,
**kwargs):
user_name = optional_str(user_name)
user_password = optional_str(user_password)
data = {
"auth": {
"passwordCredentials": {
"username": user_name,
"password": user_password}}}
if tenant_id:
data["auth"]["tenantId"] = tenant_id
return self.post_token(as_json=data, **kwargs)
def validate_token(self, token_id=None, tenant_id=None, **kwargs):
token_id = optional_str(token_id)
if tenant_id:
# validate scoped token
return self.get_token_belongsto(token_id, tenant_id, **kwargs)
else:
# validate unscoped token
return self.get_token(token_id, **kwargs)
def remove_token(self, token_id=None, **kwargs):
token_id = optional_str(token_id)
return self.delete_token(token_id, **kwargs)
def create_tenant(self, tenant_name=None, tenant_description=None,
tenant_enabled=True, **kwargs):
tenant_name = optional_str(tenant_name)
tenant_description = optional_str(tenant_description)
data = {
"tenant": {
"name": tenant_name,
"description": tenant_description,
"enabled": tenant_enabled}}
return self.post_tenant(as_json=data, **kwargs)
def list_tenants(self, **kwargs):
return self.get_tenants(**kwargs)
def fetch_tenant(self, tenant_id=None, **kwargs):
tenant_id = optional_str(tenant_id)
return self.get_tenant(tenant_id, **kwargs)
def fetch_tenant_by_name(self, tenant_name=None, **kwargs):
tenant_name = optional_str(tenant_name)
if tenant_name:
return self.get_tenant_by_name(tenant_name, **kwargs)
def update_tenant(self, tenant_id=None, tenant_name=None,
tenant_description=None, tenant_enabled=True, **kwargs):
tenant_id = optional_str(tenant_id)
tenant_description = optional_str(tenant_description)
data = {"tenant": {}}
if tenant_name is not None:
data['tenant']['name'] = tenant_name
data['tenant']['description'] = tenant_description
if tenant_enabled is not None:
data['tenant']['enabled'] = tenant_enabled
return self.post_tenant_for_update(tenant_id, as_json=data, **kwargs)
def list_tenant_users(self, tenant_id, role_id=None, **kwargs):
tenant_id = optional_str(tenant_id)
if role_id:
return self.get_tenant_users_by_role(tenant_id, role_id, **kwargs)
else:
return self.get_tenant_users(tenant_id, **kwargs)
def remove_tenant(self, tenant_id=None, **kwargs):
tenant_id = optional_str(tenant_id)
return self.delete_tenant(tenant_id, **kwargs)
def create_user(self, user_name=None, user_password=None, user_email=None,
tenant_id=None, user_enabled=True, **kwargs):
user_name = optional_str(user_name)
user_password = optional_str(user_password)
user_email = optional_email(user_email)
data = {
"user": {
"password": user_password,
"name": user_name,
"tenantId": tenant_id,
"email": user_email,
"enabled": user_enabled}}
return self.post_user(as_json=data, **kwargs)
def create_user_with_known_password(self, **kwargs):
"""Manually injects the new user's password into the response data"""
password = unique_str()
r = self.create_user(user_password=password, **kwargs)
r.json['user']['password'] = password
return r
def list_users(self, **kwargs):
return self.get_users(**kwargs)
def fetch_user(self, user_id=None, **kwargs):
user_id = optional_str(user_id)
return self.get_user(user_id, **kwargs)
def fetch_user_by_name(self, user_name=None, **kwargs):
user_name = optional_str(user_name)
return self.query_user(user_name, **kwargs)
def update_user(self, user_id=None, user_email=None, user_enabled=None,
user_name=None, **kwargs):
user_id = optional_str(user_id)
data = {"user": {}}
if user_email is not None:
data['user']['email'] = user_email
if user_enabled is not None:
data['user']['enabled'] = user_enabled
if user_name is not None:
data['user']['name'] = user_name
return self.post_user_for_update(user_id, as_json=data, **kwargs)
def update_user_password(self, user_id=None, user_password=None, **kwargs):
user_id = optional_str(user_id)
user_password = optional_str(user_password)
data = {"user": {"password": user_password}}
return self.put_user_password(user_id, as_json=data, **kwargs)
def update_user_tenant(self, user_id=None, tenant_id=None, **kwargs):
user_id = optional_str(user_id)
tenant_id = optional_str(tenant_id)
data = {"user": {"tenantId": tenant_id}}
return self.put_user_tenant(user_id, as_json=data, **kwargs)
def _enable_disable_user(self, user_id, user_enabled, **kwargs):
"""Private function to enable and disable a user.
Use enable_user() and disable_user() instead."""
data = {"user": {"enabled": user_enabled}}
return self.put_user_enabled(user_id, as_json=data, **kwargs)
def enable_user(self, user_id=None, **kwargs):
user_id = optional_str(user_id)
return self._enable_disable_user(user_id, True, **kwargs)
def disable_user(self, user_id=None, **kwargs):
user_id = optional_str(user_id)
return self._enable_disable_user(user_id, False, **kwargs)
def remove_user(self, user_id=None, **kwargs):
user_id = optional_str(user_id)
return self.delete_user(user_id, **kwargs)
def grant_role_to_user(self, user_id=None, role_id=None, tenant_id=None,
**kwargs):
user_id = optional_str(user_id)
role_id = optional_str(role_id)
tenant_id = optional_str(tenant_id)
return self.put_user_role(user_id, role_id, tenant_id, **kwargs)
def grant_global_role_to_user(self, user_id=None, role_id=None,
**kwargs):
user_id = optional_str(user_id)
role_id = optional_str(role_id)
return self.put_user_role(user_id, role_id, None, **kwargs)
def revoke_global_role_from_user(self,
user_id=None, role_id=None, **kwargs):
user_id = optional_str(user_id)
role_id = optional_str(role_id)
return self.delete_user_role(user_id, role_id, **kwargs)
def revoke_role_from_user(self,
user_id=None, role_id=None, tenant_id=None, **kwargs):
user_id = optional_str(user_id)
role_id = optional_str(role_id)
tenant_id = optional_str(tenant_id)
return self.delete_user_role(user_id, role_id, tenant_id, **kwargs)
def create_role(self, role_name=None, role_description=None,
service_id=None, **kwargs):
role_name = optional_str(role_name)
role_description = optional_str(role_description)
data = {
"role": {
"name": role_name,
"description": role_description}}
if service_id is not None:
data['role']['serviceId'] = service_id
return self.post_role(as_json=data, **kwargs)
def list_roles(self, **kwargs):
return self.get_roles(**kwargs)
def fetch_role(self, role_id=None, **kwargs):
role_id = optional_str(role_id)
return self.get_role(role_id, **kwargs)
def fetch_role_by_name(self, role_name=None, **kwargs):
role_name = optional_str(role_name)
return self.get_role_by_name(role_name, **kwargs)
def remove_role(self, role_id=None, **kwargs):
role_id = optional_str(role_id)
return self.delete_role(role_id, **kwargs)
def create_service(self, service_name=None, service_type=None,
service_description=None, **kwargs):
service_name = optional_str(service_name)
service_type = optional_str(service_type)
service_description = optional_str(service_description)
data = {
"OS-KSADM:service": {
"name": service_name,
"type": service_type,
"description": service_description}}
return self.post_service(as_json=data, **kwargs)
def list_services(self, **kwargs):
return self.get_services(**kwargs)
def fetch_service(self, service_id=None, **kwargs):
service_id = optional_str(service_id)
return self.get_service(service_id, **kwargs)
def fetch_service_by_name(self, service_name=None, **kwargs):
service_name = optional_str(service_name)
return self.get_service_by_name(service_name, **kwargs)
def remove_service(self, service_id=None, **kwargs):
service_id = optional_str(service_id)
self.delete_service(service_id, **kwargs)
def create_endpoint_for_tenant(self, tenant_id=None,
endpoint_template_id=None, **kwargs):
tenant_id = optional_str(tenant_id)
endpoint_template_id = optional_str(endpoint_template_id)
data = {"OS-KSCATALOG:endpointTemplate": {"id": endpoint_template_id}}
return self.post_tenant_endpoint(tenant_id, as_json=data, **kwargs)
def list_tenant_endpoints(self, tenant_id=None, **kwargs):
tenant_id = optional_str(tenant_id)
return self.get_tenant_endpoints(tenant_id, **kwargs)
def remove_endpoint_from_tenant(self, tenant_id=None, endpoint_id=None,
**kwargs):
tenant_id = optional_str(tenant_id)
endpoint_id = optional_str(endpoint_id)
"""TODO: Should this be an 'endpoint_id' or 'endpoint_template_id'??"""
return self.delete_tenant_endpoint(tenant_id, endpoint_id, **kwargs)
def remove_tenant_endpoint(self, tenant_id=None, endpoint_id=None,
**kwargs):
tenant_id = optional_str(tenant_id)
endpoint_id = optional_str(endpoint_id)
"""TODO: Should this be an 'endpoint_id' or 'endpoint_template_id'??"""
return self.delete_tenant_endpoint(tenant_id, endpoint_id, **kwargs)
def list_endpoint_templates(self, **kwargs):
return self.get_endpoint_templates(**kwargs)
def create_endpoint_template(self, region=None, name=None, type=None,
public_url=None, admin_url=None, internal_url=None, enabled=True,
is_global=True, version_id=None,
version_list=None, version_info=None, **kwargs):
region = optional_str(region)
name = optional_str(name)
type = optional_str(type)
public_url = optional_url(public_url)
admin_url = optional_url(admin_url)
internal_url = optional_url(internal_url)
version_id = optional_url(version_id)
version_list = optional_url(version_list)
version_info = optional_url(version_info)
data = {
"OS-KSCATALOG:endpointTemplate": {
"region": region,
"name": name,
"type": type,
"publicURL": public_url,
"adminURL": admin_url,
"internalURL": internal_url,
"enabled": enabled,
"global": is_global,
"versionId": version_id,
"versionInfo": version_info,
"versionList": version_list
}}
return self.post_endpoint_template(as_json=data, **kwargs)
def remove_endpoint_template(self, endpoint_template_id=None, **kwargs):
endpoint_template_id = optional_str(endpoint_template_id)
return self.delete_endpoint_template(endpoint_template_id, **kwargs)
def fetch_endpoint_template(self, endpoint_template_id=None, **kwargs):
endpoint_template_id = optional_str(endpoint_template_id)
return self.get_endpoint_template(endpoint_template_id, **kwargs)
def update_endpoint_template(self, endpoint_template_id=None, region=None,
name=None, type=None, public_url=None, admin_url=None,
internal_url=None, enabled=None, is_global=None,
version_id=None, version_list=None, version_info=None, **kwargs):
data = {"OS-KSCATALOG:endpointTemplate": {}}
if region is not None:
data['OS-KSCATALOG:endpointTemplate']['region'] = region
if name is not None:
data['OS-KSCATALOG:endpointTemplate']['name'] = name
if type is not None:
data['OS-KSCATALOG:endpointTemplate']['type'] = type
if public_url is not None:
data['OS-KSCATALOG:endpointTemplate']['publicURL'] = public_url
if admin_url is not None:
data['OS-KSCATALOG:endpointTemplate']['adminURL'] = admin_url
if internal_url is not None:
data['OS-KSCATALOG:endpointTemplate']['internalURL'] = internal_url
if enabled is not None:
data['OS-KSCATALOG:endpointTemplate']['enabled'] = enabled
if is_global is not None:
data['OS-KSCATALOG:endpointTemplate']['global'] = is_global
if version_id is not None:
data['OS-KSCATALOG:endpointTemplate']['versionId'] = version_id
if version_list is not None:
data['OS-KSCATALOG:endpointTemplate']['versionList'] = version_list
if version_info is not None:
data['OS-KSCATALOG:endpointTemplate']['versionInfo'] = version_info
return self.put_endpoint_template(endpoint_template_id, as_json=data,
**kwargs)
def fetch_user_credentials(self, user_id=None, **kwargs):
user_id = optional_str(user_id)
return self.get_user_credentials(user_id, **kwargs)
def fetch_password_credentials(self, user_id=None, **kwargs):
user_id = optional_str(user_id)
return self.get_user_credentials_by_type(
user_id, 'passwordCredentials', **kwargs)
def create_password_credentials(self, user_id, user_name, **kwargs):
user_id = optional_str(user_id)
password = unique_str()
data = {
"passwordCredentials": {
"username": user_name,
"password": password}}
return self.post_credentials(user_id, as_json=data, **kwargs)
def update_password_credentials(self, user_id, user_name,
password=None, **kwargs):
user_id = optional_str(user_id)
password = optional_str(password)
data = {
"passwordCredentials": {
"username": user_name,
"password": password}}
return self.post_credentials_by_type(
user_id, 'passwordCredentials', as_json=data, **kwargs)
def delete_password_credentials(self, user_id, **kwargs):
user_id = optional_str(user_id)
return self.delete_user_credentials_by_type(
user_id, 'passwordCredentials', **kwargs)
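# Illustration only (not part of the original module): a hedged sketch of how
# the helpers above compose in a functional test. The test class and method
# names are hypothetical; real tests subclass FunctionalTestCase and run
# against a live Keystone instance.
#
#   class TenantCrudTest(FunctionalTestCase):
#       def test_tenant_lifecycle(self):
#           tenant = self.create_tenant().json['tenant']
#           self.fetch_tenant(tenant['id'])
#           self.update_tenant(tenant['id'], tenant_name=unique_str())
#           self.remove_tenant(tenant['id'])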
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from libsentry.api import get_api
from libsentry.sentry_site import get_sentry_server_admin_groups
from hadoop.cluster import get_defaultfs
from beeswax.api import autocomplete
def fetch_hive_path(request):
path = request.GET['path']
database = None
table = None
if path:
database = path
if '/' in path:
database, table = path.split('/')
resp = autocomplete(request, database, table)
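# If the request impersonates another user via 'doas' (and a database was
# resolved), make a mutable copy of request.GET, keep the 'doas' parameter on
# that copy, and re-run autocomplete for the impersonated user.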
if database and request.GET['doas'] != request.user.username:
request.GET = request.GET.copy()
request.GET['doas'] = request.GET['doas']
resp = autocomplete(request, database, table)
return resp
def list_sentry_roles_by_group(request):
result = {'status': -1, 'message': 'Error'}
try:
if request.POST['groupName']:
groupName = request.POST['groupName']
else:
# Admins can see everything, others only the groups they belong to
groupName = None if request.user.groups.filter(name__in=get_sentry_server_admin_groups()).exists() else '*'
roles = get_api(request.user).list_sentry_roles_by_group(groupName)
result['roles'] = sorted(roles, key=lambda role: role['name'])
result['message'] = ''
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def list_sentry_privileges_by_role(request):
result = {'status': -1, 'message': 'Error'}
try:
roleName = request.POST['roleName']
sentry_privileges = get_api(request.user).list_sentry_privileges_by_role(roleName)
result['sentry_privileges'] = sorted(sentry_privileges, key=lambda privilege: '%s.%s.%s.%s' % (privilege['server'], privilege['database'], privilege['table'], privilege['URI']))
result['message'] = ''
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def _to_sentry_privilege(privilege):
return {
'privilegeScope': privilege['privilegeScope'],
'serverName': privilege['serverName'],
'dbName': privilege['dbName'],
'tableName': privilege['tableName'],
'URI': _massage_uri(privilege['URI']),
'action': privilege['action'],
'createTime': privilege['timestamp'],
'grantOption': 1 if privilege['grantOption'] else 0,
}
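# Illustration only: given a privilege dict posted by the UI such as
#   {'privilegeScope': 'TABLE', 'serverName': 'server1', 'dbName': 'default',
#    'tableName': 'web_logs', 'URI': '', 'action': 'SELECT',
#    'timestamp': 1400000000000, 'grantOption': False}
# (all values hypothetical), _to_sentry_privilege() re-keys it for the Sentry
# client, mapping 'timestamp' to 'createTime', coercing 'grantOption' to 0/1
# and normalising 'URI' through _massage_uri().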
def _hive_add_privileges(user, role, privileges):
api = get_api(user)
_privileges = []
for privilege in privileges:
if privilege['status'] not in ('deleted',):
api.alter_sentry_role_grant_privilege(role['name'], _to_sentry_privilege(privilege))
# Mocked until Sentry API returns the info. Not used currently as we refresh the whole role.
_privileges.append({
'timestamp': int(time.time()),
'database': privilege.get('dbName'),
'action': privilege.get('action'),
'scope': privilege.get('privilegeScope'),
'table': privilege.get('tableName'),
'URI': privilege.get('URI'),
'server': privilege.get('serverName'),
'grantOption': privilege.get('grantOption') == 1
})
return _privileges
def _massage_uri(uri):
if uri:
if uri.startswith('hdfs:///'):
uri = uri.replace('hdfs://', get_defaultfs())
elif uri.startswith('/'):
uri = get_defaultfs() + uri
return uri
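# Illustration only (not part of the original module): a small sketch of how
# _massage_uri() rewrites URIs. The default FS shown in the comments is
# hypothetical; the real value comes from get_defaultfs().
def _example_massage_uri():
    # Assuming get_defaultfs() == 'hdfs://nameservice1:8020':
    #   '/data/logs'     -> 'hdfs://nameservice1:8020/data/logs'
    #   'hdfs:///data'   -> 'hdfs://nameservice1:8020/data'
    #   's3a://bucket/k' -> 's3a://bucket/k' (left unchanged)
    return [_massage_uri(uri) for uri in ('/data/logs', 'hdfs:///data', 's3a://bucket/k')]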
def _drop_sentry_privilege(user, role, authorizable):
return get_api(user).alter_sentry_role_revoke_privilege(role['name'], _to_sentry_privilege(authorizable))
def create_role(request):
result = {'status': -1, 'message': 'Error'}
try:
role = json.loads(request.POST['role'])
api = get_api(request.user)
api.create_sentry_role(role['name'])
privileges = [privilege for privilege in role['privileges'] if privilege['status'] != 'deleted']
result['privileges'] = _hive_add_privileges(request.user, role, privileges)
api.alter_sentry_role_add_groups(role['name'], role['groups'])
result['role'] = {"name": role['name'], "groups": role['groups']}
result['message'] = _('Role created!')
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def update_role_groups(request):
result = {'status': -1, 'message': 'Error'}
try:
role = json.loads(request.POST['role'])
new_groups = set(role['groups']) - set(role['originalGroups'])
deleted_groups = set(role['originalGroups']) - set(role['groups'])
api = get_api(request.user)
if new_groups:
api.alter_sentry_role_add_groups(role['name'], new_groups)
if deleted_groups:
api.alter_sentry_role_delete_groups(role['name'], deleted_groups)
result['message'] = ''
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def save_privileges(request):
result = {'status': -1, 'message': 'Error'}
try:
role = json.loads(request.POST['role'])
new_privileges = [privilege for privilege in role['privilegesChanged'] if privilege['status'] == 'new']
result['privileges'] = _hive_add_privileges(request.user, role, new_privileges)
deleted_privileges = [privilege for privilege in role['privilegesChanged'] if privilege['status'] == 'deleted']
for privilege in deleted_privileges:
_drop_sentry_privilege(request.user, role, privilege)
modified_privileges = [privilege for privilege in role['privilegesChanged'] if privilege['status'] == 'modified']
old_privileges_ids = [privilege['id'] for privilege in modified_privileges]
_hive_add_privileges(request.user, role, modified_privileges)
for privilege in role['originalPrivileges']:
if privilege['id'] in old_privileges_ids:
_drop_sentry_privilege(request.user, role, privilege)
result['message'] = _('Privileges updated')
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def grant_privilege(request):
result = {'status': -1, 'message': 'Error'}
try:
roleName = json.loads(request.POST['roleName'])
privilege = json.loads(request.POST['privilege'])
result['privileges'] = _hive_add_privileges(request.user, {'name': roleName}, [privilege])
result['message'] = _('Privilege granted successfully to %s.') % roleName
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def create_sentry_role(request):
result = {'status': -1, 'message': 'Error'}
try:
roleName = request.POST['roleName']
get_api(request.user).create_sentry_role(roleName)
result['message'] = _('Role and privileges created.')
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def drop_sentry_role(request):
result = {'status': -1, 'message': 'Error'}
try:
roleName = request.POST['roleName']
get_api(request.user).drop_sentry_role(roleName)
result['message'] = _('Role and privileges deleted.')
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def list_sentry_privileges_by_authorizable(request):
result = {'status': -1, 'message': 'Error'}
try:
groups = [request.POST['groupName']] if request.POST['groupName'] else None
authorizableSet = [json.loads(request.POST['authorizableHierarchy'])]
_privileges = []
for authorizable, roles in get_api(request.user).list_sentry_privileges_by_authorizable(authorizableSet=authorizableSet, groups=groups):
for role, privileges in roles.iteritems():
for privilege in privileges:
privilege['roleName'] = role
_privileges.append(privilege)
result['privileges'] = sorted(_privileges, key=lambda privilege: privilege['roleName'])
result['message'] = ''
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def bulk_delete_privileges(request):
result = {'status': -1, 'message': 'Error'}
try:
checkedPaths = json.loads(request.POST['checkedPaths'])
authorizableHierarchy = json.loads(request.POST['authorizableHierarchy'])
for path in [path['path'] for path in checkedPaths]:
if '.' in path:
db, table = path.split('.')
else:
db, table = path, ''
authorizableHierarchy.update({
'db': db,
'table': table,
})
get_api(request.user).drop_sentry_privileges(authorizableHierarchy)
result['message'] = _('Privileges deleted.')
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def bulk_add_privileges(request):
result = {'status': -1, 'message': 'Error'}
try:
privileges = json.loads(request.POST['privileges'])
checkedPaths = json.loads(request.POST['checkedPaths'])
authorizableHierarchy = json.loads(request.POST['authorizableHierarchy'])
privileges = [privilege for privilege in privileges if privilege['status'] == '']
for path in [path['path'] for path in checkedPaths]:
if '.' in path:
db, table = path.split('.')
else:
db, table = path, ''
privilegeScope = 'TABLE' if table else 'DATABASE' if db else 'SERVER'
authorizableHierarchy.update({
'db': db,
'table': table,
})
for privilege in privileges:
privilege['dbName'] = db
privilege['tableName'] = table
privilege['privilegeScope'] = privilegeScope
_hive_add_privileges(request.user, {'name': privilege['roleName']}, [privilege])
result['message'] = _('Privileges added.')
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def rename_sentry_privilege(request):
result = {'status': -1, 'message': 'Error'}
try:
oldAuthorizable = json.loads(request.POST['oldAuthorizable'])
newAuthorizable = json.loads(request.POST['newAuthorizable'])
get_api(request.user).rename_sentry_privilege(oldAuthorizable, newAuthorizable)
result['message'] = _('Privilege deleted.')
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
def list_sentry_privileges_for_provider(request):
result = {'status': -1, 'message': 'Error'}
try:
groups = json.loads(request.POST['groups'])
roleSet = json.loads(request.POST['roleSet'])
authorizableHierarchy = json.loads(request.POST['authorizableHierarchy'])
sentry_privileges = get_api(request.user).list_sentry_privileges_for_provider(groups=groups, roleSet=roleSet, authorizableHierarchy=authorizableHierarchy)
result['sentry_privileges'] = sentry_privileges
result['message'] = ''
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return HttpResponse(json.dumps(result), mimetype="application/json")
|
|
import hashlib
import logging
import random
import re
import time
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.sites.models import Site
from django.db import models
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from celery.task import task
from django_statsd.clients import statsd
from timezone_field import TimeZoneField
from kitsune.lib.countries import COUNTRIES
from kitsune.search.es_utils import UnindexMeBro
from kitsune.search.models import (
SearchMappingType, SearchMixin, register_for_indexing,
register_mapping_type)
from kitsune.sumo import email_utils
from kitsune.sumo.models import ModelBase, LocaleField
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import auto_delete_files, chunked
from kitsune.users.validators import TwitterValidator
log = logging.getLogger('k.users')
SHA1_RE = re.compile('^[a-f0-9]{40}$')
CONTRIBUTOR_GROUP = 'Registered as contributor'
@auto_delete_files
class Profile(ModelBase, SearchMixin):
"""Profile model for django users."""
user = models.OneToOneField(User, primary_key=True,
verbose_name=_lazy(u'User'))
name = models.CharField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'Display name'))
public_email = models.BooleanField( # show/hide email
default=False, verbose_name=_lazy(u'Make my email public'))
avatar = models.ImageField(upload_to=settings.USER_AVATAR_PATH, null=True,
blank=True, verbose_name=_lazy(u'Avatar'),
max_length=settings.MAX_FILEPATH_LENGTH)
bio = models.TextField(null=True, blank=True,
verbose_name=_lazy(u'Biography'),
help_text=_lazy(u'Some HTML supported: <abbr title> ' +
'<acronym title> <b> ' +
'<blockquote> <code> ' +
'<em> <i> <li> ' +
'<ol> <strong> <ul>. ' +
'Links are forbidden.'))
website = models.URLField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'Website'))
twitter = models.CharField(max_length=15, null=True, blank=True, validators=[TwitterValidator],
verbose_name=_lazy(u'Twitter Username'))
facebook = models.URLField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'Facebook URL'))
mozillians = models.CharField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'Mozillians Username'))
irc_handle = models.CharField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'IRC nickname'))
timezone = TimeZoneField(null=True, blank=True, default='US/Pacific',
verbose_name=_lazy(u'Timezone'))
country = models.CharField(max_length=2, choices=COUNTRIES, null=True,
blank=True, verbose_name=_lazy(u'Country'))
# No city validation
city = models.CharField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'City'))
locale = LocaleField(default=settings.LANGUAGE_CODE,
verbose_name=_lazy(u'Preferred language'))
first_answer_email_sent = models.BooleanField(
default=False, help_text=_lazy(u'Has been sent a first answer contribution email.'))
first_l10n_email_sent = models.BooleanField(
default=False, help_text=_lazy(u'Has been sent a first revision contribution email.'))
involved_from = models.DateField(null=True, blank=True,
verbose_name=_lazy(u'Involved with Mozilla from'))
csat_email_sent = models.DateField(null=True, blank=True,
verbose_name=_lazy(u'When the user was sent a community '
u'health survey'))
is_fxa_migrated = models.BooleanField(default=False)
fxa_uid = models.CharField(blank=True, null=True, unique=True, max_length=128)
fxa_avatar = models.URLField(max_length=512, blank=True, default='')
has_subscriptions = models.BooleanField(default=False)
class Meta(object):
permissions = (('view_karma_points', 'Can view karma points'),
('deactivate_users', 'Can deactivate users'),
('screen_share', 'Can screen share'),)
def __unicode__(self):
try:
return unicode(self.user)
except Exception as exc:
return unicode('%d (%r)' % (self.pk, exc))
def get_absolute_url(self):
return reverse('users.profile', args=[self.user_id])
def clear(self):
"""Clears out the users profile"""
self.name = ''
self.public_email = False
self.avatar = None
self.bio = ''
self.website = ''
self.twitter = ''
self.facebook = ''
self.mozillians = ''
self.irc_handle = ''
self.city = ''
self.is_fxa_migrated = False
self.fxa_uid = ''
@property
def display_name(self):
return self.name if self.name else self.user.username
@property
def twitter_usernames(self):
from kitsune.customercare.models import Reply
return list(
Reply.objects.filter(user=self.user)
.values_list('twitter_username', flat=True)
.distinct())
@classmethod
def get_mapping_type(cls):
return UserMappingType
@classmethod
def get_serializer(cls, serializer_type='full'):
# Avoid circular import
from kitsune.users import api
if serializer_type == 'full':
return api.ProfileSerializer
elif serializer_type == 'fk':
return api.ProfileFKSerializer
else:
raise ValueError('Unknown serializer type "{}".'.format(serializer_type))
@property
def last_contribution_date(self):
"""Get the date of the user's last contribution."""
from kitsune.customercare.models import Reply
from kitsune.questions.models import Answer
from kitsune.wiki.models import Revision
dates = []
# Latest Army of Awesome reply:
try:
aoa_reply = Reply.objects.filter(
user=self.user).latest('created')
dates.append(aoa_reply.created)
except Reply.DoesNotExist:
pass
# Latest Support Forum answer:
try:
answer = Answer.objects.filter(
creator=self.user).latest('created')
dates.append(answer.created)
except Answer.DoesNotExist:
pass
# Latest KB Revision edited:
try:
revision = Revision.objects.filter(
creator=self.user).latest('created')
dates.append(revision.created)
except Revision.DoesNotExist:
pass
# Latest KB Revision reviewed:
try:
revision = Revision.objects.filter(
reviewer=self.user).latest('reviewed')
# Old revisions don't have the reviewed date.
dates.append(revision.reviewed or revision.created)
except Revision.DoesNotExist:
pass
if len(dates) == 0:
return None
return max(dates)
@property
def settings(self):
return self.user.settings
@property
def answer_helpfulness(self):
# Avoid circular import
from kitsune.questions.models import AnswerVote
return AnswerVote.objects.filter(answer__creator=self.user, helpful=True).count()
@register_mapping_type
class UserMappingType(SearchMappingType):
list_keys = [
'twitter_usernames',
'itwitter_usernames',
]
@classmethod
def get_model(cls):
return Profile
@classmethod
def get_index_group(cls):
return 'non-critical'
@classmethod
def get_mapping(cls):
return {
'properties': {
'id': {'type': 'long'},
'model': {'type': 'string', 'index': 'not_analyzed'},
'url': {'type': 'string', 'index': 'not_analyzed'},
'indexed_on': {'type': 'integer'},
'username': {'type': 'string', 'index': 'not_analyzed'},
'display_name': {'type': 'string', 'index': 'not_analyzed'},
'twitter_usernames': {
'type': 'string',
'index': 'not_analyzed'
},
'last_contribution_date': {'type': 'date'},
# lower-cased versions for querying:
'iusername': {'type': 'string', 'index': 'not_analyzed'},
'idisplay_name': {'type': 'string', 'analyzer': 'whitespace'},
'itwitter_usernames': {
'type': 'string',
'index': 'not_analyzed'
},
'avatar': {'type': 'string', 'index': 'not_analyzed'},
'suggest': {
'type': 'completion',
'analyzer': 'whitespace',
'payloads': True,
}
}
}
@classmethod
def extract_document(cls, obj_id, obj=None):
"""Extracts interesting thing from a Thread and its Posts"""
if obj is None:
model = cls.get_model()
obj = model.objects.select_related('user').get(pk=obj_id)
if not obj.user.is_active:
raise UnindexMeBro()
d = {}
d['id'] = obj.pk
d['model'] = cls.get_mapping_type_name()
d['url'] = obj.get_absolute_url()
d['indexed_on'] = int(time.time())
d['username'] = obj.user.username
d['display_name'] = obj.display_name
d['twitter_usernames'] = obj.twitter_usernames
d['last_contribution_date'] = obj.last_contribution_date
d['iusername'] = obj.user.username.lower()
d['idisplay_name'] = obj.display_name.lower()
d['itwitter_usernames'] = [u.lower() for u in obj.twitter_usernames]
from kitsune.users.templatetags.jinja_helpers import profile_avatar
d['avatar'] = profile_avatar(obj.user, size=120)
d['suggest'] = {
'input': [
d['iusername'],
d['idisplay_name']
],
'output': _(u'{displayname} ({username})').format(
displayname=d['display_name'], username=d['username']),
'payload': {'user_id': d['id']},
}
return d
@classmethod
def suggest_completions(cls, text):
"""Suggest completions for the text provided."""
USER_SUGGEST = 'user-suggest'
es = UserMappingType.search().get_es()
results = es.suggest(index=cls.get_index(), body={
USER_SUGGEST: {
'text': text.lower(),
'completion': {
'field': 'suggest'
}
}
})
if results[USER_SUGGEST][0]['length'] > 0:
return results[USER_SUGGEST][0]['options']
return []
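# Illustration only: UserMappingType.suggest_completions('jan') (input value
# hypothetical) returns the raw Elasticsearch completion-suggester options,
# each carrying the matched text, a score and a 'payload' with the 'user_id'
# set in extract_document() above; an empty list is returned on no match.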
register_for_indexing('users', Profile)
def get_profile(u):
try:
return Profile.objects.get(user=u)
except Profile.DoesNotExist:
return None
register_for_indexing(
'users',
User,
instance_to_indexee=get_profile)
class Setting(ModelBase):
"""User specific value per setting"""
user = models.ForeignKey(User, verbose_name=_lazy(u'User'),
related_name='settings')
name = models.CharField(max_length=100)
value = models.CharField(blank=True, max_length=60,
verbose_name=_lazy(u'Value'))
class Meta(object):
unique_together = (('user', 'name'),)
def __unicode__(self):
return u'%s %s:%s' % (self.user, self.name, self.value or u'[none]')
@classmethod
def get_for_user(cls, user, name):
from kitsune.users.forms import SettingsForm
form = SettingsForm()
if name not in form.fields.keys():
raise KeyError(("'{name}' is not a field in "
"user.forms.SettingsFrom()").format(name=name))
try:
setting = Setting.objects.get(user=user, name=name)
except Setting.DoesNotExist:
value = form.fields[name].initial or ''
setting = Setting.objects.create(user=user, name=name, value=value)
# Cast to the field's Python type.
return form.fields[name].to_python(setting.value)
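# Illustration only: Setting.get_for_user(user, 'forums_watch_new_thread')
# (setting name hypothetical; valid names are the SettingsForm field names)
# returns the stored per-user value cast to the field's Python type, creating
# a row with the field's initial value on first access.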
# Activation model and manager:
# (based on http://bitbucket.org/ubernostrum/django-registration)
class ConfirmationManager(models.Manager):
"""
Custom manager for confirming keys sent by email.
The methods defined here provide shortcuts for creation of instances
and sending email confirmations.
Activation should be done in specific managers.
"""
def _send_email(self, confirmation_profile, url,
subject, text_template, html_template,
send_to, **kwargs):
"""
Send an email using a passed in confirmation profile.
Use specified url, subject, text_template, html_template and
email to send_to.
"""
current_site = Site.objects.get_current()
email_kwargs = {'activation_key': confirmation_profile.activation_key,
'domain': current_site.domain,
'activate_url': url,
'login_url': reverse('users.login'),
'reg': 'main'}
email_kwargs.update(kwargs)
# RegistrationProfile doesn't have a locale attribute. So if
# we get one of those, then we have to get the real profile
# from the user.
if hasattr(confirmation_profile, 'locale'):
locale = confirmation_profile.locale
else:
locale = confirmation_profile.user.profile.locale
@email_utils.safe_translation
def _make_mail(locale):
mail = email_utils.make_mail(
subject=subject,
text_template=text_template,
html_template=html_template,
context_vars=email_kwargs,
from_email=settings.DEFAULT_FROM_EMAIL,
to_email=send_to)
return mail
email_utils.send_messages([_make_mail(locale)])
def send_confirmation_email(self, *args, **kwargs):
"""This is meant to be overwritten."""
raise NotImplementedError
def create_profile(self, user, *args, **kwargs):
"""
Create an instance of this manager's object class for a given
``User``, and return it.
The activation key will be a SHA1 hash, generated from a combination
of the ``User``'s username and a random salt.
"""
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
activation_key = hashlib.sha1(salt + user.username).hexdigest()
return self.create(user=user, activation_key=activation_key, **kwargs)
class RegistrationManager(ConfirmationManager):
def get_user(self, activation_key):
"""Get the user for the specified activation_key."""
try:
profile = self.get(activation_key=activation_key)
return profile.user
except self.model.DoesNotExist:
return None
def activate_user(self, activation_key, request=None):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
profile = None
statsd.incr('user.activate-error.does-not-exist')
reason = 'key not found'
if profile:
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
# We don't need the RegistrationProfile anymore, delete it.
profile.delete()
# If user registered as contributor, send them the
# welcome email.
if user.groups.filter(name=CONTRIBUTOR_GROUP):
self._send_email(
confirmation_profile=profile,
url=None,
subject=_('Welcome to SUMO!'),
text_template='users/email/contributor.ltxt',
html_template='users/email/contributor.html',
send_to=user.email,
contributor=user)
return user
else:
statsd.incr('user.activate-error.expired')
reason = 'key expired'
else:
statsd.incr('user.activate-error.invalid-key')
reason = 'invalid key'
log.warning(u'User activation failure ({r}): {k}'.format(
r=reason, k=activation_key))
return False
def create_inactive_user(self, username, password, email,
locale=settings.LANGUAGE_CODE,
text_template=None, html_template=None,
subject=None, email_data=None,
volunteer_interest=False, **kwargs):
"""
Create a new, inactive ``User`` and ``Profile``, generates a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
"""
new_user = User.objects.create_user(username, email, password)
new_user.is_active = False
new_user.save()
Profile.objects.create(user=new_user, locale=locale)
registration_profile = self.create_profile(new_user)
self.send_confirmation_email(
registration_profile,
text_template,
html_template,
subject,
email_data,
**kwargs)
if volunteer_interest:
statsd.incr('user.registered-as-contributor')
group = Group.objects.get(name=CONTRIBUTOR_GROUP)
new_user.groups.add(group)
return new_user
def send_confirmation_email(self, registration_profile,
text_template=None, html_template=None,
subject=None, email_data=None, **kwargs):
"""Send the user confirmation email."""
user_id = registration_profile.user.id
key = registration_profile.activation_key
self._send_email(
confirmation_profile=registration_profile,
url=reverse('users.activate', args=[user_id, key]),
subject=subject or _('Please confirm your email address'),
text_template=text_template or 'users/email/activate.ltxt',
html_template=html_template or 'users/email/activate.html',
send_to=registration_profile.user.email,
expiration_days=settings.ACCOUNT_ACTIVATION_DAYS,
username=registration_profile.user.username,
email_data=email_data,
**kwargs)
def delete_expired_users(self):
"""
Remove expired instances of this manager's object class.
Accounts to be deleted are identified by searching for
instances of this manager's object class with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
"""
days_valid = settings.ACCOUNT_ACTIVATION_DAYS
expired = datetime.now() - timedelta(days=days_valid)
prof_ids = self.filter(user__date_joined__lt=expired)
prof_ids = prof_ids.values_list('id', flat=True)
for chunk in chunked(prof_ids, 1000):
_delete_registration_profiles_chunk.apply_async(args=[chunk])
@task
def _delete_registration_profiles_chunk(data):
log_msg = u'Deleting {num} expired registration profiles.'
log.info(log_msg.format(num=len(data)))
qs = RegistrationProfile.objects.filter(id__in=data)
for profile in qs.select_related('user'):
user = profile.user
profile.delete()
if user and not user.is_active:
user.delete()
class EmailChangeManager(ConfirmationManager):
def send_confirmation_email(self, email_change, new_email):
"""Ask for confirmation before changing a user's email."""
self._send_email(
confirmation_profile=email_change,
url=reverse('users.confirm_email',
args=[email_change.activation_key]),
subject=_('Please confirm your email address'),
text_template='users/email/confirm_email.ltxt',
html_template='users/email/confirm_email.html',
send_to=new_email)
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key used for
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts.
"""
user = models.ForeignKey(User, unique=True, verbose_name=_lazy(u'user'))
activation_key = models.CharField(verbose_name=_lazy(u'activation key'),
max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _lazy(u'registration profile')
verbose_name_plural = _lazy(u'registration profiles')
def __unicode__(self):
return u'Registration information for %s' % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by:
1. The date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
exp_date = timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return self.user.date_joined + exp_date <= datetime.now()
activation_key_expired.boolean = True
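# Illustration only: with settings.ACCOUNT_ACTIVATION_DAYS = 8 (value
# hypothetical), a user who joined on 2020-01-01 00:00 has an expired key from
# 2020-01-09 00:00 onwards, because date_joined + timedelta(days=8) <= now()
# is then true and activation_key_expired() returns True.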
class EmailChange(models.Model):
"""Stores email with activation key when user requests a change."""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.ForeignKey(User, unique=True, verbose_name=_lazy(u'user'))
activation_key = models.CharField(verbose_name=_lazy(u'activation key'),
max_length=40)
email = models.EmailField(db_index=True, null=True)
objects = EmailChangeManager()
def __unicode__(self):
return u'Change email request to %s for %s' % (self.email, self.user)
class Deactivation(models.Model):
"""Stores user deactivation logs."""
user = models.ForeignKey(User, verbose_name=_lazy(u'user'),
related_name='+')
moderator = models.ForeignKey(User, verbose_name=_lazy(u'moderator'),
related_name='deactivations')
date = models.DateTimeField(default=datetime.now)
def __unicode__(self):
return u'%s was deactivated by %s on %s' % (self.user, self.moderator,
self.date)
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import unknown_tlv
from . import state
from . import sid_label
class tlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/segment-routing-sid-label-range/tlvs/tlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Sub-TLVs of the SID/Label range TLV
"""
__slots__ = (
"_path_helper", "_extmethods", "__unknown_tlv", "__state", "__sid_label"
)
_yang_name = "tlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__unknown_tlv = YANGDynClass(
base=unknown_tlv.unknown_tlv,
is_container="container",
yang_name="unknown-tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__sid_label = YANGDynClass(
base=sid_label.sid_label,
is_container="container",
yang_name="sid-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"router-information",
"tlvs",
"tlv",
"segment-routing-sid-label-range",
"tlvs",
"tlv",
]
def _get_unknown_tlv(self):
"""
Getter method for unknown_tlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv (container)
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
return self.__unknown_tlv
def _set_unknown_tlv(self, v, load=False):
"""
Setter method for unknown_tlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_unknown_tlv is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_unknown_tlv() directly.
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=unknown_tlv.unknown_tlv,
is_container="container",
yang_name="unknown-tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """unknown_tlv must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=unknown_tlv.unknown_tlv, is_container='container', yang_name="unknown-tlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__unknown_tlv = t
if hasattr(self, "_set"):
self._set()
def _unset_unknown_tlv(self):
self.__unknown_tlv = YANGDynClass(
base=unknown_tlv.unknown_tlv,
is_container="container",
yang_name="unknown-tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/state (container)
YANG Description: State parameters of the sub-TLVs of the SR/Label range TLV
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of the sub-TLVs of the SR/Label range TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_sid_label(self):
"""
Getter method for sid_label, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/sid_label (container)
YANG Description: Sub-TLV used to advertise the SID or label associated with the
subset of the SRGB being advertised
"""
return self.__sid_label
def _set_sid_label(self, v, load=False):
"""
Setter method for sid_label, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/sid_label (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid_label is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid_label() directly.
YANG Description: Sub-TLV used to advertise the SID or label associated with the
subset of the SRGB being advertised
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=sid_label.sid_label,
is_container="container",
yang_name="sid-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sid_label must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=sid_label.sid_label, is_container='container', yang_name="sid-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__sid_label = t
if hasattr(self, "_set"):
self._set()
def _unset_sid_label(self):
self.__sid_label = YANGDynClass(
base=sid_label.sid_label,
is_container="container",
yang_name="sid-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
unknown_tlv = __builtin__.property(_get_unknown_tlv)
state = __builtin__.property(_get_state)
sid_label = __builtin__.property(_get_sid_label)
_pyangbind_elements = OrderedDict(
[("unknown_tlv", unknown_tlv), ("state", state), ("sid_label", sid_label)]
)
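# Illustration only (not part of the generated bindings): the generated
# container exposes its children as read-only properties, e.g.
#
#   t = tlv()
#   t.state       # the 'state' container instance
#   t.sid_label   # the 'sid-label' container instance
#   t._path()     # ['network-instances', ..., 'segment-routing-sid-label-range', 'tlvs', 'tlv']
#
# Because every node here is is_config=False (operational state), values are
# populated by a backend through the private _set_*() methods rather than
# assigned directly.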
from . import unknown_tlv
from . import state
from . import sid_label
class tlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/segment-routing-sid-label-range/tlvs/tlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Sub-TLVs of the SID/Label range TLV
"""
__slots__ = (
"_path_helper", "_extmethods", "__unknown_tlv", "__state", "__sid_label"
)
_yang_name = "tlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__unknown_tlv = YANGDynClass(
base=unknown_tlv.unknown_tlv,
is_container="container",
yang_name="unknown-tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__sid_label = YANGDynClass(
base=sid_label.sid_label,
is_container="container",
yang_name="sid-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"router-information",
"tlvs",
"tlv",
"segment-routing-sid-label-range",
"tlvs",
"tlv",
]
def _get_unknown_tlv(self):
"""
Getter method for unknown_tlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv (container)
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
return self.__unknown_tlv
def _set_unknown_tlv(self, v, load=False):
"""
Setter method for unknown_tlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_unknown_tlv is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_unknown_tlv() directly.
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=unknown_tlv.unknown_tlv,
is_container="container",
yang_name="unknown-tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """unknown_tlv must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=unknown_tlv.unknown_tlv, is_container='container', yang_name="unknown-tlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__unknown_tlv = t
if hasattr(self, "_set"):
self._set()
def _unset_unknown_tlv(self):
self.__unknown_tlv = YANGDynClass(
base=unknown_tlv.unknown_tlv,
is_container="container",
yang_name="unknown-tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/state (container)
YANG Description: State parameters of the sub-TLVs of the SR/Label range TLV
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of the sub-TLVs of the SR/Label range TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_sid_label(self):
"""
Getter method for sid_label, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/sid_label (container)
YANG Description: Sub-TLV used to advertise the SID or label associated with the
subset of the SRGB being advertised
"""
return self.__sid_label
def _set_sid_label(self, v, load=False):
"""
Setter method for sid_label, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/sid_label (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid_label is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid_label() directly.
YANG Description: Sub-TLV used to advertise the SID or label associated with the
subset of the SRGB being advertised
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=sid_label.sid_label,
is_container="container",
yang_name="sid-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sid_label must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=sid_label.sid_label, is_container='container', yang_name="sid-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__sid_label = t
if hasattr(self, "_set"):
self._set()
def _unset_sid_label(self):
self.__sid_label = YANGDynClass(
base=sid_label.sid_label,
is_container="container",
yang_name="sid-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
unknown_tlv = __builtin__.property(_get_unknown_tlv)
state = __builtin__.property(_get_state)
sid_label = __builtin__.property(_get_sid_label)
_pyangbind_elements = OrderedDict(
[("unknown_tlv", unknown_tlv), ("state", state), ("sid_label", sid_label)]
)
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Creates the bar charts
All steps required to generate bar charts that use stock (time-agnostic) data.
-----
"""
# Built-in Modules
import itertools
import pickle
import sys
import traceback
# Third-party Modules
# Note the order and structure of matplotlib imports is intentional.
import matplotlib
matplotlib.use('AGG') # Note: this statement must be run before any other matplotlib imports are done.
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# My modules
import chart_tools
log = chart_tools.log
payload = chart_tools.payload
chart_data = payload['data']
p_dict = payload['p_dict']
k_dict = payload['k_dict']
props = payload['props']
chart_name = props['name']
plug_dict = payload['prefs']
annotation_values = []
bar_colors = []
x_labels = []
x_ticks = []
log['Threaddebug'].append(u"chart_bar_stock.py called.")
if plug_dict['verboseLogging']:
chart_tools.log['Threaddebug'].append(u"{0}".format(payload))
try:
def __init__():
pass
ax = chart_tools.make_chart_figure(width=p_dict['chart_width'], height=p_dict['chart_height'], p_dict=p_dict)
chart_tools.format_axis_x_ticks(ax=ax, p_dict=p_dict, k_dict=k_dict, logger=log)
chart_tools.format_axis_y(ax=ax, p_dict=p_dict, k_dict=k_dict, logger=log)
# ============================ Iterate the Bars =============================
for bar in chart_data:
b_num = bar['number']
color = bar['color_{i}'.format(i=b_num)]
suppress_bar = p_dict.get('suppressBar{i}'.format(i=b_num), False)
# x_labels.append(bar['legend_{i}'.format(i=b_num)])
x_ticks.append(b_num)
y_val = float(bar['val_{i}'.format(i=b_num)])
p_dict['data_array'].append(y_val)
bar_colors.append(color)
# ==================== Bar and Background Color the Same ====================
# If the bar color is the same as the background color, alert the user.
if color == p_dict['backgroundColor'] and not suppress_bar:
chart_tools.log['Warning'].append(u"[{name}] Area {i} color is the same as the background color (so "
u"you may not be able to see it).".format(name=chart_name, i=b_num))
# ============================= Bar Suppressed ==============================
# If the bar is suppressed, remind the user they suppressed it.
if suppress_bar:
chart_tools.log['Info'].append(u"[{name}] Bar {i} is suppressed by user setting. You can re-enable it in "
u"the device configuration menu.".format(name=chart_name, i=b_num))
# ============================ Display Zero Bars ============================
# Early versions of matplotlib will truncate leading and trailing bars where the value is zero.
# With this setting, we replace the Y values of zero with a very small positive value
# (0 becomes 1e-06). We get a slice of the original data for annotations.
# annotation_values.append(y_val)
annotation_values.append(bar['val_{i}'.format(i=b_num)])
if p_dict.get('showZeroBars', False):
if y_val == 0:
y_val = 1e-06
# ================================ Bar Width ================================
try:
bar_width = float(p_dict['barWidth'])
if bar_width == 0:
width = 0.8
else:
width = bar_width
except ValueError:
width = 0.8
chart_tools.log['Warning'].append(u"[{n}] Problem setting bar width. Check value "
u"({w}).".format(n=chart_name, w=p_dict['barWidth']))
# ============================== Plot the Bar ===============================
# Plot the bars. If 'suppressBar{i}' is True, we skip it.
if not suppress_bar:
ax.bar(b_num,
y_val,
width=width,
color=color,
bottom=None,
align='center',
edgecolor=color,
**k_dict['k_bar'])
# =============================== Annotations ===============================
# If annotations desired, plot those too.
if bar['annotate_{i}'.format(i=b_num)] and not suppress_bar:
ax.annotate(unicode(annotation_values[b_num-1]),
xy=(b_num, y_val),
xytext=(0, 0),
zorder=10,
**k_dict['k_annotation']
)
if bar['legend_{i}'.format(i=b_num)] == u"":
x_labels.append(unicode(b_num))
else:
x_labels.append(bar['legend_{i}'.format(i=b_num)])
# =============================== X Tick Bins ===============================
ax.set_xticks(x_ticks) # we set the tick value off the bar number.
ax.set_xticklabels(x_labels) # we set the tick label off the bar number (unless the user has set one explicitly).
ax.tick_params(axis='x', colors=p_dict['fontColor'])  # we set this because it's apparently reset by the two preceding lines.
chart_tools.format_axis_y1_min_max(p_dict=p_dict, logger=log)
chart_tools.format_axis_x_label(dev=props, p_dict=p_dict, k_dict=k_dict, logger=log)
chart_tools.format_axis_y1_label(p_dict=p_dict, k_dict=k_dict, logger=log)
# =========================== Transparent Border ============================
# Add a patch so that we can have transparent charts but a filled plot area.
if p_dict['transparent_charts'] and p_dict['transparent_filled']:
ax.add_patch(patches.Rectangle((0, 0), 1, 1,
transform=ax.transAxes,
facecolor=p_dict['faceColor'],
zorder=1
)
)
# ============================= Legend Properties =============================
# Legend should be plotted before any other lines are plotted (like averages or
# custom line segments).
if p_dict['showLegend']:
# Amend the headers if there are any custom legend entries defined.
counter = 1
final_headers = []
headers = [_.decode('utf-8') for _ in x_labels]
for header in headers:
if p_dict['bar{c}Legend'.format(c=counter)] == "":
final_headers.append(header)
else:
final_headers.append(p_dict['bar{c}Legend'.format(c=counter)])
counter += 1
# Set the legend
# Reorder the headers so that they fill by row instead of by column
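# e.g. with six labels ['1'..'6'] and legendColumns=3, the interleave below
# yields ['1', '4', '2', '5', '3', '6']; matplotlib fills the legend column by
# column, so the displayed rows read 1 2 3 / 4 5 6.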
num_col = int(p_dict['legendColumns'])
iter_headers = itertools.chain(*[final_headers[i::num_col] for i in range(num_col)])
final_headers = [_ for _ in iter_headers]
iter_colors = itertools.chain(*[bar_colors[i::num_col] for i in range(num_col)])
final_colors = [_ for _ in iter_colors]
legend = ax.legend(final_headers,
loc='upper center',
bbox_to_anchor=(0.5, -0.15),
ncol=int(p_dict['legendColumns']),
prop={'size': float(p_dict['legendFontSize'])}
)
# Set legend font color
[text.set_color(p_dict['fontColor']) for text in legend.get_texts()]
# Set legend bar colors
num_handles = len(legend.legendHandles)
[legend.legendHandles[_].set_color(final_colors[_]) for _ in range(0, num_handles)]
frame = legend.get_frame()
frame.set_alpha(0)
chart_tools.format_custom_line_segments(ax=ax, plug_dict=plug_dict, p_dict=p_dict, k_dict=k_dict, logger=log, orient="horiz")
chart_tools.format_grids(p_dict=p_dict, k_dict=k_dict, logger=log)
chart_tools.format_title(p_dict=p_dict, k_dict=k_dict, loc=(0.5, 0.98))
chart_tools.format_axis_y_ticks(p_dict=p_dict, k_dict=k_dict, logger=log)
# Note that subplots_adjust affects the space surrounding the subplots and
# not the fig.
plt.subplots_adjust(top=0.90,
bottom=0.20,
left=0.10,
right=0.90,
hspace=None,
wspace=None
)
try:
chart_tools.save(logger=log)
except OverflowError as err:
if "date value out of range" in traceback.format_exc(err):
chart_tools.log['Critical'].append(u"[{name}] Chart not saved. Try enabling Display Zero Bars in "
u"device settings.".format(name=payload['props']['name']))
except (KeyError, IndexError, ValueError, UnicodeEncodeError, ZeroDivisionError) as sub_error:
tb = traceback.format_exc()
chart_tools.log['Critical'].append(u"[{n}]\n{s}".format(n=chart_name, s=tb))
pickle.dump(chart_tools.log, sys.stdout)
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import contextlib
import unittest
import numpy as np
import six
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from paddle.fluid.dygraph.base import to_variable
from test_imperative_base import new_program_scope
from utils import DyGraphProgramDescTracerTestHelper, is_equal_program
from paddle.fluid.framework import _test_eager_guard, _in_eager_mode
class SimpleImgConvPool(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
pool_size,
pool_stride,
pool_padding=0,
pool_type='max',
global_pooling=False,
conv_stride=1,
conv_padding=0,
conv_dilation=1,
conv_groups=1,
act=None,
use_cudnn=False,
param_attr=None,
bias_attr=None):
super(SimpleImgConvPool, self).__init__()
self._conv2d = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=conv_stride,
padding=conv_padding,
dilation=conv_dilation,
groups=conv_groups,
param_attr=None,
bias_attr=None,
use_cudnn=use_cudnn)
self._pool2d = Pool2D(
pool_size=pool_size,
pool_type=pool_type,
pool_stride=pool_stride,
pool_padding=pool_padding,
global_pooling=global_pooling,
use_cudnn=use_cudnn)
def forward(self, inputs):
x = self._conv2d(inputs)
x = self._pool2d(x)
return x
class MNIST(fluid.dygraph.Layer):
def __init__(self):
super(MNIST, self).__init__()
self._simple_img_conv_pool_1 = SimpleImgConvPool(
1, 20, 5, 2, 2, act="relu")
self._simple_img_conv_pool_2 = SimpleImgConvPool(
20, 50, 5, 2, 2, act="relu")
self.pool_2_shape = 50 * 4 * 4
SIZE = 10
scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5
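# 'scale' is the standard deviation passed to the NormalInitializer of the
# fully-connected layer's weights below; it shrinks as pool_2_shape**2 * SIZE grows.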
self._fc = Linear(
self.pool_2_shape,
10,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=scale)),
act="softmax")
def forward(self, inputs):
x = self._simple_img_conv_pool_1(inputs)
x = self._simple_img_conv_pool_2(x)
x = fluid.layers.reshape(x, shape=[-1, self.pool_2_shape])
x = self._fc(x)
return x
class TestImperativeMnist(unittest.TestCase):
def reader_decorator(self, reader):
def _reader_imple():
for item in reader():
image = np.array(item[0]).reshape(1, 28, 28)
label = np.array(item[1]).astype('int64').reshape(1)
yield image, label
return _reader_imple
def func_test_mnist_float32(self):
seed = 90
epoch_num = 1
batch_size = 128
batch_num = 50
traced_layer = None
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
mnist = MNIST()
sgd = SGDOptimizer(
learning_rate=1e-3, parameter_list=mnist.parameters())
batch_py_reader = fluid.io.PyReader(capacity=1)
batch_py_reader.decorate_sample_list_generator(
paddle.batch(
self.reader_decorator(paddle.dataset.mnist.train()),
batch_size=batch_size,
drop_last=True),
places=fluid.CPUPlace())
mnist.train()
dy_param_init_value = {}
helper = DyGraphProgramDescTracerTestHelper(self)
program = None
for epoch in range(epoch_num):
for batch_id, data in enumerate(batch_py_reader()):
if batch_id >= batch_num:
break
img = data[0]
dy_x_data = img.numpy()
label = data[1]
label.stop_gradient = True
if batch_id % 10 == 0 and not _in_eager_mode():
cost, traced_layer = paddle.jit.TracedLayer.trace(
mnist, inputs=img)
if program is not None:
self.assertTrue(is_equal_program(program, traced_layer.program))
program = traced_layer.program
traced_layer.save_inference_model(
'./infer_imperative_mnist')
else:
cost = mnist(img)
if traced_layer is not None:
cost_static = traced_layer([img])
helper.assertEachVar(cost, cost_static)
loss = fluid.layers.cross_entropy(cost, label)
avg_loss = fluid.layers.mean(loss)
dy_out = avg_loss.numpy()
if epoch == 0 and batch_id == 0:
for param in mnist.parameters():
dy_param_init_value[param.name] = param.numpy()
avg_loss.backward()
sgd.minimize(avg_loss)
mnist.clear_gradients()
dy_param_value = {}
for param in mnist.parameters():
dy_param_value[param.name] = param.numpy()
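# Re-build and train the same model with the static-graph executor so the
# dygraph results collected above can be compared numerically below.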
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
mnist = MNIST()
sgd = SGDOptimizer(learning_rate=1e-3)
train_reader = paddle.batch(
paddle.dataset.mnist.train(),
batch_size=batch_size,
drop_last=True)
img = fluid.layers.data(
name='pixel', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
cost = mnist(img)
loss = fluid.layers.cross_entropy(cost, label)
avg_loss = fluid.layers.mean(loss)
sgd.minimize(avg_loss)
# initialize params and fetch them
static_param_init_value = {}
static_param_name_list = []
for param in mnist.parameters():
static_param_name_list.append(param.name)
out = exe.run(fluid.default_startup_program(),
fetch_list=static_param_name_list)
for i in range(len(static_param_name_list)):
static_param_init_value[static_param_name_list[i]] = out[i]
for epoch in range(epoch_num):
for batch_id, data in enumerate(train_reader()):
if batch_id >= batch_num:
break
static_x_data = np.array(
[x[0].reshape(1, 28, 28)
for x in data]).astype('float32')
y_data = np.array(
[x[1] for x in data]).astype('int64').reshape(
[batch_size, 1])
fetch_list = [avg_loss.name]
fetch_list.extend(static_param_name_list)
if traced_layer is not None:
traced_layer([static_x_data])
out = exe.run(
fluid.default_main_program(),
feed={"pixel": static_x_data,
"label": y_data},
fetch_list=fetch_list)
static_param_value = {}
static_out = out[0]
for i in range(1, len(out)):
static_param_value[static_param_name_list[i - 1]] = out[
i]
self.assertTrue(np.allclose(dy_x_data.all(), static_x_data.all()))
for key, value in six.iteritems(static_param_init_value):
self.assertTrue(np.allclose(value, dy_param_init_value[key]))
self.assertTrue(np.allclose(static_out, dy_out))
for key, value in six.iteritems(static_param_value):
self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-5))
def test_mnist_float32(self):
with _test_eager_guard():
self.func_test_mnist_float32()
self.func_test_mnist_float32()
if __name__ == '__main__':
unittest.main()
|
|
#! /usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
TEST BUILD & RUN
"""
import sys
from time import sleep
from shutil import copy
from os.path import join, abspath, dirname, isfile, isdir
# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from tools.utils import args_error
from tools.paths import BUILD_DIR
from tools.paths import RTOS_LIBRARIES
from tools.paths import RPC_LIBRARY
from tools.paths import ETH_LIBRARY
from tools.paths import USB_HOST_LIBRARIES, USB_LIBRARIES
from tools.paths import DSP_LIBRARIES
from tools.paths import FS_LIBRARY
from tools.paths import UBLOX_LIBRARY
from tools.tests import TESTS, Test, TEST_MAP
from tools.tests import TEST_MBED_LIB
from tools.tests import test_known, test_name_known
from tools.targets import TARGET_MAP
from tools.options import get_default_options_parser
from tools.build_api import build_project
from tools.build_api import mcu_toolchain_matrix
from utils import argparse_filestring_type
from utils import argparse_many
from utils import argparse_dir_not_parent
from argparse import ArgumentTypeError
from tools.toolchains import mbedToolchain
from tools.settings import CLI_COLOR_MAP
if __name__ == '__main__':
# Parse Options
parser = get_default_options_parser()
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("-p",
type=argparse_many(test_known),
dest="program",
help="The index of the desired test program: [0-%d]" % (len(TESTS)-1))
group.add_argument("-n",
type=argparse_many(test_name_known),
dest="program",
help="The name of the desired test program")
parser.add_argument("-j", "--jobs",
type=int,
dest="jobs",
default=0,
help="Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)")
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Verbose diagnostic output")
parser.add_argument("--silent",
action="store_true",
dest="silent",
default=False,
help="Silent diagnostic output (no copy, compile notification)")
parser.add_argument("-D",
action="append",
dest="macros",
help="Add a macro definition")
group.add_argument("-S", "--supported-toolchains",
action="store_true",
dest="supported_toolchains",
default=False,
help="Displays supported matrix of MCUs and toolchains")
parser.add_argument('-f', '--filter',
dest='general_filter_regex',
default=None,
help='For some commands you can use filter to filter out results')
# Local run
parser.add_argument("--automated", action="store_true", dest="automated",
default=False, help="Automated test")
parser.add_argument("--host", dest="host_test",
default=None, help="Host test")
parser.add_argument("--extra", dest="extra",
default=None, help="Extra files")
parser.add_argument("--peripherals", dest="peripherals",
default=None, help="Required peripherals")
parser.add_argument("--dep", dest="dependencies",
default=None, help="Dependencies")
parser.add_argument("--source", dest="source_dir", type=argparse_filestring_type,
default=None, help="The source (input) directory", action="append")
parser.add_argument("--duration", type=int, dest="duration",
default=None, help="Duration of the test")
parser.add_argument("--build", dest="build_dir", type=argparse_dir_not_parent(ROOT),
default=None, help="The build (output) directory")
parser.add_argument("-N", "--artifact-name", dest="artifact_name",
default=None, help="The built project's name")
parser.add_argument("-d", "--disk", dest="disk",
default=None, help="The mbed disk")
parser.add_argument("-s", "--serial", dest="serial",
default=None, help="The mbed serial port")
parser.add_argument("-b", "--baud", type=int, dest="baud",
default=None, help="The mbed serial baud rate")
group.add_argument("-L", "--list-tests", action="store_true", dest="list_tests",
default=False, help="List available tests in order and exit")
# Ideally, all the tests with a single "main" thread can be run with, or
# without the rtos, eth, usb_host, usb, dsp, fat, ublox
parser.add_argument("--rtos",
action="store_true", dest="rtos",
default=False, help="Link with RTOS library")
parser.add_argument("--rpc",
action="store_true", dest="rpc",
default=False, help="Link with RPC library")
parser.add_argument("--eth",
action="store_true", dest="eth",
default=False,
help="Link with Ethernet library")
parser.add_argument("--usb_host",
action="store_true",
dest="usb_host",
default=False,
help="Link with USB Host library")
parser.add_argument("--usb",
action="store_true",
dest="usb",
default=False,
help="Link with USB Device library")
parser.add_argument("--dsp",
action="store_true",
dest="dsp",
default=False,
help="Link with DSP library")
parser.add_argument("--fat",
action="store_true",
dest="fat",
default=False,
help="Link with FS ad SD card file system library")
parser.add_argument("--ublox",
action="store_true",
dest="ublox",
default=False,
help="Link with U-Blox library")
parser.add_argument("--testlib",
action="store_true",
dest="testlib",
default=False,
help="Link with mbed test library")
# Specify a different linker script
parser.add_argument("-l", "--linker", dest="linker_script",
type=argparse_filestring_type,
default=None, help="use the specified linker script")
options = parser.parse_args()
# Only prints matrix of supported toolchains
if options.supported_toolchains:
print mcu_toolchain_matrix(platform_filter=options.general_filter_regex)
exit(0)
# Print available tests in order and exit
if options.list_tests is True:
print '\n'.join(map(str, sorted(TEST_MAP.values())))
sys.exit()
# force program to "0" if a source dir is specified
if options.source_dir is not None:
p = 0
else:
# Program Number or name
p = options.program
# If 'p' was set via -n to list of numbers make this a single element integer list
if type(p) != type([]):
p = [p]
# Target
if options.mcu is None:
args_error(parser, "argument -m/--mcu is required")
mcu = options.mcu[0]
# Toolchain
if options.tool is None:
args_error(parser, "argument -t/--tool is required")
toolchain = options.tool[0]
if (options.program is None) and (not options.source_dir):
args_error(parser, "one of -p, -n, or --source is required")
if options.source_dir and not options.build_dir:
args_error(parser, "argument --build is required when argument --source is provided")
if options.color:
# This import happens late to prevent initializing colorization when we don't need it
import colorize
if options.verbose:
notify = mbedToolchain.print_notify_verbose
else:
notify = mbedToolchain.print_notify
notify = colorize.print_in_color_notifier(CLI_COLOR_MAP, notify)
else:
notify = None
# Test
for test_no in p:
test = Test(test_no)
if options.automated is not None: test.automated = options.automated
if options.dependencies is not None: test.dependencies = options.dependencies
if options.host_test is not None: test.host_test = options.host_test
if options.peripherals is not None: test.peripherals = options.peripherals
if options.duration is not None: test.duration = options.duration
if options.extra is not None: test.extra_files = options.extra
if not test.is_supported(mcu, toolchain):
print 'The selected test is not supported on target %s with toolchain %s' % (mcu, toolchain)
sys.exit()
# Linking with extra libraries
if options.rtos: test.dependencies.append(RTOS_LIBRARIES)
if options.rpc: test.dependencies.append(RPC_LIBRARY)
if options.eth: test.dependencies.append(ETH_LIBRARY)
if options.usb_host: test.dependencies.append(USB_HOST_LIBRARIES)
if options.usb: test.dependencies.append(USB_LIBRARIES)
if options.dsp: test.dependencies.append(DSP_LIBRARIES)
if options.fat: test.dependencies.append(FS_LIBRARY)
if options.ublox: test.dependencies.append(UBLOX_LIBRARY)
if options.testlib: test.dependencies.append(TEST_MBED_LIB)
build_dir = join(BUILD_DIR, "test", mcu, toolchain, test.id)
if options.source_dir is not None:
test.source_dir = options.source_dir
build_dir = options.source_dir
if options.build_dir is not None:
build_dir = options.build_dir
try:
bin_file = build_project(test.source_dir, build_dir, mcu, toolchain, test.dependencies, options.options,
linker_script=options.linker_script,
clean=options.clean,
verbose=options.verbose,
notify=notify,
silent=options.silent,
macros=options.macros,
jobs=options.jobs,
name=options.artifact_name)
print 'Image: %s' % bin_file
if options.disk:
# Simple copy to the mbed disk
copy(bin_file, options.disk)
if options.serial:
# Import pyserial: https://pypi.python.org/pypi/pyserial
from serial import Serial
sleep(TARGET_MAP[mcu].program_cycle_s)
serial = Serial(options.serial, timeout = 1)
if options.baud:
serial.setBaudrate(options.baud)
serial.flushInput()
serial.flushOutput()
try:
serial.sendBreak()
except:
# In linux a termios.error is raised in sendBreak and in setBreak.
# The following setBreak() is needed to release the reset signal on the target mcu.
try:
serial.setBreak(False)
except:
pass
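# Stream whatever the target prints on its serial port to the host's
# stdout until the user interrupts with CTRL+C.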
while True:
c = serial.read(512)
sys.stdout.write(c)
sys.stdout.flush()
except KeyboardInterrupt, e:
print "\n[CTRL+c] exit"
except Exception,e:
if options.verbose:
import traceback
traceback.print_exc(file=sys.stdout)
else:
print "[ERROR] %s" % str(e)
sys.exit(1)
|
|
"""Tests for the 'setuptools' package"""
import sys
import os
import unittest
import doctest
import distutils.core
import distutils.cmd
from distutils.errors import DistutilsOptionError, DistutilsPlatformError
from distutils.errors import DistutilsSetupError
from distutils.core import Extension
from distutils.version import LooseVersion
import setuptools.dist
import setuptools.depends as dep
from setuptools import Feature
from setuptools.depends import Require
def additional_tests():
import doctest, unittest
suite = unittest.TestSuite((
doctest.DocFileSuite(
os.path.join('tests', 'api_tests.txt'),
optionflags=doctest.ELLIPSIS, package='pkg_resources',
),
))
if sys.platform == 'win32':
suite.addTest(doctest.DocFileSuite('win_script_wrapper.txt'))
return suite
def makeSetup(**args):
"""Return distribution from 'setup(**args)', without executing commands"""
distutils.core._setup_stop_after = "commandline"
# Don't let system command line leak into tests!
args.setdefault('script_args',['install'])
try:
return setuptools.setup(**args)
finally:
distutils.core._setup_stop_after = None
class DependsTests(unittest.TestCase):
def testExtractConst(self):
if not hasattr(dep, 'extract_constant'):
# skip on non-bytecode platforms
return
def f1():
global x, y, z
x = "test"
y = z
# unrecognized name
self.assertEqual(dep.extract_constant(f1.func_code,'q', -1), None)
# constant assigned
self.assertEqual(dep.extract_constant(f1.func_code,'x', -1), "test")
# expression assigned
self.assertEqual(dep.extract_constant(f1.func_code,'y', -1), -1)
# recognized name, not assigned
self.assertEqual(dep.extract_constant(f1.func_code,'z', -1), None)
def testFindModule(self):
self.assertRaises(ImportError, dep.find_module, 'no-such.-thing')
self.assertRaises(ImportError, dep.find_module, 'setuptools.non-existent')
f,p,i = dep.find_module('setuptools.tests')
f.close()
def testModuleExtract(self):
if not hasattr(dep, 'get_module_constant'):
# skip on non-bytecode platforms
return
from email import __version__
self.assertEqual(
dep.get_module_constant('email','__version__'), __version__
)
self.assertEqual(
dep.get_module_constant('sys','version'), sys.version
)
self.assertEqual(
dep.get_module_constant('setuptools.tests','__doc__'),__doc__
)
def testRequire(self):
if not hasattr(dep, 'extract_constant'):
# skip on non-bytecode platforms
return
req = Require('Email','1.0.3','email')
self.assertEqual(req.name, 'Email')
self.assertEqual(req.module, 'email')
self.assertEqual(req.requested_version, '1.0.3')
self.assertEqual(req.attribute, '__version__')
self.assertEqual(req.full_name(), 'Email-1.0.3')
from email import __version__
self.assertEqual(req.get_version(), __version__)
self.assertTrue(req.version_ok('1.0.9'))
self.assertTrue(not req.version_ok('0.9.1'))
self.assertTrue(not req.version_ok('unknown'))
self.assertTrue(req.is_present())
self.assertTrue(req.is_current())
req = Require('Email 3000','03000','email',format=LooseVersion)
self.assertTrue(req.is_present())
self.assertTrue(not req.is_current())
self.assertTrue(not req.version_ok('unknown'))
req = Require('Do-what-I-mean','1.0','d-w-i-m')
self.assertTrue(not req.is_present())
self.assertTrue(not req.is_current())
req = Require('Tests', None, 'tests', homepage="http://example.com")
self.assertEqual(req.format, None)
self.assertEqual(req.attribute, None)
self.assertEqual(req.requested_version, None)
self.assertEqual(req.full_name(), 'Tests')
self.assertEqual(req.homepage, 'http://example.com')
paths = [os.path.dirname(p) for p in __path__]
self.assertTrue(req.is_present(paths))
self.assertTrue(req.is_current(paths))
class DistroTests(unittest.TestCase):
def setUp(self):
self.e1 = Extension('bar.ext',['bar.c'])
self.e2 = Extension('c.y', ['y.c'])
self.dist = makeSetup(
packages=['a', 'a.b', 'a.b.c', 'b', 'c'],
py_modules=['b.d','x'],
ext_modules = (self.e1, self.e2),
package_dir = {},
)
def testDistroType(self):
self.assertTrue(isinstance(self.dist,setuptools.dist.Distribution))
def testExcludePackage(self):
self.dist.exclude_package('a')
self.assertEqual(self.dist.packages, ['b','c'])
self.dist.exclude_package('b')
self.assertEqual(self.dist.packages, ['c'])
self.assertEqual(self.dist.py_modules, ['x'])
self.assertEqual(self.dist.ext_modules, [self.e1, self.e2])
self.dist.exclude_package('c')
self.assertEqual(self.dist.packages, [])
self.assertEqual(self.dist.py_modules, ['x'])
self.assertEqual(self.dist.ext_modules, [self.e1])
# test removals from unspecified options
makeSetup().exclude_package('x')
def testIncludeExclude(self):
# remove an extension
self.dist.exclude(ext_modules=[self.e1])
self.assertEqual(self.dist.ext_modules, [self.e2])
# add it back in
self.dist.include(ext_modules=[self.e1])
self.assertEqual(self.dist.ext_modules, [self.e2, self.e1])
# should not add duplicate
self.dist.include(ext_modules=[self.e1])
self.assertEqual(self.dist.ext_modules, [self.e2, self.e1])
def testExcludePackages(self):
self.dist.exclude(packages=['c','b','a'])
self.assertEqual(self.dist.packages, [])
self.assertEqual(self.dist.py_modules, ['x'])
self.assertEqual(self.dist.ext_modules, [self.e1])
def testEmpty(self):
dist = makeSetup()
dist.include(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
dist = makeSetup()
dist.exclude(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
def testContents(self):
self.assertTrue(self.dist.has_contents_for('a'))
self.dist.exclude_package('a')
self.assertTrue(not self.dist.has_contents_for('a'))
self.assertTrue(self.dist.has_contents_for('b'))
self.dist.exclude_package('b')
self.assertTrue(not self.dist.has_contents_for('b'))
self.assertTrue(self.dist.has_contents_for('c'))
self.dist.exclude_package('c')
self.assertTrue(not self.dist.has_contents_for('c'))
def testInvalidIncludeExclude(self):
self.assertRaises(DistutilsSetupError,
self.dist.include, nonexistent_option='x'
)
self.assertRaises(DistutilsSetupError,
self.dist.exclude, nonexistent_option='x'
)
self.assertRaises(DistutilsSetupError,
self.dist.include, packages={'x':'y'}
)
self.assertRaises(DistutilsSetupError,
self.dist.exclude, packages={'x':'y'}
)
self.assertRaises(DistutilsSetupError,
self.dist.include, ext_modules={'x':'y'}
)
self.assertRaises(DistutilsSetupError,
self.dist.exclude, ext_modules={'x':'y'}
)
self.assertRaises(DistutilsSetupError,
self.dist.include, package_dir=['q']
)
self.assertRaises(DistutilsSetupError,
self.dist.exclude, package_dir=['q']
)
class FeatureTests(unittest.TestCase):
def setUp(self):
self.req = Require('Distutils','1.0.3','distutils')
self.dist = makeSetup(
features={
'foo': Feature("foo",standard=True,require_features=['baz',self.req]),
'bar': Feature("bar", standard=True, packages=['pkg.bar'],
py_modules=['bar_et'], remove=['bar.ext'],
),
'baz': Feature(
"baz", optional=False, packages=['pkg.baz'],
scripts = ['scripts/baz_it'],
libraries=[('libfoo','foo/foofoo.c')]
),
'dwim': Feature("DWIM", available=False, remove='bazish'),
},
script_args=['--without-bar', 'install'],
packages = ['pkg.bar', 'pkg.foo'],
py_modules = ['bar_et', 'bazish'],
ext_modules = [Extension('bar.ext',['bar.c'])]
)
def testDefaults(self):
self.assertTrue(not
Feature(
"test",standard=True,remove='x',available=False
).include_by_default()
)
self.assertTrue(
Feature("test",standard=True,remove='x').include_by_default()
)
# Feature must have either kwargs, removes, or require_features
self.assertRaises(DistutilsSetupError, Feature, "test")
def testAvailability(self):
self.assertRaises(
DistutilsPlatformError,
self.dist.features['dwim'].include_in, self.dist
)
def testFeatureOptions(self):
dist = self.dist
self.assertTrue(
('with-dwim',None,'include DWIM') in dist.feature_options
)
self.assertTrue(
('without-dwim',None,'exclude DWIM (default)') in dist.feature_options
)
self.assertTrue(
('with-bar',None,'include bar (default)') in dist.feature_options
)
self.assertTrue(
('without-bar',None,'exclude bar') in dist.feature_options
)
self.assertEqual(dist.feature_negopt['without-foo'],'with-foo')
self.assertEqual(dist.feature_negopt['without-bar'],'with-bar')
self.assertEqual(dist.feature_negopt['without-dwim'],'with-dwim')
self.assertTrue(not 'without-baz' in dist.feature_negopt)
def testUseFeatures(self):
dist = self.dist
self.assertEqual(dist.with_foo,1)
self.assertEqual(dist.with_bar,0)
self.assertEqual(dist.with_baz,1)
self.assertTrue(not 'bar_et' in dist.py_modules)
self.assertTrue(not 'pkg.bar' in dist.packages)
self.assertTrue('pkg.baz' in dist.packages)
self.assertTrue('scripts/baz_it' in dist.scripts)
self.assertTrue(('libfoo','foo/foofoo.c') in dist.libraries)
self.assertEqual(dist.ext_modules,[])
self.assertEqual(dist.require_features, [self.req])
# If we ask for bar, it should fail because we explicitly disabled
# it on the command line
self.assertRaises(DistutilsOptionError, dist.include_feature, 'bar')
def testFeatureWithInvalidRemove(self):
self.assertRaises(
SystemExit, makeSetup, features = {'x':Feature('x', remove='y')}
)
class TestCommandTests(unittest.TestCase):
def testTestIsCommand(self):
test_cmd = makeSetup().get_command_obj('test')
self.assertTrue(isinstance(test_cmd, distutils.cmd.Command))
def testLongOptSuiteWNoDefault(self):
ts1 = makeSetup(script_args=['test','--test-suite=foo.tests.suite'])
ts1 = ts1.get_command_obj('test')
ts1.ensure_finalized()
self.assertEqual(ts1.test_suite, 'foo.tests.suite')
def testDefaultSuite(self):
ts2 = makeSetup(test_suite='bar.tests.suite').get_command_obj('test')
ts2.ensure_finalized()
self.assertEqual(ts2.test_suite, 'bar.tests.suite')
def testDefaultWModuleOnCmdLine(self):
ts3 = makeSetup(
test_suite='bar.tests',
script_args=['test','-m','foo.tests']
).get_command_obj('test')
ts3.ensure_finalized()
self.assertEqual(ts3.test_module, 'foo.tests')
self.assertEqual(ts3.test_suite, 'foo.tests.test_suite')
def testConflictingOptions(self):
ts4 = makeSetup(
script_args=['test','-m','bar.tests', '-s','foo.tests.suite']
).get_command_obj('test')
self.assertRaises(DistutilsOptionError, ts4.ensure_finalized)
def testNoSuite(self):
ts5 = makeSetup().get_command_obj('test')
ts5.ensure_finalized()
self.assertEqual(ts5.test_suite, None)
|
|
import re
import requests
import six
import urllib3
from ipa_command_args_options import IPA_COMMAND_ARGS_OPTIONS
from st2common.runners.base_action import Action
urllib3.disable_warnings()
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
CONNECTION_OPTIONS = [
'server',
'username',
'password',
'verify_ssl',
]
class IpaAction(Action):
def __init__(self, config):
super(IpaAction, self).__init__(config)
self.session = requests.Session()
def _resolve_connection(self, **kwargs):
""" Lookup connection, by name, specified by the 'connection' parameter
during action invocation from the connection dict stored in the config
"""
# if there are no connections specified in the config, we have nothing to look up
if not self.config.get('connections'):
return kwargs
# get the name of connection asked for during action invocation
con_name = kwargs.get('connection')
if con_name:
if con_name not in self.config['connections']:
raise KeyError('Unable to find connection named "{}"in config'
.format(con_name))
else:
con_name = 'default'
if con_name not in self.config['connections']:
return kwargs
# lookup the credential by name
connection = self.config['connections'][con_name]
for k, v in six.iteritems(connection):
# skip if the user explicitly set this property during action invocation
if kwargs.get(k) is not None:
continue
# only set the property if the value in the credential object is set
if v is not None:
kwargs[k] = v
return kwargs
def _validate_connection(self, connection):
"""Ensures that all required parameters are in the connection. If a
required parameter is missing a KeyError exception is raised.
:param connection: connection to validate
:returns: True if the connection is valid
"""
for key in CONNECTION_OPTIONS:
# skip this key if it is already present in the connection
if connection.get(key, None) is not None:
continue
if 'connection' in connection:
raise KeyError("/opt/stackstorm/configs/freeipa.yaml missing key connections.{}.{}"
.format(connection['connection'], key))
else:
raise KeyError("Because the 'connection' action parameter was"
" not specified, the following action parameter"
" is required: {0}".format(key))
return True
def _raise_for_status(self, response):
"""Raises stored :class:`requests.HTTPError`, if one occurred.
Copied from requests package, but adds in response.content to the exception
message.
"""
http_error_msg = ''
if isinstance(response.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = response.reason.decode('utf-8')
except UnicodeDecodeError:
reason = response.reason.decode('iso-8859-1')
else:
reason = response.reason
error_side = ''
if 400 <= response.status_code < 500:
error_side = 'Client'
elif 500 <= response.status_code < 600:
error_side = 'Server'
if error_side:
http_error_msg = "{0} {1} Error: {2} for url: {3}\n{4}".format(response.status_code,
error_side,
reason,
response.url,
response.content)
raise requests.HTTPError(http_error_msg, response=response)
def _ipa_url(self, server, endpoint=None):
if not endpoint:
endpoint = ''
return 'https://{0}/ipa{1}'.format(server, endpoint)
def _login(self, connection):
"""Attempts to login to the FreeIPA server given the connection information.
:param connection: dict containing values for 'server', 'username' and 'password'
:returns: login session token upon successful login
:rtype: string
"""
server = connection['server']
username = connection['username']
password = connection['password']
url = self._ipa_url(server, '/session/login_password')
headers = {
"referer": self._ipa_url(server),
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "text/plain"
}
payload = "user={0}&password={1}".format(username, password)
response = self.session.post(url,
headers=headers,
data=payload)
self._raise_for_status(response)
session = ''
if 'ipa_session' in response.cookies:
session = response.cookies['ipa_session']
else:
raise RuntimeError('IPA server did not return a cookie named "ipa_session"')
self.logger.debug('Successfully logged in as {0}'.format(username))
return session
def _create_payload(self, method, api_version=None, **kwargs):
# look up whether each kwarg belongs in args or options, based on the
# auto-generated data in IPA_COMMAND_ARGS_OPTIONS (generated by etc/generate_actions.py)
method_args_options = IPA_COMMAND_ARGS_OPTIONS[method]
# args go into an array
args = []
for arg in method_args_options['args']:
if arg in kwargs:
args.append(kwargs[arg])
# options go into a hash where it's the option name : value
options = {}
for option in method_args_options['options']:
if option in kwargs:
options[option] = kwargs[option]
# add in API version, if specified
if api_version:
options['version'] = api_version
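# Illustrative payload shape (hypothetical kwargs) for method 'user_add'
# with uid='jdoe', givenname='John', sn='Doe' and api_version='2.228':
#   {"id": 0, "method": "user_add",
#    "params": [["jdoe"], {"givenname": "John", "sn": "Doe", "version": "2.228"}]}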
payload = {
"id": 0,
"method": method,
"params": [
args,
options
]
}
return payload
def _execute(self, session, server, method, **kwargs):
"""Called by main entry point for the StackStorm actions to execute the operation.
:returns: json-encoded content of the response to the HTTP request, if any
"""
payload = self._create_payload(method, **kwargs)
url = self._ipa_url(server, '/session/json')
headers = {
"referer": self._ipa_url(server),
"Content-Type": "application/json",
"Accept": "application/json"
}
response = self.session.post(url,
headers=headers,
json=payload,
cookies={'ipa_session': session})
self._raise_for_status(response)
result_data = response.json()
if 'error' in result_data and result_data['error']:
return (False, result_data)
return (True, result_data)
def _get_api_version(self, session, server):
# get the server version
response = self._execute(session, server, method='ping')
ping_good = response[0]
data = response[1]
# retrieve server version from result and add it to the
# options for the real request.
# this avoids the error message:
# "API Version number was not sent, forward compatibility not
# guaranteed. Assuming server's API version, x.xxx"
api_version = None
if ((ping_good and
('result' in data) and
('summary' in data['result']))):
# parse the API version from a "summary" string that looks like:
# "IPA server version 4.5.0. API version 2.228"
match = re.search(r'API version ([0-9]+\.[0-9]+)',
data['result']['summary'])
if match:
api_version = match.group(1)
self.logger.debug('API Version: {0}'.format(api_version))
return api_version
def run(self, **kwargs):
connection = self._resolve_connection(**kwargs)
self._validate_connection(connection)
self.session.verify = connection['verify_ssl']
method = kwargs['method']
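# Reuse an existing session token if the caller supplied one; otherwise
# log in with the resolved connection to obtain a fresh session.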
if 'session' in kwargs and kwargs['session']:
server = kwargs['server']
session = kwargs['session']
else:
server = connection['server']
session = self._login(connection)
del kwargs['server']
del kwargs['session']
del kwargs['method']
del kwargs['verify_ssl']
if method == 'login':
return session
else:
api_version = self._get_api_version(session, server)
return self._execute(session, server, method=method, api_version=api_version, **kwargs)
|
|
"""Supporting definitions for the Python regression tests."""
if __name__ != 'test.support':
raise ImportError('support must be imported from the test package')
import contextlib
import errno
import functools
import gc
import socket
import sys
import os
import platform
import shutil
import warnings
import unittest
import importlib
import collections
__all__ = ["Error", "TestFailed", "ResourceDenied", "import_module",
"verbose", "use_resources", "max_memuse", "record_original_stdout",
"get_original_stdout", "unload", "unlink", "rmtree", "forget",
"is_resource_enabled", "requires", "find_unused_port", "bind_port",
"fcmp", "is_jython", "TESTFN", "HOST", "FUZZ", "findfile", "verify",
"vereq", "sortdict", "check_syntax_error", "open_urlresource",
"check_warnings", "CleanImport", "EnvironmentVarGuard",
"TransientResource", "captured_output", "captured_stdout",
"time_out", "socket_peer_reset", "ioerror_peer_reset",
"run_with_locale",
"set_memlimit", "bigmemtest", "bigaddrspacetest", "BasicTestRunner",
"run_unittest", "run_doctest", "threading_setup", "threading_cleanup",
"reap_children", "cpython_only", "check_impl_detail", "get_attribute"]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
"""Context manager to suppress package and module deprecation
warnings when importing them.
If ignore is False, this context manager has no effect."""
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
yield
else:
yield
def import_module(name, deprecated=False):
"""Import and return the module to be tested, raising SkipTest if
it is not available.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
with _ignore_deprecated_imports(deprecated):
try:
return importlib.import_module(name)
except ImportError as msg:
raise unittest.SkipTest(str(msg))
def _save_and_remove_module(name, orig_modules):
"""Helper function to save and remove a module from sys.modules
Return value is True if the module was in sys.modules and
False otherwise."""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
else:
del sys.modules[name]
return saved
def _save_and_block_module(name, orig_modules):
"""Helper function to save and block a module in sys.modules
Return value is True if the module was in sys.modules and
False otherwise."""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
sys.modules[name] = 0
return saved
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
"""Imports and returns a module, deliberately bypassing the sys.modules cache
and importing a fresh copy of the module. Once the import is complete,
the sys.modules cache is restored to its original state.
Modules named in fresh are also imported anew if needed by the import.
Importing of modules named in blocked is prevented while the fresh import
takes place.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
# NOTE: test_heapq and test_warnings include extra sanity checks to make
# sure that this utility function is working as expected
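# Illustrative use (assumption, mirroring those tests):
#   py_heapq = import_fresh_module('heapq', blocked=['_heapq'])
# forces the pure-Python heapq to be imported even when the C accelerator exists.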
with _ignore_deprecated_imports(deprecated):
# Keep track of modules saved for later restoration as well
# as those which just need a blocking entry removed
orig_modules = {}
names_to_remove = []
_save_and_remove_module(name, orig_modules)
try:
for fresh_name in fresh:
_save_and_remove_module(fresh_name, orig_modules)
for blocked_name in blocked:
if not _save_and_block_module(blocked_name, orig_modules):
names_to_remove.append(blocked_name)
fresh_module = importlib.import_module(name)
finally:
for orig_name, module in orig_modules.items():
sys.modules[orig_name] = module
for name_to_remove in names_to_remove:
del sys.modules[name_to_remove]
return fresh_module
def get_attribute(obj, name):
"""Get an attribute, raising SkipTest if AttributeError is raised."""
try:
attribute = getattr(obj, name)
except AttributeError:
raise unittest.SkipTest("module %s has no attribute %s" % (
obj.__name__, name))
else:
return attribute
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
def unlink(filename):
try:
os.unlink(filename)
except OSError:
pass
def rmtree(path):
try:
shutil.rmtree(path)
except OSError as e:
# Unix returns ENOENT, Windows returns ESRCH.
if e.errno not in (errno.ENOENT, errno.ESRCH):
raise
def forget(modname):
'''"Forget" a module was ever imported by removing it from sys.modules and
deleting any .pyc and .pyo files.'''
unload(modname)
for dirname in sys.path:
unlink(os.path.join(dirname, modname + '.pyc'))
# Deleting the .pyo file cannot be within the 'try' for the .pyc since
# the chance exists that there is no .pyc (and thus the 'try' statement
# is exited) but there is a .pyo file.
unlink(os.path.join(dirname, modname + '.pyo'))
def is_resource_enabled(resource):
"""Test whether a resource is enabled. Known resources are set by
regrtest.py."""
return use_resources is not None and resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True. The
possibility of False being returned occurs when regrtest.py is executing."""
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe(1).f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
HOST = 'localhost'
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
socket.error will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
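# Hedged usage sketch (not part of the original module): prefer bind_port()
# when the test creates the server socket itself; use find_unused_port()
# only when a port number must be handed to an external program before any
# socket exists on our side.
def _example_port_helpers():
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        port = bind_port(srv)       # binds srv to (HOST, 0) and returns the port
        srv.listen(1)
    finally:
        srv.close()
    ephemeral = find_unused_port()  # e.g. for an external "s_server -accept" run
    return port, ephemeral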
FUZZ = 1e-6
def fcmp(x, y): # fuzzy comparison function
if isinstance(x, float) or isinstance(y, float):
try:
fuzz = (abs(x) + abs(y)) * FUZZ
if abs(x-y) <= fuzz:
return 0
except:
pass
elif type(x) == type(y) and isinstance(x, (tuple, list)):
for i in range(min(len(x), len(y))):
outcome = fcmp(x[i], y[i])
if outcome != 0:
return outcome
return (len(x) > len(y)) - (len(x) < len(y))
return (x > y) - (x < y)
is_jython = sys.platform.startswith('java')
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
else:
TESTFN = '@test'
# Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
# TESTFN_UNICODE is a filename that can be encoded using the
# file system encoding, but *not* with the default (ascii) encoding
TESTFN_UNICODE = "@test-\xe0\xf2"
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNICODE_UNENCODEABLE is a filename that should *not* be
# able to be encoded by *either* the default or filesystem encoding.
# This test really only makes sense on Windows NT platforms
# which have special Unicode support in posixmodule.
if (not hasattr(sys, "getwindowsversion") or
sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
TESTFN_UNICODE_UNENCODEABLE = None
else:
# Japanese characters (I think - from bug 846133)
TESTFN_UNICODE_UNENCODEABLE = "@test-\u5171\u6709\u3055\u308c\u308b"
try:
# XXX - Note - should be using TESTFN_ENCODING here - but for
# Windows, "mbcs" currently always operates as if in
        # errors='ignore' mode - hence we get '?' characters rather than
# the exception. 'Latin1' operates as we expect - ie, fails.
# See [ 850997 ] mbcs encoding ignores errors
TESTFN_UNICODE_UNENCODEABLE.encode("Latin1")
except UnicodeEncodeError:
pass
else:
print('WARNING: The filename %r CAN be encoded by the filesystem. '
'Unicode filename tests may not be effective'
% TESTFN_UNICODE_UNENCODEABLE)
# Make sure we can write to TESTFN, try in /tmp if we can't
fp = None
try:
fp = open(TESTFN, 'w+')
except IOError:
TMP_TESTFN = os.path.join('/tmp', TESTFN)
try:
fp = open(TMP_TESTFN, 'w+')
TESTFN = TMP_TESTFN
del TMP_TESTFN
except IOError:
print(('WARNING: tests will fail, unable to write to: %s or %s' %
(TESTFN, TMP_TESTFN)))
if fp is not None:
fp.close()
unlink(TESTFN)
del fp
def findfile(file, here=__file__):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
if os.path.isabs(file):
return file
path = sys.path
path = [os.path.dirname(here)] + path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file
def verify(condition, reason='test failed'):
"""Verify that condition is true. If not, raise TestFailed.
The optional argument reason can be given to provide
a better error text.
"""
if not condition:
raise TestFailed(reason)
def vereq(a, b):
"""Raise TestFailed if a == b is false.
This is better than verify(a == b) because, in case of failure, the
error message incorporates repr(a) and repr(b) so you can see the
inputs.
Note that "not (a == b)" isn't necessarily the same as "a != b"; the
former is tested.
"""
if not (a == b):
raise TestFailed("%r == %r" % (a, b))
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = sorted(dict.items())
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement):
testcase.assertRaises(SyntaxError, compile, statement,
'<test string>', 'exec')
def open_urlresource(url, *args, **kw):
import urllib.request, urllib.parse
requires('urlfetch')
filename = urllib.parse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
for path in [os.path.curdir, os.path.pardir]:
fn = os.path.join(path, filename)
if os.path.exists(fn):
return open(fn, *args, **kw)
print('\tfetching %s ...' % url, file=get_original_stdout())
fn, _ = urllib.request.urlretrieve(url, filename)
return open(fn, *args, **kw)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self.warnings = warnings_list
def __getattr__(self, attr):
if self.warnings:
return getattr(self.warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
def reset(self):
del self.warnings[:]
@contextlib.contextmanager
def check_warnings():
with warnings.catch_warnings(record=True) as w:
yield WarningsRecorder(w)
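# Hedged usage sketch (not part of the original module; the warning text is
# illustrative): check_warnings() records emitted warnings so a test can
# inspect the most recent one through the WarningsRecorder attributes.
def _example_check_warnings():
    with check_warnings() as w:
        warnings.simplefilter("always")
        warnings.warn("example message", DeprecationWarning)
        assert w.category is DeprecationWarning
        assert str(w.message) == "example message"
        w.reset()
        assert len(w.warnings) == 0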
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
__import__("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
# In that case, we also need delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(collections.MutableMapping):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def __iter__(self):
return iter(self._environ)
def __len__(self):
return len(self._environ)
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
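# Hedged usage sketch (not part of the original module; the variable names
# are illustrative): EnvironmentVarGuard restores every variable it touched
# when the 'with' block exits.
def _example_environment_var_guard():
    with EnvironmentVarGuard() as env:
        env.set('EXAMPLE_LANG', 'C')    # changed only for the block
        env.unset('EXAMPLE_TZ')         # removed only for the block
        assert os.environ.get('EXAMPLE_LANG') == 'C'
    # on exit both variables are back to their original values (or absence)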
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.items():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
# Context managers that raise ResourceDenied when various issues
# with the Internet connection manifest themselves as exceptions.
time_out = TransientResource(IOError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(socket.error, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(IOError, errno=errno.ECONNRESET)
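# Hedged usage sketch (not part of the original module; the URL is
# illustrative): wrapping a flaky network call in the context managers above
# converts a matching IOError/socket.error into ResourceDenied, so regrtest
# can treat the failure as a missing resource rather than a broken test.
def _example_transient_resource():
    import urllib.request
    with time_out, socket_peer_reset, ioerror_peer_reset:
        return urllib.request.urlopen('http://www.example.com/').read()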
@contextlib.contextmanager
def captured_output(stream_name):
"""Run the 'with' statement body using a StringIO object in place of a
specific attribute on the sys module.
Example use (with 'stream_name=stdout')::
with captured_stdout() as s:
print("hello")
assert s.getvalue() == "hello"
"""
import io
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, io.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
return captured_output("stdout")
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
    deallocation is not guaranteed by the garbage collector.  (Even in CPython
    this can happen when reference cycles are involved.)  This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
gc.collect()
gc.collect()
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
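# Hedged usage sketch (not part of the original module; the locale names are
# illustrative): the decorator switches LC_ALL to the first locale that can
# be set, runs the function, then restores the original locale.
@run_with_locale('LC_ALL', 'de_DE.UTF-8', 'fr_FR.UTF-8', '')
def _example_locale_dependent_format():
    import locale
    return locale.format_string('%g', 1234.5, grouping=True)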
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
import re
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
def bigmemtest(minsize, memuse, overhead=5*_1M):
"""Decorator for bigmem tests.
'minsize' is the minimum useful size for the test (in arbitrary,
test-interpreted units.) 'memuse' is the number of 'bytes per size' for
the test, or a good estimate of it. 'overhead' specifies fixed overhead,
independent of the testsize, and defaults to 5Mb.
The decorator tries to guess a good value for 'size' and passes it to
the decorated test function. If minsize * memuse is more than the
allowed memory use (as defined by max_memuse), the test is skipped.
Otherwise, minsize is adjusted upward to use up to max_memuse.
"""
def decorator(f):
def wrapper(self):
# Retrieve values in case someone decided to adjust them
minsize = wrapper.minsize
memuse = wrapper.memuse
overhead = wrapper.overhead
if not max_memuse:
# If max_memuse is 0 (the default),
# we still want to run the tests with size set to a few kb,
                # to make sure they work. We still want to avoid using
                # too much memory, though; the check below fails noisily if
                # the small run would need more than 20 MB.
maxsize = 5147
self.failIf(maxsize * memuse + overhead > 20 * _1M)
else:
maxsize = int((max_memuse - overhead) / memuse)
if maxsize < minsize:
# Really ought to print 'test skipped' or something
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
# Try to keep some breathing room in memory use
maxsize = max(maxsize - 50 * _1M, minsize)
return f(self, maxsize)
wrapper.minsize = minsize
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
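# Hedged usage sketch (not part of the original module; the test class and
# sizes are illustrative): once regrtest has called e.g. set_memlimit('2.5G'),
# the decorator passes a 'size' scaled to the allowed memory use and skips
# the test when minsize * memuse would not fit.
class _ExampleBigMemTest(unittest.TestCase):
    @bigmemtest(minsize=_2G, memuse=1)
    def test_large_bytes(self, size):
        data = b'x' * size
        self.assertEqual(len(data), size)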
def precisionbigmemtest(size, memuse, overhead=5*_1M):
def decorator(f):
def wrapper(self):
size = wrapper.size
memuse = wrapper.memuse
overhead = wrapper.overhead
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if real_max_memuse and real_max_memuse < maxsize * memuse:
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
return f(self, maxsize)
wrapper.size = size
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
def requires_resource(resource):
    if is_resource_enabled(resource):
return _id
else:
return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
guardnames = sorted(guardnames.keys())
msg = msg.format(' or '.join(guardnames))
return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = list(guards.values())[0]
assert list(guards.values()) == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "errors occurred; run in verbose mode for details"
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
_run_suite(suite)
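# Hedged usage sketch (not part of the original module; the test case is
# illustrative): a test module's test_main() typically hands its TestCase
# classes straight to run_unittest().
class _ExampleTestCase(unittest.TestCase):
    def test_truth(self):
        self.assertTrue(True)
def _example_test_main():
    run_unittest(_ExampleTestCase)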
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = get_original_stdout()
try:
f, t = doctest.testmod(module, verbose=verbosity)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print('doctest (%s) ... %d tests with zero failures' %
(module.__name__, t))
return f, t
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
def threading_setup():
import threading
return len(threading._active), len(threading._limbo)
def threading_cleanup(num_active, num_limbo):
import threading
import time
_MAX_COUNT = 10
count = 0
while len(threading._active) != num_active and count < _MAX_COUNT:
count += 1
time.sleep(0.1)
count = 0
while len(threading._limbo) != num_limbo and count < _MAX_COUNT:
count += 1
time.sleep(0.1)
def reap_threads(func):
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
|
|
'CSV generator for Karnataka Budget PDFs'
import argparse
import csv
import logging
from logging.config import fileConfig
import re
import os
from parsers.pdf_to_csv import PDF2CSV
from parsers.keywords_extractor import KeywordsExtractor
fileConfig('parsers/logging_config.ini')
logger = logging.getLogger()
class KarnatakaBudgetCSVGenerator(PDF2CSV):
def __init__(self):
super(KarnatakaBudgetCSVGenerator, self).__init__()
self.keywords_extractor = KeywordsExtractor()
self.min_col_count = 8
self.max_col_count = 10
self.currency_slug = "(Rs. in Lakhs)"
self.empty_char_regex = r'(\xe2|\xc3|\x82|\xa2|\x80)'
self.parent_scheme_regex = r"([A-Z]+\.|\([a-z]+\)|\d{4,}|^[MDCLXVI]+ |^Total)"
self.voted_charged_column = True
def generate_karnataka_budget_csv(self, input_file, output_dir):
'''
Main call comes here setting global variable and calling PDF to CSV
'''
self.input_file = input_file
self.output_dir = output_dir
self.generate_csv_file(input_file, input_file.split(".pdf")[0] + ".csv",
is_header=True, identify_columns=True)
def modify_table_data(self, table):
'''
Modifying output of PDF to CSV to clean, wrangle and generate multiple CSV files
'''
pagewise_table = self.split_pages(table)
pagewise_table = self.extract_head_codes(pagewise_table)
pagewise_table = self.clean_pagewise_table(pagewise_table)
for page_num in pagewise_table:
unwanted_row_indices = []
page_table = pagewise_table[page_num]
header_found = False
for row_index in range(len(page_table)):
self.correct_column_count(row_index, page_table)
unwanted_header_row_indices = self.clean_header_values(row_index,
page_table)
if unwanted_header_row_indices:
unwanted_row_indices += unwanted_header_row_indices
header_found = True
elif header_found and self.voted_charged_column:
page_table[row_index].insert(2, "")
self.correct_combined_values(row_index, page_table)
unwanted_row_indices += self.merge_splitted_rows(page_table)
self.delete_unwanted_rows(unwanted_row_indices, page_table)
pagewise_headers = self.generate_page_headers_map(pagewise_table)
pagewise_table = self.extract_budget_codes(pagewise_table)
self.generate_pagewise_csv_files(pagewise_table, pagewise_headers)
def split_pages(self, table):
'''
Splitting main table into pagewise tables
'''
pagewise_table = {}
temp_list = []
page_num = 1
for row in table:
if row[0] == self.page_break.replace('"', ''):
if temp_list and len(temp_list[0]) > self.min_col_count:
pagewise_table[page_num] = temp_list
temp_list = []
page_num += 1
elif len(row) > self.min_col_count:
temp_list.append(row)
if temp_list and len(temp_list[0]) > self.min_col_count:
pagewise_table[page_num] = temp_list
return pagewise_table
def extract_head_codes(self, pagewise_table):
'''
        Extracting Head codes from scheme descriptions; subclasses can override this to customise it
'''
return pagewise_table
def extract_budget_codes(self, pagewise_table):
'''
        Extracting Budget codes from scheme descriptions; subclasses can override this to customise it
'''
return pagewise_table
def clean_pagewise_table(self, pagewise_table):
'''
        Cleansing pagewise tables to remove Kannada chars (Windows-1252 encoded)
'''
for page_num in pagewise_table:
page_table = pagewise_table[page_num]
unwanted_row_indices = {}
for row_index in range(len(page_table)):
for col_index in range(len(page_table[row_index])):
val = page_table[row_index][col_index]
val = re.sub(r'(\xe2|\x80|vjU)', '', val).replace('\x90', '-')
if '\\x' in val.encode('string-escape'):
if " " in val:
val = re.sub(r"\s{2,}", " ", val)
val_list = val.split(" ")
clear_index = 0
for val_index in range(len(val_list)):
if not '\\x' in val_list[val_index].encode('string-escape') and re.findall(r"[a-zA-Z0-9\.\(\)\&\-\+]{1,}", val_list[val_index]):
if clear_index == 0:
clear_index = val_index
else:
clear_index = 0
if clear_index > 0:
val = " ".join(val.split(" ")[clear_index:])
else:
val = ""
else:
val = ""
page_table[row_index][col_index] = val.strip()
if not "".join(page_table[row_index]).strip():
unwanted_row_indices[row_index] = True
self.delete_unwanted_rows(unwanted_row_indices.keys(), page_table)
return pagewise_table
def correct_column_count(self,row_index, page_table):
'''Inserting extra columns wherever required
'''
while len(page_table[row_index]) < self.max_col_count:
page_table[row_index].insert(0, "")
def correct_combined_values(self, row_index, page_table):
'''Correcting Grand Total and Voted/Charged values which got merged in original doc
'''
if page_table[row_index][1] == "GRAND TOTAL (PLAN + NON-PLAN)":
col_index = 2
while col_index < len(page_table[row_index]):
if not "." in page_table[row_index][col_index]:
page_table[row_index][col_index+1] = page_table[row_index][col_index] + page_table[row_index][col_index+1]
page_table[row_index][col_index] = "P+NP ="
col_index += 2
voted_charged_match = re.findall(r"(\s){,1}(Voted|Charged)$", page_table[row_index][1])
if voted_charged_match:
voted_charged_match = "".join(map(list, voted_charged_match)[0])
page_table[row_index][1] = page_table[row_index][1].split(voted_charged_match)[0]
page_table[row_index][2] = voted_charged_match.strip()
def clean_header_values(self, row_index, page_table):
        '''Cleaning and generating correct header values and unwanted row indices
'''
unwanted_row_indices = []
if page_table[row_index][2] == "Plan":
page_table[row_index][0] = "Budget Code"
header_1_val = ""
for index in range(row_index+1):
header_1_val += " " + page_table[index][1]
if index != row_index:
unwanted_row_indices.append(index)
page_table[row_index][1] = header_1_val.strip()
year_index = 2
for col_index in range(2, len(page_table[row_index])):
if col_index%2 == 0 and col_index != year_index:
year_index += 2
if not " " in page_table[0][year_index+1]:
page_table[0][year_index+1] = " " + page_table[0][year_index+1]
page_table[row_index][col_index] = page_table[0][year_index] + page_table[0][year_index+1] + " " + page_table[row_index][col_index] + " " + self.currency_slug
page_table[row_index].insert(2, 'Voted/Charged')
elif page_table[row_index][2] == "2":
unwanted_row_indices.append(row_index)
return unwanted_row_indices
def merge_splitted_rows(self, page_table):
        '''Merging split rows into one
'''
unwanted_row_indices = {}
for row_index in range(5, len(page_table)):
if re.match(self.parent_scheme_regex, page_table[row_index][1]) or page_table[row_index][0]:
continue
elif not "".join(page_table[row_index][2:]):
parent_row_index = row_index
while not (re.match(self.parent_scheme_regex, page_table[parent_row_index][1]) or page_table[parent_row_index][0]):
parent_row_index -= 1
if parent_row_index in unwanted_row_indices:
continue
if page_table[row_index][1].strip():
if len(page_table) < abs(parent_row_index):
page_table[parent_row_index][1] += ' ' + page_table[row_index][1].strip()
unwanted_row_indices[row_index] = True
return unwanted_row_indices.keys()
def delete_unwanted_rows(self, unwanted_row_indices, page_table):
'''Deleting unwanted row indices from page tables
'''
unwanted_row_indices.sort()
num = 0
for row_index in unwanted_row_indices:
page_table.pop(row_index-num)
num += 1
def generate_page_headers_map(self, pagewise_table):
'''Generating pagewise headers for tables
'''
page_headers_map = {}
for page_num in pagewise_table:
keyword_list = self.keywords_extractor.get_bold_text_phrases(self.input_file, keyword_xpath="//text()", is_other_starting_phrases=True, single_word=True, page_num=page_num, lower_case=False)
page_header = []
for keyword_index in range(len(keyword_list)):
keyword = keyword_list[keyword_index]
keyword = re.sub(self.empty_char_regex, '', keyword).replace('\x90', '-')
if not '\\x' in keyword.encode('string-escape') and not "<!--" in keyword:
if " ".join(self.currency_slug.split(" ")[1:]) in keyword or "in Lakhs" in keyword:
break
keyword = keyword.decode('unicode_escape').encode('ascii','ignore').strip()
keyword = re.sub(r"\s{2,}", " ", keyword)
if "VOLUME" in keyword and keyword_index > 0:
keyword = keyword + keyword_list[keyword_index-1].split(":")[-1]
page_header.append(keyword.strip())
page_headers_map[page_num] = "|".join(page_header[:3])
return page_headers_map
def write_page_table(self, file_name, file_table):
'''Creating new file and writing file table in it
'''
file_name = file_name.split("|")[2].strip() + "|" + file_name.split("|")[1].strip()
file_name = file_name.replace("/", "|")
out_csv_file = open(self.output_dir + "/" + file_name + ".csv", "wb")
csv_writer = csv.writer(out_csv_file, delimiter=',')
for row in file_table:
csv_writer.writerow(row)
out_csv_file.close()
def generate_pagewise_csv_files(self, pagewise_table, pagewise_headers):
'''Generating pagewise CSV files
'''
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
file_name = ""
file_table = []
for page_num in pagewise_table:
if file_name and file_name != pagewise_headers[page_num]:
self.write_page_table(file_name, file_table)
file_table = pagewise_table[page_num]
file_name = pagewise_headers[page_num]
else:
if not file_name:
file_table += pagewise_table[page_num]
elif len(pagewise_table[page_num]) <= 1:
continue
else:
if re.match(self.parent_scheme_regex, pagewise_table[page_num][1][1]) or pagewise_table[page_num][1][0]:
file_table += pagewise_table[page_num][1:]
elif not "".join(pagewise_table[page_num][1][2:]):
file_table[-1][1] += " " + pagewise_table[page_num][1][1]
file_table += pagewise_table[page_num][2:]
file_name = pagewise_headers[page_num]
if file_table:
self.write_page_table(file_name, file_table)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generates CSV files from Karnataka State Budget PDF Document")
parser.add_argument("input_file", help="Input filepath for budget document")
parser.add_argument("output_dir", help="Output directory for budget document")
args = parser.parse_args()
obj = KarnatakaBudgetCSVGenerator()
if not args.input_file or not args.output_dir:
print("Please input directory to begin CSV extraction")
else:
obj.generate_karnataka_budget_csv(args.input_file, args.output_dir)
|
|
##
# Copyright (c) 2013-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for txdav.caldav.datastore.utils
"""
from pycalendar.datetime import DateTime
from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest
from txdav.caldav.datastore.scheduling.utils import getCalendarObjectForRecord, \
extractEmailDomain, uidFromCalendarUserAddress
from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
now = DateTime.getToday().getYear()
ORGANIZER_ICS = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Apple Inc.//iCal 4.0.1//EN
CALSCALE:GREGORIAN
BEGIN:VEVENT
CREATED:20100303T181216Z
UID:685BC3A1-195A-49B3-926D-388DDACA78A6
TRANSP:OPAQUE
SUMMARY:Ancient event
DTSTART:%(year)s0307T111500Z
DURATION:PT1H
DTSTAMP:20100303T181220Z
ORGANIZER:urn:uuid:user01
ATTENDEE;PARTSTAT=ACCEPTED:urn:uuid:user01
ATTENDEE;PARTSTAT=ACCEPTED:urn:uuid:user02
SEQUENCE:2
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n") % {"year": now + 1}
ATTENDEE_ICS = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Apple Inc.//iCal 4.0.1//EN
CALSCALE:GREGORIAN
BEGIN:VEVENT
CREATED:20100303T181216Z
UID:685BC3A1-195A-49B3-926D-388DDACA78A6
TRANSP:OPAQUE
SUMMARY:Ancient event
DTSTART:%(year)s0307T111500Z
DURATION:PT1H
DTSTAMP:20100303T181220Z
ORGANIZER:urn:uuid:user01
ATTENDEE;PARTSTAT=ACCEPTED:urn:uuid:user01
ATTENDEE;PARTSTAT=ACCEPTED:urn:uuid:user02
SEQUENCE:2
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n") % {"year": now + 1}
class RecipientCopy(CommonCommonTests, unittest.TestCase):
"""
    Tests for looking up calendar objects by owner record and UID, and for
    the related calendar user address utility functions
"""
metadata = {
"accessMode": "PUBLIC",
"isScheduleObject": True,
"scheduleTag": "abc",
"scheduleEtags": (),
"hasPrivateComment": False,
}
requirements = {
"user01" : {
"calendar1" : {
"1.ics" : (ORGANIZER_ICS, metadata,),
}
},
"user02" : {
"calendar2" : {
"2.ics" : (ATTENDEE_ICS, metadata,),
},
"calendar3" : {
"3.ics" : (ATTENDEE_ICS, metadata,),
}
}
}
@inlineCallbacks
def setUp(self):
yield super(RecipientCopy, self).setUp()
yield self.buildStoreAndDirectory()
yield self.populate()
@inlineCallbacks
def populate(self):
yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
self.notifierFactory.reset()
def storeUnderTest(self):
"""
Create and return a L{CalendarStore} for testing.
"""
return self._sqlCalendarStore
@inlineCallbacks
def test_getCalendarObjectForRecord(self):
"""
Test that L{txdav.caldav.datastore.scheduling.utils.getCalendarObjectForRecord} detects and removes
resources with duplicate UIDs in the same calendar home.
"""
# Check that expected resources are present
txn = self.transactionUnderTest()
for home_uid, calendar_name, resource_name in (
("user01", "calendar1", "1.ics",),
("user02", "calendar2", "2.ics",),
("user02", "calendar3", "3.ics",),
):
resource = (yield self.calendarObjectUnderTest(txn, name=resource_name, calendar_name=calendar_name, home=home_uid))
self.assertNotEqual(resource, None)
yield self.commit()
# Look up resource by UID in home where only one exists
principal = yield self.directory.recordWithUID(u"user01")
txn = self.transactionUnderTest()
resource = (yield getCalendarObjectForRecord(txn, principal, "685BC3A1-195A-49B3-926D-388DDACA78A6"))
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource._parentCollection.name(), "calendar1")
self.assertEqual(resource._parentCollection.viewerHome().uid(), "user01")
yield self.commit()
# Check that expected resources are still present
txn = self.transactionUnderTest()
for home_uid, calendar_name, resource_name in (
("user01", "calendar1", "1.ics",),
("user02", "calendar2", "2.ics",),
("user02", "calendar3", "3.ics",),
):
resource = (yield self.calendarObjectUnderTest(txn, name=resource_name, calendar_name=calendar_name, home=home_uid))
self.assertNotEqual(resource, None)
yield self.commit()
# Look up resource by UID in home where two exists
principal = yield self.directory.recordWithUID("user02")
txn = self.transactionUnderTest()
resource = (yield getCalendarObjectForRecord(txn, principal, "685BC3A1-195A-49B3-926D-388DDACA78A6"))
self.assertTrue(resource.name() in ("2.ics", "3.ics",))
self.assertTrue(resource._parentCollection.name() in ("calendar2", "calendar3",))
self.assertEqual(resource._parentCollection.viewerHome().uid(), "user02")
yield self.commit()
# Check that expected resources are still present, but the duplicate missing
txn = self.transactionUnderTest()
resource = (yield self.calendarObjectUnderTest(txn, name="1.ics", calendar_name="calendar1", home="user01"))
self.assertNotEqual(resource, None)
resource2 = (yield self.calendarObjectUnderTest(txn, name="2.ics", calendar_name="calendar2", home="user02"))
resource3 = (yield self.calendarObjectUnderTest(txn, name="3.ics", calendar_name="calendar3", home="user02"))
self.assertTrue((resource2 is not None) ^ (resource3 is not None))
yield self.commit()
# Look up resource where principal exists but home does not
principal = yield self.directory.recordWithUID("user102") # ASKCYRUS: but user102 doesn't exist
txn = self.transactionUnderTest()
resource = (yield getCalendarObjectForRecord(txn, principal, "685BC3A1-195A-49B3-926D-388DDACA78A6"))
self.assertTrue(resource is None)
yield self.commit()
def test_uidFromCalendarUserAddress(self):
"""
Test that L{uidFromCalendarUserAddress} returns the expected results.
"""
data = (
("urn:x-uid:foobar", "foobar"),
("urn:uuid:foobar", "foobar"),
("urn:uuid:49DE7436-F01C-4AD8-B685-A94303F40301", "49DE7436-F01C-4AD8-B685-A94303F40301"),
("/principals/__uids__/foobar", "foobar"),
("/principals/users/foobar", None),
("/principals/groups/foobar", None),
("mailto:[email protected]", None),
)
for cuaddr, uid in data:
self.assertEqual(uidFromCalendarUserAddress(cuaddr), uid)
def test_extractEmailDomain(self):
"""
Test that L{extractEmailDomain} returns the expected results.
"""
data = (
("mailto:[email protected]", "example.com"),
("mailto:[email protected]?subject=bar", "example.com"),
("mailto:foo", ""),
("mailto:foo@", ""),
("http://foobar.com", ""),
)
for mailto, domain in data:
self.assertEqual(extractEmailDomain(mailto), domain)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import operator
import uuid
from operator import itemgetter
from qonos.common import exception
from qonos.common import timeutils
import qonos.db.db_utils as db_utils
from qonos.openstack.common._i18n import _
DATA = {
'schedules': {},
'schedule_metadata': {},
'jobs': {},
'job_metadata': {},
'workers': {},
'job_faults': {},
}
def configure_db():
pass
def reset():
global DATA
for k in DATA:
DATA[k] = {}
def _gen_base_attributes(item_id=None):
values = {}
if item_id is None:
values['id'] = str(uuid.uuid4())
values['created_at'] = timeutils.utcnow()
values['updated_at'] = timeutils.utcnow()
return copy.deepcopy(values)
def _schedule_create(values):
global DATA
DATA['schedules'][values['id']] = values
_schedule_meta_init(values['id'])
return copy.deepcopy(values)
def _do_pagination(items, marker, limit):
"""
This method mimics the behavior of sqlalchemy paginate_query.
It takes items and pagination parameters - 'limit' and 'marker'
to filter out the items to be returned. Items are sorted in
lexicographical order based on the sort key - 'id'.
"""
items = sorted(items, key=itemgetter('id'))
start = 0
end = -1
if marker is None:
start = 0
else:
for i, item in enumerate(items):
if item['id'] == marker:
start = i + 1
break
else:
msg = _('Marker %s not found') % marker
raise exception.NotFound(explanation=msg)
end = start + limit if limit is not None else None
return items[start:end]
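# Hedged usage sketch (not part of the original module; the item ids are
# illustrative): items come back sorted by 'id', a page starts just after the
# marker id, and 'limit' caps the page size.
def _example_do_pagination():
    items = [{'id': 'c'}, {'id': 'a'}, {'id': 'b'}, {'id': 'd'}]
    first_page = _do_pagination(items, marker=None, limit=2)    # ids 'a', 'b'
    second_page = _do_pagination(items, marker='b', limit=2)    # ids 'c', 'd'
    return first_page, second_page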
def schedule_get_all(filter_args={}):
SCHEDULE_BASE_FILTERS = ['next_run_after', 'next_run_before',
'tenant', 'limit', 'marker']
schedules = copy.deepcopy(DATA['schedules'].values())
for schedule in schedules:
schedule['schedule_metadata'] =\
copy.deepcopy(schedule_meta_get_all(schedule['id']))
schedules_mutate = copy.deepcopy(schedules)
if 'next_run_before' in filter_args:
for schedule in schedules:
if not schedule['next_run'] <= filter_args['next_run_before']:
if schedule in schedules_mutate:
del schedules_mutate[schedules_mutate.index(schedule)]
filter_args.pop('next_run_before')
if 'next_run_after' in filter_args:
for schedule in schedules:
if not schedule['next_run'] >= filter_args['next_run_after']:
if schedule in schedules_mutate:
del schedules_mutate[schedules_mutate.index(schedule)]
filter_args.pop('next_run_after')
if filter_args.get('tenant') is not None:
for schedule in schedules:
if schedule['tenant'] != filter_args['tenant']:
if schedule in schedules_mutate:
del schedules_mutate[schedules_mutate.index(schedule)]
filter_args.pop('tenant')
if filter_args.get('action') is not None:
for schedule in schedules:
if schedule['action'] != filter_args['action']:
if schedule in schedules_mutate:
del schedules_mutate[schedules_mutate.index(schedule)]
filter_args.pop('action')
for filter_key in filter_args.keys():
if filter_key not in SCHEDULE_BASE_FILTERS:
filter_value = filter_args[filter_key]
for schedule in schedules:
if schedule['schedule_metadata']:
for schedule_metadata in schedule['schedule_metadata']:
if not(schedule_metadata['key'] == filter_key and
schedule_metadata['value'] == filter_value):
try:
schedule_mutated = \
schedules_mutate.index(schedule)
del schedules_mutate[schedule_mutated]
except Exception:
pass
break
else:
if schedule in schedules_mutate:
del schedules_mutate[schedules_mutate.index(schedule)]
marker = filter_args.get('marker')
limit = filter_args.get('limit')
schedules_mutate = _do_pagination(schedules_mutate, marker, limit)
return schedules_mutate
def schedule_get_by_id(schedule_id):
if schedule_id not in DATA['schedules']:
raise exception.NotFound()
schedule = copy.deepcopy(DATA['schedules'][schedule_id])
schedule['schedule_metadata'] = \
copy.deepcopy(schedule_meta_get_all(schedule_id))
return schedule
def schedule_create(schedule_values):
db_utils.validate_schedule_values(schedule_values)
values = copy.deepcopy(schedule_values)
schedule = {}
metadata = []
if 'schedule_metadata' in values:
metadata = values['schedule_metadata']
del values['schedule_metadata']
schedule.update(values)
item_id = values.get('id')
schedule.update(_gen_base_attributes(item_id=item_id))
schedule = _schedule_create(schedule)
for metadatum in metadata:
schedule_meta_create(schedule['id'], metadatum)
return schedule_get_by_id(schedule['id'])
def schedule_update(schedule_id, schedule_values):
global DATA
values = schedule_values.copy()
if schedule_id not in DATA['schedules']:
raise exception.NotFound()
metadata = None
if 'schedule_metadata' in values:
metadata = values['schedule_metadata']
del values['schedule_metadata']
if len(values) > 0:
schedule = DATA['schedules'][schedule_id]
schedule['updated_at'] = timeutils.utcnow()
schedule.update(values)
if metadata is not None:
DATA['schedule_metadata'][schedule_id] = {}
for metadatum in metadata:
schedule_meta_create(schedule_id, metadatum)
return schedule_get_by_id(schedule_id)
def schedule_test_and_set_next_run(schedule_id, expected_next_run, next_run):
global DATA
schedule = DATA['schedules'].get(schedule_id)
if not schedule:
raise exception.NotFound()
if expected_next_run:
expected_next_run = expected_next_run.replace(tzinfo=None)
current_next_run = schedule.get('next_run')
if current_next_run:
current_next_run = current_next_run.replace(tzinfo=None)
if expected_next_run != current_next_run:
raise exception.NotFound()
if next_run:
next_run = next_run.replace(tzinfo=None)
schedule['next_run'] = next_run
def schedule_delete(schedule_id):
global DATA
if schedule_id not in DATA['schedules']:
raise exception.NotFound()
del DATA['schedules'][schedule_id]
def _schedule_meta_init(schedule_id):
if DATA['schedule_metadata'].get(schedule_id) is None:
DATA['schedule_metadata'][schedule_id] = {}
def schedule_meta_create(schedule_id, values):
global DATA
if DATA['schedules'].get(schedule_id) is None:
msg = _('Schedule %s could not be found') % schedule_id
raise exception.NotFound(message=msg)
_schedule_meta_init(schedule_id)
try:
_check_meta_exists(schedule_id, values['key'])
except exception.NotFound:
pass
else:
raise exception.Duplicate()
meta = {}
values['schedule_id'] = schedule_id
meta.update(values)
meta.update(_gen_base_attributes())
DATA['schedule_metadata'][schedule_id][values['key']] = meta
return copy.deepcopy(meta)
def _check_schedule_exists(schedule_id):
if DATA['schedules'].get(schedule_id) is None:
msg = _('Schedule %s could not be found') % schedule_id
raise exception.NotFound(message=msg)
def _check_meta_exists(schedule_id, key):
_check_schedule_exists(schedule_id)
if (DATA['schedule_metadata'].get(schedule_id) is None or
DATA['schedule_metadata'][schedule_id].get(key) is None):
msg = (_('Meta %(key)s could not be found '
'for Schedule %(schedule_id)s ') %
{'key': key, 'schedule_id': schedule_id})
raise exception.NotFound(message=msg)
def schedule_meta_get_all(schedule_id):
_check_schedule_exists(schedule_id)
_schedule_meta_init(schedule_id)
return DATA['schedule_metadata'][schedule_id].values()
def schedule_metadata_update(schedule_id, values):
global DATA
if DATA['schedules'].get(schedule_id) is None:
msg = _('Schedule %s could not be found') % schedule_id
raise exception.NotFound(message=msg)
DATA['schedule_metadata'][schedule_id] = {}
for metadatum in values:
schedule_meta_create(schedule_id, metadatum)
return copy.deepcopy(DATA['schedule_metadata'][schedule_id].values())
def _delete_schedule_meta(schedule_id, key):
del DATA['schedule_metadata'][schedule_id][key]
def schedule_meta_delete(schedule_id, key):
_check_meta_exists(schedule_id, key)
_delete_schedule_meta(schedule_id, key)
def worker_get_all(params={}):
workers = copy.deepcopy(DATA['workers'].values())
marker = params.get('marker')
limit = params.get('limit')
workers = _do_pagination(workers, marker, limit)
return workers
def worker_get_by_id(worker_id):
if worker_id not in DATA['workers']:
raise exception.NotFound()
return copy.deepcopy(DATA['workers'][worker_id])
def worker_create(values):
global DATA
worker_values = copy.deepcopy(values)
if 'process_id' not in worker_values:
worker_values['process_id'] = None
worker = {}
worker.update(worker_values)
item_id = values.get('id')
worker.update(_gen_base_attributes(item_id=item_id))
DATA['workers'][worker['id']] = worker
return copy.deepcopy(worker)
def worker_delete(worker_id):
global DATA
if worker_id not in DATA['workers']:
raise exception.NotFound()
del DATA['workers'][worker_id]
def job_create(job_values):
global DATA
db_utils.validate_job_values(job_values)
values = job_values.copy()
job = {}
metadata = []
if 'job_metadata' in values:
metadata = values['job_metadata']
del values['job_metadata']
if 'retry_count' not in values:
values['retry_count'] = 0
job['worker_id'] = None
job['version_id'] = str(uuid.uuid4())
job.update(values)
item_id = values.get('id')
job.update(_gen_base_attributes(item_id=item_id))
DATA['jobs'][job['id']] = job
for metadatum in metadata:
job_meta_create(job['id'], metadatum)
return job_get_by_id(job['id'])
def job_get_all(params={}):
jobs = copy.deepcopy(DATA['jobs'].values())
JOB_BASE_FILTERS = ['schedule_id',
'tenant',
'action',
'worker_id',
'status',
'timeout',
'hard_timeout']
for key in JOB_BASE_FILTERS:
if key in params:
value = params.get(key)
if type(value) is datetime.datetime:
value = timeutils.normalize_time(value).replace(microsecond=0)
for job in reversed(jobs):
job_value = job.get(key)
if job_value and type(job_value) is datetime.datetime:
job_value = job_value.replace(microsecond=0)
if not (job_value == value):
del jobs[jobs.index(job)]
for job in jobs:
job['job_metadata'] =\
job_meta_get_all_by_job_id(job['id'])
marker = params.get('marker')
limit = params.get('limit')
jobs = _do_pagination(jobs, marker, limit)
return jobs
def job_get_by_id(job_id):
if job_id not in DATA['jobs']:
raise exception.NotFound()
job = copy.deepcopy(DATA['jobs'][job_id])
job['job_metadata'] = \
job_meta_get_all_by_job_id(job_id)
return job
def job_updated_at_get_by_id(job_id):
job = job_get_by_id(job_id)
return job['updated_at']
def job_get_and_assign_next_by_action(action, worker_id, new_timeout):
"""Get the next available job for the given action and assign it
to the worker for worker_id.
This must be an atomic action!"""
job_ref = None
now = timeutils.utcnow().replace(second=0, microsecond=0)
jobs = _jobs_get_sorted()
statuses = ['DONE', 'CANCELLED', 'HARD_TIMED_OUT', 'MAX_RETRIED']
for job in jobs:
if job['action'] == action and \
job['status'] not in statuses and \
(job['worker_id'] is None or job['timeout'] <= now):
job_ref = job
break
if job_ref is None:
return None
job_id = job_ref['id']
DATA['jobs'][job_id]['worker_id'] = worker_id
DATA['jobs'][job_id]['timeout'] = new_timeout
DATA['jobs'][job_id]['retry_count'] = job_ref['retry_count'] + 1
DATA['jobs'][job_id]['version_id'] = str(uuid.uuid4())
job = copy.deepcopy(DATA['jobs'][job_id])
job['job_metadata'] = job_meta_get_all_by_job_id(job_id)
return job
def _jobs_get_sorted():
jobs = copy.deepcopy(DATA['jobs'])
sorted_jobs = []
for job_id in jobs:
sorted_jobs.append(jobs[job_id])
sorted_jobs = sorted(sorted_jobs, key=operator.itemgetter('updated_at'))
return sorted_jobs
def _jobs_cleanup_hard_timed_out():
"""Find all jobs with hard_timeout values which have passed
and delete them, logging the timeout / failure as appropriate"""
now = timeutils.utcnow()
del_ids = []
for job_id in DATA['jobs']:
job = DATA['jobs'][job_id]
if (now - job['hard_timeout']) > datetime.timedelta(microseconds=0):
del_ids.append(job_id)
for job_id in del_ids:
job_delete(job_id)
return len(del_ids)
def job_update(job_id, job_values):
global DATA
values = job_values.copy()
if job_id not in DATA['jobs']:
raise exception.NotFound()
metadata = None
if 'job_metadata' in values:
metadata = values['job_metadata']
del values['job_metadata']
if len(values) > 0:
job = DATA['jobs'][job_id]
# NOTE(ameade): This must come before update specified values since
# we may be trying to manually set updated_at
job['updated_at'] = timeutils.utcnow()
job['version_id'] = str(uuid.uuid4())
job.update(values)
if metadata is not None:
DATA['job_metadata'][job_id] = {}
for metadatum in metadata:
job_meta_create(job_id, metadatum)
return job_get_by_id(job_id)
def job_delete(job_id):
global DATA
if job_id not in DATA['jobs']:
raise exception.NotFound()
del DATA['jobs'][job_id]
def job_meta_create(job_id, values):
global DATA
values['job_id'] = job_id
_check_job_exists(job_id)
if DATA['job_metadata'].get(job_id) is None:
DATA['job_metadata'][job_id] = {}
try:
_check_job_meta_exists(job_id, values['key'])
except exception.NotFound:
pass
else:
raise exception.Duplicate()
meta = {}
meta.update(values)
meta.update(_gen_base_attributes())
DATA['job_metadata'][job_id][values['key']] = meta
return copy.deepcopy(meta)
def _check_job_exists(job_id):
if job_id not in DATA['jobs']:
msg = _('Job %s could not be found') % job_id
raise exception.NotFound(message=msg)
def _check_job_meta_exists(job_id, key):
if (DATA['job_metadata'].get(job_id) is None or
DATA['job_metadata'][job_id].get(key) is None):
msg = (_('Meta %(key)s could not be found for Job %(job_id)s ') %
{'key': key, 'job_id': job_id})
raise exception.NotFound(message=msg)
def job_meta_get_all_by_job_id(job_id):
_check_job_exists(job_id)
if job_id not in DATA['job_metadata']:
DATA['job_metadata'][job_id] = {}
return copy.deepcopy(DATA['job_metadata'][job_id].values())
def job_metadata_update(job_id, values):
global DATA
_check_job_exists(job_id)
DATA['job_metadata'][job_id] = {}
for metadatum in values:
job_meta_create(job_id, metadatum)
return copy.deepcopy(DATA['job_metadata'][job_id].values())
def _job_faults_get_sorted():
jobs = copy.deepcopy(DATA['job_faults'])
sorted_jobs = []
for job_id in jobs:
sorted_jobs.append(jobs[job_id])
sorted_jobs = sorted(sorted_jobs, key=operator.itemgetter('created_at'),
reverse=True)
return sorted_jobs
def job_fault_latest_for_job_id(job_id):
job_faults = _job_faults_get_sorted()
for job_fault in job_faults:
if job_fault['job_id'] == job_id:
return job_fault
return None
def job_fault_create(values):
global DATA
job_fault = {}
job_fault.update(values)
item_id = values.get('id')
job_fault.update(_gen_base_attributes(item_id=item_id))
DATA['job_faults'][job_fault['id']] = job_fault
return copy.deepcopy(job_fault)
|
|
import re
import csv
from itertools import chain
import pytz
from io import StringIO
from os import path
from functools import wraps
import unicodedata
from urllib.parse import urlparse
from collections import namedtuple
from datetime import datetime, timedelta, timezone
import dateutil
import ago
from flask import (
abort,
current_app,
redirect,
request,
session,
url_for
)
from flask_login import current_user
import pyexcel
from notifications_utils.template import (
SMSPreviewTemplate,
EmailPreviewTemplate,
LetterImageTemplate,
LetterPreviewTemplate,
)
from orderedset._orderedset import OrderedSet
from werkzeug.datastructures import MultiDict
SENDING_STATUSES = ['created', 'pending', 'sending']
DELIVERED_STATUSES = ['delivered', 'sent']
FAILURE_STATUSES = ['failed', 'temporary-failure', 'permanent-failure', 'technical-failure']
REQUESTED_STATUSES = SENDING_STATUSES + DELIVERED_STATUSES + FAILURE_STATUSES
class BrowsableItem(object):
"""
Maps for the template browse-list.
"""
def __init__(self, item, *args, **kwargs):
self._item = item
super(BrowsableItem, self).__init__()
@property
def title(self):
pass
@property
def link(self):
pass
@property
def hint(self):
pass
@property
def destructive(self):
pass
def user_has_permissions(*permissions, admin_override=False, any_=False):
def wrap(func):
@wraps(func)
def wrap_func(*args, **kwargs):
if current_user and current_user.is_authenticated:
if current_user.has_permissions(
*permissions,
admin_override=admin_override,
any_=any_
):
return func(*args, **kwargs)
else:
abort(403)
else:
abort(401)
return wrap_func
return wrap
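# Hedged usage sketch (not part of the original module; the view name and
# permission names are illustrative): the decorator aborts with 401 for
# anonymous users and 403 for signed-in users lacking the permissions.
@user_has_permissions('manage_templates', 'manage_api_keys', any_=True)
def _example_protected_view(service_id):
    # Reached only when current_user holds at least one permission above.
    return 'settings for {}'.format(service_id)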
def redirect_to_sign_in(f):
@wraps(f)
def wrapped(*args, **kwargs):
if 'user_details' not in session:
return redirect(url_for('main.sign_in'))
else:
return f(*args, **kwargs)
return wrapped
def get_errors_for_csv(recipients, template_type):
errors = []
if recipients.rows_with_bad_recipients:
number_of_bad_recipients = len(list(recipients.rows_with_bad_recipients))
if 'sms' == template_type:
if 1 == number_of_bad_recipients:
errors.append("fix 1 phone number")
else:
errors.append("fix {} phone numbers".format(number_of_bad_recipients))
elif 'email' == template_type:
if 1 == number_of_bad_recipients:
errors.append("fix 1 email address")
else:
errors.append("fix {} email addresses".format(number_of_bad_recipients))
elif 'letter' == template_type:
if 1 == number_of_bad_recipients:
errors.append("fix 1 address")
else:
errors.append("fix {} addresses".format(number_of_bad_recipients))
if recipients.rows_with_missing_data:
number_of_rows_with_missing_data = len(list(recipients.rows_with_missing_data))
if 1 == number_of_rows_with_missing_data:
errors.append("enter missing data in 1 row")
else:
errors.append("enter missing data in {} rows".format(number_of_rows_with_missing_data))
return errors
def generate_notifications_csv(**kwargs):
from app import notification_api_client
if 'page' not in kwargs:
kwargs['page'] = 1
if kwargs['job_id']:
fieldnames = ['Row number', 'Recipient', 'Template', 'Type', 'Job', 'Status', 'Time']
else:
fieldnames = ['Recipient', 'Template', 'Type', 'Job', 'Status', 'Time']
yield ','.join(fieldnames) + '\n'
while kwargs['page']:
# if job_id then response looks different
notifications_resp = notification_api_client.get_notifications_for_service(**kwargs)
notifications = notifications_resp['notifications']
if kwargs['job_id']:
for notification in notifications:
values = [
notification['row_number'],
notification['recipient'],
notification['template_name'],
notification['template_type'],
notification['job_name'],
notification['status'],
notification['created_at']
]
line = ','.join(str(i) for i in values) + '\n'
yield line
else:
            # Notifications not filtered by job_id come back in a different shape
for notification in notifications:
values = [
notification['to'],
notification['template']['name'],
notification['template']['template_type'],
notification.get('job_name', None),
notification['status'],
notification['created_at'],
notification['updated_at']
]
line = ','.join(str(i) for i in values) + '\n'
yield line
if notifications_resp['links'].get('next'):
kwargs['page'] += 1
else:
return
raise Exception("Should never reach here")
def get_page_from_request():
if 'page' in request.args:
try:
return int(request.args['page'])
except ValueError:
return None
else:
return 1
def generate_previous_dict(view, service_id, page, url_args=None):
return generate_previous_next_dict(view, service_id, page - 1, 'Previous page', url_args or {})
def generate_next_dict(view, service_id, page, url_args=None):
return generate_previous_next_dict(view, service_id, page + 1, 'Next page', url_args or {})
def generate_previous_next_dict(view, service_id, page, title, url_args):
return {
'url': url_for(view, service_id=service_id, page=page, **url_args),
'title': title,
'label': 'page {}'.format(page)
}
def email_safe(string, whitespace='.'):
# strips accents, diacritics etc
string = ''.join(c for c in unicodedata.normalize('NFD', string) if unicodedata.category(c) != 'Mn')
string = ''.join(
word.lower() if word.isalnum() or word == whitespace else ''
for word in re.sub(r'\s+', whitespace, string.strip())
)
string = re.sub(r'\.{2,}', '.', string)
return string.strip('.')
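# Wraps uploaded spreadsheet data: plain CSV is normalised directly, while TSV
# and binary formats (xlsx, xls, ods, xlsm) are parsed with pyexcel and
# re-serialised as CSV rows in from_file().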
class Spreadsheet():
allowed_file_extensions = ['csv', 'xlsx', 'xls', 'ods', 'xlsm', 'tsv']
def __init__(self, csv_data, filename=''):
self.filename = filename
self.as_csv_data = csv_data
self.as_dict = {
'file_name': self.filename,
'data': self.as_csv_data
}
@classmethod
def can_handle(cls, filename):
return cls.get_extension(filename) in cls.allowed_file_extensions
@staticmethod
def get_extension(filename):
return path.splitext(filename)[1].lower().lstrip('.')
@staticmethod
def normalise_newlines(file_content):
return '\r\n'.join(file_content.read().decode('utf-8').splitlines())
@classmethod
def from_rows(cls, rows, filename=''):
with StringIO() as converted:
output = csv.writer(converted)
for row in rows:
output.writerow(row)
return cls(converted.getvalue(), filename)
@classmethod
def from_dict(cls, dictionary, filename=''):
return cls.from_rows(
zip(
*sorted(dictionary.items(), key=lambda pair: pair[0])
),
filename
)
@classmethod
def from_file(cls, file_content, filename=''):
extension = cls.get_extension(filename)
if extension == 'csv':
return cls(Spreadsheet.normalise_newlines(file_content), filename)
if extension == 'tsv':
file_content = StringIO(
Spreadsheet.normalise_newlines(file_content))
instance = cls.from_rows(
pyexcel.iget_array(
file_type=extension,
file_stream=file_content),
filename)
pyexcel.free_resources()
return instance
def get_help_argument():
return request.args.get('help') if request.args.get('help') in ('1', '2', '3') else None
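# True when the email address ends with one of the domains configured in
# EMAIL_DOMAIN_REGEXES, matched after a '.' or '@'.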
def is_gov_user(email_address):
valid_domains = current_app.config['EMAIL_DOMAIN_REGEXES']
email_regex = (r"[\.|@]({})$".format("|".join(valid_domains)))
return bool(re.search(email_regex, email_address.lower()))
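# Returns the preview class matching the template type: EmailPreviewTemplate,
# SMSPreviewTemplate, or (for letters) LetterImageTemplate when a
# letter_preview_url is supplied and LetterPreviewTemplate otherwise.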
def get_template(
template,
service,
show_recipient=False,
expand_emails=False,
letter_preview_url=None,
page_count=1,
redact_missing_personalisation=False,
email_reply_to=None,
sms_sender=None,
):
if 'email' == template['template_type']:
return EmailPreviewTemplate(
template,
from_name=service['name'],
from_address='{}@notifications.service.gov.uk'.format(service['email_from']),
expanded=expand_emails,
show_recipient=show_recipient,
redact_missing_personalisation=redact_missing_personalisation,
reply_to=email_reply_to,
)
if 'sms' == template['template_type']:
return SMSPreviewTemplate(
template,
prefix=service['name'],
show_prefix=service['prefix_sms'],
sender=sms_sender,
show_sender=bool(sms_sender),
show_recipient=show_recipient,
redact_missing_personalisation=redact_missing_personalisation,
)
if 'letter' == template['template_type']:
if letter_preview_url:
return LetterImageTemplate(
template,
image_url=letter_preview_url,
page_count=int(page_count),
contact_block=template['reply_to_text']
)
else:
return LetterPreviewTemplate(
template,
contact_block=template['reply_to_text'],
admin_base_url=current_app.config['ADMIN_BASE_URL'],
redact_missing_personalisation=redact_missing_personalisation,
)
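# Financial years here run April to March, so January to March counts towards
# the financial year that started the previous calendar year.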
def get_current_financial_year():
now = datetime.utcnow()
current_month = int(now.strftime('%-m'))
current_year = int(now.strftime('%Y'))
return current_year if current_month > 3 else current_year - 1
def get_time_left(created_at):
return ago.human(
(
datetime.now(timezone.utc).replace(hour=23, minute=59, second=59)
) - (
dateutil.parser.parse(created_at) + timedelta(days=8)
),
future_tense='Data available for {}',
past_tense='Data no longer available', # No-one should ever see this
precision=1
)
def email_or_sms_not_enabled(template_type, permissions):
return (template_type in ['email', 'sms']) and (template_type not in permissions)
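# Estimates when an uploaded letter will be printed and delivered: uploads from
# 5pm (London time) onwards roll over to the next day's processing, and the
# per-weekday table below gives the offsets in days for the print day and the
# earliest and latest delivery days.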
def get_letter_timings(upload_time):
LetterTimings = namedtuple(
'LetterTimings',
'printed_by, is_printed, earliest_delivery, latest_delivery'
)
# shift anything after 5pm to the next day
processing_day = gmt_timezones(upload_time) + timedelta(hours=(7))
print_day, earliest_delivery, latest_delivery = (
processing_day + timedelta(days=days)
for days in {
'Wednesday': (1, 3, 5),
'Thursday': (1, 4, 5),
'Friday': (3, 5, 6),
'Saturday': (2, 4, 5),
}.get(processing_day.strftime('%A'), (1, 3, 4))
)
printed_by = print_day.astimezone(pytz.timezone('Europe/London')).replace(hour=15, minute=0)
    # Compare against an aware "now" in the same timezone as printed_by
    now = datetime.now(timezone.utc).astimezone(pytz.timezone('Europe/London'))
return LetterTimings(
printed_by=printed_by,
is_printed=(now > printed_by),
earliest_delivery=earliest_delivery,
latest_delivery=latest_delivery,
)
def gmt_timezones(date):
date = dateutil.parser.parse(date)
forced_utc = date.replace(tzinfo=pytz.utc)
return forced_utc.astimezone(pytz.timezone('Europe/London'))
def get_cdn_domain():
parsed_uri = urlparse(current_app.config['ADMIN_BASE_URL'])
if parsed_uri.netloc.startswith('localhost'):
return 'static-logos.notify.tools'
subdomain = parsed_uri.hostname.split('.')[0]
domain = parsed_uri.netloc[len(subdomain + '.'):]
return "static-logos.{}".format(domain)
def parse_filter_args(filter_dict):
if not isinstance(filter_dict, MultiDict):
filter_dict = MultiDict(filter_dict)
return MultiDict(
(
key,
(','.join(filter_dict.getlist(key))).split(',')
)
for key in filter_dict.keys()
if ''.join(filter_dict.getlist(key))
)
def set_status_filters(filter_args):
status_filters = filter_args.get('status', [])
return list(OrderedSet(chain(
(status_filters or REQUESTED_STATUSES),
DELIVERED_STATUSES if 'delivered' in status_filters else [],
SENDING_STATUSES if 'sending' in status_filters else [],
FAILURE_STATUSES if 'failed' in status_filters else []
)))
|
|
# Copyright 2014 OpenStack Foundation
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import errno
import logging
import math
import os
import re
import threading
import urllib
import urlparse
import json
import time
import Queue
import datetime
import dateutil.parser
try:
import ordereddict
except ImportError:
pass
import requests
import requests.utils
import gertty.version
from gertty import gitrepo
HIGH_PRIORITY=0
NORMAL_PRIORITY=1
LOW_PRIORITY=2
TIMEOUT=30
CLOSED_STATUSES = ['MERGED', 'ABANDONED']
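# A thread-safe collection of FIFO queues, one per priority level: get() scans
# the queues in the priority order given at construction and blocks on a
# condition variable when they are all empty.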
class MultiQueue(object):
def __init__(self, priorities):
try:
self.queues = collections.OrderedDict()
except AttributeError:
self.queues = ordereddict.OrderedDict()
for key in priorities:
self.queues[key] = collections.deque()
self.condition = threading.Condition()
def qsize(self):
count = 0
for queue in self.queues.values():
count += len(queue)
return count
def put(self, item, priority):
added = False
self.condition.acquire()
try:
if item not in self.queues[priority]:
self.queues[priority].append(item)
added = True
self.condition.notify()
finally:
self.condition.release()
return added
def get(self):
self.condition.acquire()
try:
while True:
for queue in self.queues.values():
try:
ret = queue.popleft()
return ret
except IndexError:
pass
self.condition.wait()
finally:
self.condition.release()
def find(self, klass, priority):
results = []
self.condition.acquire()
try:
for item in self.queues[priority]:
if isinstance(item, klass):
results.append(item)
finally:
self.condition.release()
return results
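# Result objects appended to a task's results list and forwarded to
# Sync.result_queue once the task has run; updateRelatedChanges records the keys
# of the change and of its parent and child changes so consumers know what was
# touched.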
class UpdateEvent(object):
def updateRelatedChanges(self, session, change):
related_change_keys = set()
related_change_keys.add(change.key)
for revision in change.revisions:
parent = session.getRevisionByCommit(revision.parent)
if parent:
related_change_keys.add(parent.change.key)
for child in session.getRevisionsByParent(revision.commit):
related_change_keys.add(child.change.key)
self.related_change_keys = related_change_keys
class ProjectAddedEvent(UpdateEvent):
def __repr__(self):
return '<ProjectAddedEvent project_key:%s>' % (
self.project_key,)
def __init__(self, project):
self.project_key = project.key
class ChangeAddedEvent(UpdateEvent):
def __repr__(self):
return '<ChangeAddedEvent project_key:%s change_key:%s>' % (
self.project_key, self.change_key)
def __init__(self, change):
self.project_key = change.project.key
self.change_key = change.key
self.related_change_keys = set()
self.review_flag_changed = True
self.status_changed = True
self.held_changed = False
class ChangeUpdatedEvent(UpdateEvent):
def __repr__(self):
return '<ChangeUpdatedEvent project_key:%s change_key:%s review_flag_changed:%s status_changed:%s>' % (
self.project_key, self.change_key, self.review_flag_changed, self.status_changed)
def __init__(self, change):
self.project_key = change.project.key
self.change_key = change.key
self.related_change_keys = set()
self.review_flag_changed = False
self.status_changed = False
self.held_changed = False
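# Base class for all sync work items: each task carries a priority, records
# whether it succeeded, and exposes wait() so callers can block until the sync
# thread has processed it.  Subclasses implement run() and __eq__, the latter so
# MultiQueue.put() can avoid queuing duplicate tasks.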
class Task(object):
def __init__(self, priority=NORMAL_PRIORITY):
self.log = logging.getLogger('gertty.sync')
self.priority = priority
self.succeeded = None
self.event = threading.Event()
self.tasks = []
self.results = []
def complete(self, success):
self.succeeded = success
self.event.set()
def wait(self, timeout=None):
self.event.wait(timeout)
return self.succeeded
def __eq__(self, other):
raise NotImplementedError()
class SyncOwnAccountTask(Task):
def __repr__(self):
return '<SyncOwnAccountTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
remote = sync.get('accounts/self')
sync.account_id = remote['_account_id']
with app.db.getSession() as session:
session.getAccountByID(remote['_account_id'],
remote.get('name'),
remote.get('username'),
remote.get('email'))
class SyncProjectListTask(Task):
def __repr__(self):
return '<SyncProjectListTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
remote = sync.get('projects/?d')
remote_keys = set(remote.keys())
with app.db.getSession() as session:
local = {}
for p in session.getProjects():
local[p.name] = p
local_keys = set(local.keys())
for name in local_keys-remote_keys:
session.delete(local[name])
for name in remote_keys-local_keys:
p = remote[name]
project = session.createProject(name,
description=p.get('description', ''))
self.log.info("Created project %s", project.name)
self.results.append(ProjectAddedEvent(project))
class SyncSubscribedProjectBranchesTask(Task):
def __repr__(self):
return '<SyncSubscribedProjectBranchesTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
projects = session.getProjects(subscribed=True)
for p in projects:
sync.submitTask(SyncProjectBranchesTask(p.name, self.priority))
class SyncProjectBranchesTask(Task):
branch_re = re.compile(r'refs/heads/(.*)')
def __init__(self, project_name, priority=NORMAL_PRIORITY):
super(SyncProjectBranchesTask, self).__init__(priority)
self.project_name = project_name
def __repr__(self):
return '<SyncProjectBranchesTask %s>' % (self.project_name,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_name == self.project_name):
return True
return False
def run(self, sync):
app = sync.app
remote = sync.get('projects/%s/branches/' % urllib.quote_plus(self.project_name))
remote_branches = set()
for x in remote:
m = self.branch_re.match(x['ref'])
if m:
remote_branches.add(m.group(1))
with app.db.getSession() as session:
local = {}
project = session.getProjectByName(self.project_name)
for branch in project.branches:
local[branch.name] = branch
local_branches = set(local.keys())
for name in local_branches-remote_branches:
session.delete(local[name])
self.log.info("Deleted branch %s from project %s in local DB.", name, project.name)
for name in remote_branches-local_branches:
project.createBranch(name)
self.log.info("Added branch %s to project %s in local DB.", name, project.name)
class SyncSubscribedProjectsTask(Task):
def __repr__(self):
return '<SyncSubscribedProjectsTask>'
def __eq__(self, other):
if (other.__class__ == self.__class__):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
keys = [p.key for p in session.getProjects(subscribed=True)]
for i in range(0, len(keys), 10):
t = SyncProjectTask(keys[i:i+10], self.priority)
self.tasks.append(t)
sync.submitTask(t)
t = SyncQueriedChangesTask('owner', 'is:owner', self.priority)
self.tasks.append(t)
sync.submitTask(t)
t = SyncQueriedChangesTask('starred', 'is:starred', self.priority)
self.tasks.append(t)
sync.submitTask(t)
class SyncProjectTask(Task):
def __init__(self, project_keys, priority=NORMAL_PRIORITY):
super(SyncProjectTask, self).__init__(priority)
if type(project_keys) == int:
project_keys = [project_keys]
self.project_keys = project_keys
def __repr__(self):
return '<SyncProjectTask %s>' % (self.project_keys,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_keys == self.project_keys):
return True
return False
def run(self, sync):
app = sync.app
now = datetime.datetime.utcnow()
queries = []
with app.db.getSession() as session:
for project_key in self.project_keys:
project = session.getProject(project_key)
query = 'q=project:%s' % project.name
if project.updated:
# Allow 4 seconds for request time, etc.
query += ' -age:%ss' % (int(math.ceil((now-project.updated).total_seconds())) + 4,)
else:
query += ' status:open'
queries.append(query)
changes = []
sortkey = ''
done = False
while not done:
query = '&'.join(queries)
# We don't actually want to limit to 500, but that's the server-side default, and
# if we don't specify this, we won't get a _more_changes flag.
q = 'changes/?n=500%s&%s' % (sortkey, query)
self.log.debug('Query: %s ' % (q,))
responses = sync.get(q)
if len(queries) == 1:
responses = [responses]
done = True
for batch in responses:
changes += batch
if batch and '_more_changes' in batch[-1]:
sortkey = '&N=%s' % (batch[-1]['_sortkey'],)
done = False
change_ids = [c['id'] for c in changes]
with app.db.getSession() as session:
# Winnow the list of IDs to only the ones in the local DB.
change_ids = session.getChangeIDs(change_ids)
for c in changes:
            # For now, just sync open changes or changes already in the db;
            # optionally we could sync all changes ever
if c['id'] in change_ids or (c['status'] not in CLOSED_STATUSES):
sync.submitTask(SyncChangeTask(c['id'], priority=self.priority))
for key in self.project_keys:
sync.submitTask(SetProjectUpdatedTask(key, now, priority=self.priority))
class SetProjectUpdatedTask(Task):
def __init__(self, project_key, updated, priority=NORMAL_PRIORITY):
super(SetProjectUpdatedTask, self).__init__(priority)
self.project_key = project_key
self.updated = updated
def __repr__(self):
return '<SetProjectUpdatedTask %s %s>' % (self.project_key, self.updated)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_key == self.project_key and
other.updated == self.updated):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
project = session.getProject(self.project_key)
project.updated = self.updated
class SyncQueriedChangesTask(Task):
def __init__(self, query_name, query, priority=NORMAL_PRIORITY):
super(SyncQueriedChangesTask, self).__init__(priority)
self.query_name = query_name
self.query = query
def __repr__(self):
return '<SyncQueriedChangesTask %s>' % self.query_name
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.query_name == self.query_name and
other.query == self.query):
return True
return False
def run(self, sync):
app = sync.app
now = datetime.datetime.utcnow()
with app.db.getSession() as session:
sync_query = session.getSyncQueryByName(self.query_name)
query = 'q=%s' % self.query
if sync_query.updated:
# Allow 4 seconds for request time, etc.
query += ' -age:%ss' % (int(math.ceil((now-sync_query.updated).total_seconds())) + 4,)
else:
query += ' status:open'
for project in session.getProjects(subscribed=True):
query += ' -project:%s' % project.name
changes = []
sortkey = ''
done = False
while not done:
# We don't actually want to limit to 500, but that's the server-side default, and
# if we don't specify this, we won't get a _more_changes flag.
q = 'changes/?n=500%s&%s' % (sortkey, query)
self.log.debug('Query: %s ' % (q,))
batch = sync.get(q)
done = True
if batch:
changes += batch
if '_more_changes' in batch[-1]:
sortkey = '&N=%s' % (batch[-1]['_sortkey'],)
done = False
change_ids = [c['id'] for c in changes]
with app.db.getSession() as session:
# Winnow the list of IDs to only the ones in the local DB.
change_ids = session.getChangeIDs(change_ids)
for c in changes:
            # For now, just sync open changes or changes already in the db;
            # optionally we could sync all changes ever
if c['id'] in change_ids or (c['status'] not in CLOSED_STATUSES):
sync.submitTask(SyncChangeTask(c['id'], priority=self.priority))
sync.submitTask(SetSyncQueryUpdatedTask(self.query_name, now, priority=self.priority))
class SetSyncQueryUpdatedTask(Task):
def __init__(self, query_name, updated, priority=NORMAL_PRIORITY):
super(SetSyncQueryUpdatedTask, self).__init__(priority)
self.query_name = query_name
self.updated = updated
def __repr__(self):
return '<SetSyncQueryUpdatedTask %s %s>' % (self.query_name, self.updated)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.query_name == self.query_name and
other.updated == self.updated):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
sync_query = session.getSyncQueryByName(self.query_name)
sync_query.updated = self.updated
class SyncChangesByCommitsTask(Task):
def __init__(self, commits, priority=NORMAL_PRIORITY):
super(SyncChangesByCommitsTask, self).__init__(priority)
self.commits = commits
def __repr__(self):
return '<SyncChangesByCommitsTask %s>' % (self.commits,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.commits == self.commits):
return True
return False
def run(self, sync):
query = ' OR '.join(['commit:%s' % x for x in self.commits])
changes = sync.get('changes/?q=%s' % query)
self.log.debug('Query: %s ' % (query,))
for c in changes:
sync.submitTask(SyncChangeTask(c['id'], priority=self.priority))
self.log.debug("Sync change %s for its commit" % (c['id'],))
def addCommit(self, commit):
if commit in self.commits:
return True
# 100 should be under the URL length limit
if len(self.commits) >= 100:
return False
self.commits.append(commit)
return True
class SyncChangeByNumberTask(Task):
def __init__(self, number, priority=NORMAL_PRIORITY):
super(SyncChangeByNumberTask, self).__init__(priority)
self.number = number
def __repr__(self):
return '<SyncChangeByNumberTask %s>' % (self.number,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.number == self.number):
return True
return False
def run(self, sync):
query = '%s' % self.number
changes = sync.get('changes/?q=%s' % query)
self.log.debug('Query: %s ' % (query,))
for c in changes:
task = SyncChangeTask(c['id'], priority=self.priority)
self.tasks.append(task)
sync.submitTask(task)
self.log.debug("Sync change %s because it is number %s" % (c['id'], self.number))
class SyncChangeTask(Task):
def __init__(self, change_id, force_fetch=False, priority=NORMAL_PRIORITY):
super(SyncChangeTask, self).__init__(priority)
self.change_id = change_id
self.force_fetch = force_fetch
def __repr__(self):
return '<SyncChangeTask %s>' % (self.change_id,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_id == self.change_id and
other.force_fetch == self.force_fetch):
return True
return False
def run(self, sync):
start_time = time.time()
app = sync.app
remote_change = sync.get('changes/%s?o=DETAILED_LABELS&o=ALL_REVISIONS&o=ALL_COMMITS&o=MESSAGES&o=DETAILED_ACCOUNTS&o=CURRENT_ACTIONS&o=ALL_FILES' % self.change_id)
# Perform subqueries this task will need outside of the db session
for remote_commit, remote_revision in remote_change.get('revisions', {}).items():
remote_comments_data = sync.get('changes/%s/revisions/%s/comments' % (self.change_id, remote_commit))
remote_revision['_gertty_remote_comments_data'] = remote_comments_data
fetches = collections.defaultdict(list)
parent_commits = set()
with app.db.getSession() as session:
change = session.getChangeByID(self.change_id)
account = session.getAccountByID(remote_change['owner']['_account_id'],
name=remote_change['owner'].get('name'),
username=remote_change['owner'].get('username'),
email=remote_change['owner'].get('email'))
if not change:
project = session.getProjectByName(remote_change['project'])
created = dateutil.parser.parse(remote_change['created'])
updated = dateutil.parser.parse(remote_change['updated'])
change = project.createChange(remote_change['id'], account, remote_change['_number'],
remote_change['branch'], remote_change['change_id'],
remote_change['subject'], created,
updated, remote_change['status'],
topic=remote_change.get('topic'))
self.log.info("Created new change %s in local DB.", change.id)
result = ChangeAddedEvent(change)
else:
result = ChangeUpdatedEvent(change)
self.results.append(result)
change.owner = account
if change.status != remote_change['status']:
change.status = remote_change['status']
result.status_changed = True
if remote_change.get('starred'):
change.starred = True
else:
change.starred = False
change.subject = remote_change['subject']
change.updated = dateutil.parser.parse(remote_change['updated'])
change.topic = remote_change.get('topic')
repo = gitrepo.get_repo(change.project.name, app.config)
new_revision = False
for remote_commit, remote_revision in remote_change.get('revisions', {}).items():
revision = session.getRevisionByCommit(remote_commit)
# TODO: handle multiple parents
if 'anonymous http' in remote_revision['fetch']:
ref = remote_revision['fetch']['anonymous http']['ref']
url = remote_revision['fetch']['anonymous http']['url']
auth = False
elif 'http' in remote_revision['fetch']:
auth = True
ref = remote_revision['fetch']['http']['ref']
url = list(urlparse.urlsplit(sync.app.config.url + change.project.name))
url[1] = '%s:%s@%s' % (
urllib.quote_plus(sync.app.config.username),
urllib.quote_plus(sync.app.config.password), url[1])
url = urlparse.urlunsplit(url)
elif 'ssh' in remote_revision['fetch']:
ref = remote_revision['fetch']['ssh']['ref']
url = remote_revision['fetch']['ssh']['url']
auth = False
elif 'git' in remote_revision['fetch']:
ref = remote_revision['fetch']['git']['ref']
url = remote_revision['fetch']['git']['url']
auth = False
else:
if len(remote_revision['fetch']):
errMessage = "No supported fetch method found. Server offers: %s" % (
', '.join(remote_revision['fetch'].keys()))
else:
errMessage = "The server is missing the download-commands plugin."
raise Exception(errMessage)
if (not revision) or self.force_fetch:
fetches[url].append('+%(ref)s:%(ref)s' % dict(ref=ref))
if not revision:
revision = change.createRevision(remote_revision['_number'],
remote_revision['commit']['message'], remote_commit,
remote_revision['commit']['parents'][0]['commit'],
auth, ref)
self.log.info("Created new revision %s for change %s revision %s in local DB.",
revision.key, self.change_id, remote_revision['_number'])
new_revision = True
revision.message = remote_revision['commit']['message']
actions = remote_revision.get('actions', {})
revision.can_submit = 'submit' in actions
# TODO: handle multiple parents
if revision.parent not in parent_commits:
parent_revision = session.getRevisionByCommit(revision.parent)
if not parent_revision and change.status not in CLOSED_STATUSES:
sync._syncChangeByCommit(revision.parent, self.priority)
self.log.debug("Change %s revision %s needs parent commit %s synced" %
(change.id, remote_revision['_number'], revision.parent))
parent_commits.add(revision.parent)
result.updateRelatedChanges(session, change)
f = revision.getFile('/COMMIT_MSG')
if f is None:
f = revision.createFile('/COMMIT_MSG', None,
None, None, None)
for remote_path, remote_file in remote_revision['files'].items():
f = revision.getFile(remote_path)
if f is None:
if remote_file.get('binary'):
inserted = deleted = None
else:
inserted = remote_file.get('lines_inserted', 0)
deleted = remote_file.get('lines_deleted', 0)
f = revision.createFile(remote_path, remote_file.get('status', 'M'),
remote_file.get('old_path'),
inserted, deleted)
remote_comments_data = remote_revision['_gertty_remote_comments_data']
for remote_file, remote_comments in remote_comments_data.items():
for remote_comment in remote_comments:
account = session.getAccountByID(remote_comment['author']['_account_id'],
name=remote_comment['author'].get('name'),
username=remote_comment['author'].get('username'),
email=remote_comment['author'].get('email'))
comment = session.getCommentByID(remote_comment['id'])
if not comment:
# Normalize updated -> created
created = dateutil.parser.parse(remote_comment['updated'])
parent = False
if remote_comment.get('side', '') == 'PARENT':
parent = True
fileobj = revision.getFile(remote_file)
comment = fileobj.createComment(remote_comment['id'], account,
remote_comment.get('in_reply_to'),
created,
parent, remote_comment.get('line'),
remote_comment['message'])
self.log.info("Created new comment %s for revision %s in local DB.",
comment.key, revision.key)
else:
if comment.author != account:
comment.author = account
new_message = False
for remote_message in remote_change.get('messages', []):
if 'author' in remote_message:
account = session.getAccountByID(remote_message['author']['_account_id'],
name=remote_message['author'].get('name'),
username=remote_message['author'].get('username'),
email=remote_message['author'].get('email'))
if account.username != app.config.username:
new_message = True
else:
account = session.getSystemAccount()
message = session.getMessageByID(remote_message['id'])
if not message:
revision = session.getRevisionByNumber(change, remote_message.get('_revision_number', 1))
if revision:
# Normalize date -> created
created = dateutil.parser.parse(remote_message['date'])
message = revision.createMessage(remote_message['id'], account, created,
remote_message['message'])
self.log.info("Created new review message %s for revision %s in local DB.", message.key, revision.key)
else:
self.log.info("Unable to create new review message for revision %s because it is not in local DB (draft?).", remote_message.get('_revision_number'))
else:
if message.author != account:
message.author = account
remote_approval_entries = {}
remote_label_entries = {}
user_voted = False
for remote_label_name, remote_label_dict in remote_change.get('labels', {}).items():
for remote_approval in remote_label_dict.get('all', []):
if remote_approval.get('value') is None:
continue
remote_approval['category'] = remote_label_name
key = '%s~%s' % (remote_approval['category'], remote_approval['_account_id'])
remote_approval_entries[key] = remote_approval
if remote_approval['_account_id'] == sync.account_id and int(remote_approval['value']) != 0:
user_voted = True
for key, value in remote_label_dict.get('values', {}).items():
# +1: "LGTM"
label = dict(value=key,
description=value,
category=remote_label_name)
key = '%s~%s~%s' % (label['category'], label['value'], label['description'])
remote_label_entries[key] = label
remote_approval_keys = set(remote_approval_entries.keys())
remote_label_keys = set(remote_label_entries.keys())
local_approvals = {}
local_labels = {}
user_votes = {}
for approval in change.approvals:
if approval.draft and not new_revision:
# If we have a new revision, we need to delete
# draft local approvals because they can no longer
# be uploaded. Otherwise, keep them because we
# may be about to upload a review. Ignoring an
# approval here means it will not be deleted.
# Also keep track of these approvals so we can
# determine whether we should hold the change
# later.
user_votes[approval.category] = approval.value
# Count draft votes as having voted for the
# purposes of deciding whether to clear the
# reviewed flag later.
user_voted = True
continue
key = '%s~%s' % (approval.category, approval.reviewer.id)
if key in local_approvals:
# Delete duplicate approvals.
session.delete(approval)
else:
local_approvals[key] = approval
local_approval_keys = set(local_approvals.keys())
for label in change.labels:
key = '%s~%s~%s' % (label.category, label.value, label.description)
local_labels[key] = label
local_label_keys = set(local_labels.keys())
for key in local_approval_keys-remote_approval_keys:
session.delete(local_approvals[key])
for key in local_label_keys-remote_label_keys:
session.delete(local_labels[key])
for key in remote_approval_keys-local_approval_keys:
remote_approval = remote_approval_entries[key]
account = session.getAccountByID(remote_approval['_account_id'],
name=remote_approval.get('name'),
username=remote_approval.get('username'),
email=remote_approval.get('email'))
change.createApproval(account,
remote_approval['category'],
remote_approval['value'])
self.log.info("Created approval for change %s in local DB.", change.id)
user_value = user_votes.get(remote_approval['category'], 0)
if user_value > 0 and remote_approval['value'] < 0:
# Someone left a negative vote after the local
# user created a draft positive vote. Hold the
# change so that it doesn't look like the local
# user is ignoring negative feedback.
if not change.held:
change.held = True
result.held_changed = True
self.log.info("Setting change %s to held due to negative review after positive", change.id)
for key in remote_label_keys-local_label_keys:
remote_label = remote_label_entries[key]
change.createLabel(remote_label['category'],
remote_label['value'],
remote_label['description'])
for key in remote_approval_keys.intersection(local_approval_keys):
local_approval = local_approvals[key]
remote_approval = remote_approval_entries[key]
local_approval.value = remote_approval['value']
# For the side effect of updating account info:
account = session.getAccountByID(remote_approval['_account_id'],
name=remote_approval.get('name'),
username=remote_approval.get('username'),
email=remote_approval.get('email'))
remote_permitted_entries = {}
for remote_label_name, remote_label_values in remote_change.get('permitted_labels', {}).items():
for remote_label_value in remote_label_values:
remote_label = dict(category=remote_label_name,
value=remote_label_value)
key = '%s~%s' % (remote_label['category'], remote_label['value'])
remote_permitted_entries[key] = remote_label
remote_permitted_keys = set(remote_permitted_entries.keys())
local_permitted = {}
for permitted in change.permitted_labels:
key = '%s~%s' % (permitted.category, permitted.value)
local_permitted[key] = permitted
local_permitted_keys = set(local_permitted.keys())
for key in local_permitted_keys-remote_permitted_keys:
session.delete(local_permitted[key])
for key in remote_permitted_keys-local_permitted_keys:
remote_permitted = remote_permitted_entries[key]
change.createPermittedLabel(remote_permitted['category'],
remote_permitted['value'])
if not user_voted:
# Only consider changing the reviewed state if we don't have a vote
if new_revision or new_message:
if change.reviewed:
change.reviewed = False
result.review_flag_changed = True
for url, refs in fetches.items():
self.log.debug("Fetching from %s with refs %s", url, refs)
try:
repo.fetch(url, refs)
except Exception:
# Backwards compat with GitPython before the multi-ref fetch
# patch.
# (https://github.com/gitpython-developers/GitPython/pull/170)
for ref in refs:
self.log.debug("git fetch %s %s" % (url, ref))
repo.fetch(url, ref)
end_time = time.time()
total_time = end_time - start_time
self.log.info("Synced change %s in %0.5f seconds.", self.change_id, total_time)
class CheckReposTask(Task):
    # On startup, check all subscribed projects: if a project has no local
    # repo, or --fetch-missing-refs is supplied, check its changes for
    # missing refs and sync the associated changes
def __repr__(self):
return '<CheckReposTask>'
def __eq__(self, other):
if (other.__class__ == self.__class__):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
projects = session.getProjects(subscribed=True)
for project in projects:
try:
missing = False
try:
repo = gitrepo.get_repo(project.name, app.config)
except gitrepo.GitCloneError:
missing = True
if missing or app.fetch_missing_refs:
sync.submitTask(CheckRevisionsTask(project.key,
priority=LOW_PRIORITY))
except Exception:
self.log.exception("Exception checking repo %s" %
(project.name,))
class CheckRevisionsTask(Task):
def __init__(self, project_key, priority=NORMAL_PRIORITY):
super(CheckRevisionsTask, self).__init__(priority)
self.project_key = project_key
def __repr__(self):
return '<CheckRevisionsTask %s>' % (self.project_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_key == self.project_key):
return True
return False
def run(self, sync):
app = sync.app
to_sync = set()
with app.db.getSession() as session:
project = session.getProject(self.project_key)
repo = None
try:
repo = gitrepo.get_repo(project.name, app.config)
except gitrepo.GitCloneError:
pass
for change in project.open_changes:
if repo:
for revision in change.revisions:
if not (repo.hasCommit(revision.parent) and
repo.hasCommit(revision.commit)):
to_sync.add(change.id)
else:
to_sync.add(change.id)
for change_id in to_sync:
sync.submitTask(SyncChangeTask(change_id, priority=self.priority))
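# Scans the local database for pending local modifications (topic edits,
# rebases, status changes, stars, cherry-picks, commit message edits and
# reviews) and submits a dedicated upload task for each one.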
class UploadReviewsTask(Task):
def __repr__(self):
return '<UploadReviewsTask>'
def __eq__(self, other):
if (other.__class__ == self.__class__):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
for c in session.getPendingTopics():
sync.submitTask(SetTopicTask(c.key, self.priority))
for c in session.getPendingRebases():
sync.submitTask(RebaseChangeTask(c.key, self.priority))
for c in session.getPendingStatusChanges():
sync.submitTask(ChangeStatusTask(c.key, self.priority))
for c in session.getPendingStarred():
sync.submitTask(ChangeStarredTask(c.key, self.priority))
for c in session.getPendingCherryPicks():
sync.submitTask(SendCherryPickTask(c.key, self.priority))
for r in session.getPendingCommitMessages():
sync.submitTask(ChangeCommitMessageTask(r.key, self.priority))
for m in session.getPendingMessages():
sync.submitTask(UploadReviewTask(m.key, self.priority))
class SetTopicTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(SetTopicTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<SetTopicTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
data = dict(topic=change.topic)
change.pending_topic = False
# Inside db session for rollback
sync.put('changes/%s/topic' % (change.id,),
data)
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class RebaseChangeTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(RebaseChangeTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<RebaseChangeTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
change.pending_rebase = False
# Inside db session for rollback
sync.post('changes/%s/rebase' % (change.id,), {})
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class ChangeStarredTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(ChangeStarredTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<ChangeStarredTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
if change.starred:
sync.put('accounts/self/starred.changes/%s' % (change.id,),
data={})
else:
sync.delete('accounts/self/starred.changes/%s' % (change.id,),
data={})
change.pending_starred = False
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class ChangeStatusTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(ChangeStatusTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<ChangeStatusTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
if change.pending_status_message:
data = dict(message=change.pending_status_message)
else:
data = {}
change.pending_status = False
change.pending_status_message = None
# Inside db session for rollback
if change.status == 'ABANDONED':
sync.post('changes/%s/abandon' % (change.id,),
data)
elif change.status == 'NEW':
sync.post('changes/%s/restore' % (change.id,),
data)
elif change.status == 'SUBMITTED':
sync.post('changes/%s/submit' % (change.id,), {})
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class SendCherryPickTask(Task):
def __init__(self, cp_key, priority=NORMAL_PRIORITY):
super(SendCherryPickTask, self).__init__(priority)
self.cp_key = cp_key
def __repr__(self):
return '<SendCherryPickTask %s>' % (self.cp_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.cp_key == self.cp_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
cp = session.getPendingCherryPick(self.cp_key)
data = dict(message=cp.message,
destination=cp.branch)
session.delete(cp)
# Inside db session for rollback
ret = sync.post('changes/%s/revisions/%s/cherrypick' %
(cp.revision.change.id, cp.revision.commit),
data)
if ret and 'id' in ret:
sync.submitTask(SyncChangeTask(ret['id'], priority=self.priority))
class ChangeCommitMessageTask(Task):
def __init__(self, revision_key, priority=NORMAL_PRIORITY):
super(ChangeCommitMessageTask, self).__init__(priority)
self.revision_key = revision_key
def __repr__(self):
return '<ChangeCommitMessageTask %s>' % (self.revision_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.revision_key == self.revision_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
revision = session.getRevision(self.revision_key)
revision.pending_message = False
data = dict(message=revision.message)
# Inside db session for rollback
sync.post('changes/%s/revisions/%s/message' %
(revision.change.id, revision.commit),
data)
change_id = revision.change.id
sync.submitTask(SyncChangeTask(change_id, priority=self.priority))
class UploadReviewTask(Task):
def __init__(self, message_key, priority=NORMAL_PRIORITY):
super(UploadReviewTask, self).__init__(priority)
self.message_key = message_key
def __repr__(self):
return '<UploadReviewTask %s>' % (self.message_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.message_key == self.message_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
message = session.getMessage(self.message_key)
if message is None:
self.log.debug("Message %s has already been uploaded" % (
self.message_key))
return
change = message.revision.change
if not change.held:
self.log.debug("Syncing %s to find out if it should be held" % (change.id,))
t = SyncChangeTask(change.id)
t.run(sync)
self.results += t.results
submit = False
change_id = None
with app.db.getSession() as session:
message = session.getMessage(self.message_key)
revision = message.revision
change = message.revision.change
if change.held:
self.log.debug("Not uploading review to %s because it is held" %
(change.id,))
return
change_id = change.id
current_revision = change.revisions[-1]
if change.pending_status and change.status == 'SUBMITTED':
submit = True
data = dict(message=message.message,
strict_labels=False)
if revision == current_revision:
data['labels'] = {}
for approval in change.draft_approvals:
data['labels'][approval.category] = approval.value
session.delete(approval)
comments = {}
for file in revision.files:
if file.draft_comments:
comment_list = []
for comment in file.draft_comments:
d = dict(line=comment.line,
message=comment.message)
if comment.parent:
d['side'] = 'PARENT'
comment_list.append(d)
session.delete(comment)
comments[file.path] = comment_list
if comments:
data['comments'] = comments
session.delete(message)
# Inside db session for rollback
sync.post('changes/%s/revisions/%s/review' % (change.id, revision.commit),
data)
if submit:
            # Use a separate db session in case the submit fails after the
            # message has been posted successfully
with app.db.getSession() as session:
change = session.getChangeByID(change_id)
change.pending_status = False
change.pending_status_message = None
sync.post('changes/%s/submit' % (change_id,), {})
sync.submitTask(SyncChangeTask(change_id, priority=self.priority))
class PruneDatabaseTask(Task):
def __init__(self, age, priority=NORMAL_PRIORITY):
super(PruneDatabaseTask, self).__init__(priority)
self.age = age
def __repr__(self):
return '<PruneDatabaseTask %s>' % (self.age,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.age == self.age):
return True
return False
def run(self, sync):
if not self.age:
return
app = sync.app
with app.db.getSession() as session:
for change in session.getChanges('status:closed age:%s' % self.age):
t = PruneChangeTask(change.key, priority=self.priority)
self.tasks.append(t)
sync.submitTask(t)
t = VacuumDatabaseTask(priority=self.priority)
self.tasks.append(t)
sync.submitTask(t)
class PruneChangeTask(Task):
def __init__(self, key, priority=NORMAL_PRIORITY):
super(PruneChangeTask, self).__init__(priority)
self.key = key
def __repr__(self):
return '<PruneChangeTask %s>' % (self.key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.key == self.key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.key)
if not change:
return
repo = gitrepo.get_repo(change.project.name, app.config)
self.log.info("Pruning %s change %s status:%s updated:%s" % (
change.project.name, change.number, change.status, change.updated))
change_ref = None
for revision in change.revisions:
if change_ref is None:
change_ref = '/'.join(revision.fetch_ref.split('/')[:-1])
self.log.info("Deleting %s ref %s" % (
change.project.name, revision.fetch_ref))
repo.deleteRef(revision.fetch_ref)
self.log.info("Deleting %s ref %s" % (
change.project.name, change_ref))
try:
repo.deleteRef(change_ref)
except OSError, e:
if e.errno not in [errno.EISDIR, errno.EPERM]:
raise
session.delete(change)
class VacuumDatabaseTask(Task):
def __init__(self, priority=NORMAL_PRIORITY):
super(VacuumDatabaseTask, self).__init__(priority)
def __repr__(self):
return '<VacuumDatabaseTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
session.vacuum()
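# Owns the connection to the Gerrit server: holds the priority queue of tasks,
# runs them in the run() loop, resubmits periodic work from a background thread,
# flips into offline mode on connection errors, and provides the authenticated
# REST helpers (get/post/put/delete) used by the tasks above.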
class Sync(object):
def __init__(self, app):
self.user_agent = 'Gertty/%s %s' % (gertty.version.version_info.release_string(),
requests.utils.default_user_agent())
self.offline = False
self.account_id = None
self.app = app
self.log = logging.getLogger('gertty.sync')
self.queue = MultiQueue([HIGH_PRIORITY, NORMAL_PRIORITY, LOW_PRIORITY])
self.result_queue = Queue.Queue()
self.session = requests.Session()
if self.app.config.auth_type == 'basic':
authclass = requests.auth.HTTPBasicAuth
else:
authclass = requests.auth.HTTPDigestAuth
self.auth = authclass(
self.app.config.username, self.app.config.password)
self.submitTask(SyncOwnAccountTask(HIGH_PRIORITY))
self.submitTask(CheckReposTask(HIGH_PRIORITY))
self.submitTask(UploadReviewsTask(HIGH_PRIORITY))
self.submitTask(SyncProjectListTask(HIGH_PRIORITY))
self.submitTask(SyncSubscribedProjectsTask(NORMAL_PRIORITY))
self.submitTask(SyncSubscribedProjectBranchesTask(LOW_PRIORITY))
self.submitTask(PruneDatabaseTask(self.app.config.expire_age, LOW_PRIORITY))
self.periodic_thread = threading.Thread(target=self.periodicSync)
self.periodic_thread.daemon = True
self.periodic_thread.start()
def periodicSync(self):
hourly = time.time()
while True:
try:
time.sleep(60)
self.syncSubscribedProjects()
now = time.time()
if now-hourly > 3600:
hourly = now
self.pruneDatabase()
except Exception:
self.log.exception('Exception in periodicSync')
def submitTask(self, task):
if not self.offline:
if not self.queue.put(task, task.priority):
task.complete(False)
else:
task.complete(False)
def run(self, pipe):
task = None
while True:
task = self._run(pipe, task)
def _run(self, pipe, task=None):
if not task:
task = self.queue.get()
self.log.debug('Run: %s' % (task,))
try:
task.run(self)
task.complete(True)
except requests.ConnectionError, e:
self.log.warning("Offline due to: %s" % (e,))
if not self.offline:
self.submitTask(UploadReviewsTask(HIGH_PRIORITY))
self.offline = True
self.app.status.update(offline=True, refresh=False)
os.write(pipe, 'refresh\n')
time.sleep(30)
return task
except Exception:
task.complete(False)
self.log.exception('Exception running task %s' % (task,))
self.app.status.update(error=True, refresh=False)
self.offline = False
self.app.status.update(offline=False, refresh=False)
for r in task.results:
self.result_queue.put(r)
os.write(pipe, 'refresh\n')
return None
def url(self, path):
return self.app.config.url + 'a/' + path
def get(self, path):
url = self.url(path)
self.log.debug('GET: %s' % (url,))
r = self.session.get(url,
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Accept': 'application/json',
'Accept-Encoding': 'gzip',
'User-Agent': self.user_agent})
if r.status_code == 200:
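            # Gerrit prefixes JSON responses with a )]}' guard against XSSI;
            # skip past it before parsing.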
ret = json.loads(r.text[4:])
if len(ret):
self.log.debug('200 OK, Received: %s' % (ret,))
else:
self.log.debug('200 OK, No body.')
return ret
else:
self.log.warn('HTTP response: %d', r.status_code)
def post(self, path, data):
url = self.url(path)
self.log.debug('POST: %s' % (url,))
self.log.debug('data: %s' % (data,))
r = self.session.post(url, data=json.dumps(data).encode('utf8'),
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': self.user_agent})
self.log.debug('Received: %s' % (r.text,))
ret = None
if r.text and len(r.text)>4:
try:
ret = json.loads(r.text[4:])
except Exception:
self.log.exception("Unable to parse result %s from post to %s" %
(r.text, url))
return ret
def put(self, path, data):
url = self.url(path)
self.log.debug('PUT: %s' % (url,))
self.log.debug('data: %s' % (data,))
r = self.session.put(url, data=json.dumps(data).encode('utf8'),
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': self.user_agent})
self.log.debug('Received: %s' % (r.text,))
def delete(self, path, data):
url = self.url(path)
self.log.debug('DELETE: %s' % (url,))
self.log.debug('data: %s' % (data,))
r = self.session.delete(url, data=json.dumps(data).encode('utf8'),
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': self.user_agent})
self.log.debug('Received: %s' % (r.text,))
def syncSubscribedProjects(self):
task = SyncSubscribedProjectsTask(LOW_PRIORITY)
self.submitTask(task)
if task.wait():
for subtask in task.tasks:
subtask.wait()
def pruneDatabase(self):
task = PruneDatabaseTask(self.app.config.expire_age, LOW_PRIORITY)
self.submitTask(task)
if task.wait():
for subtask in task.tasks:
subtask.wait()
def _syncChangeByCommit(self, commit, priority):
# Accumulate sync change by commit tasks because they often
# come in batches. This method assumes it is being called
# from within the run queue already and therefore does not
# need to worry about locking the queue.
task = None
for task in self.queue.find(SyncChangesByCommitsTask, priority):
if task.addCommit(commit):
return
task = SyncChangesByCommitsTask([commit], priority)
self.submitTask(task)
|
|
"""Representation of a deCONZ gateway."""
import asyncio
import async_timeout
from pydeconz import DeconzSession, errors
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_ALLOW_CLIP_SENSOR,
CONF_ALLOW_DECONZ_GROUPS,
CONF_ALLOW_NEW_DEVICES,
CONF_MASTER_GATEWAY,
DEFAULT_ALLOW_CLIP_SENSOR,
DEFAULT_ALLOW_DECONZ_GROUPS,
DEFAULT_ALLOW_NEW_DEVICES,
DOMAIN as DECONZ_DOMAIN,
LOGGER,
NEW_GROUP,
NEW_LIGHT,
NEW_SCENE,
NEW_SENSOR,
PLATFORMS,
)
from .deconz_event import async_setup_events, async_unload_events
from .errors import AuthenticationRequired, CannotConnect
@callback
def get_gateway_from_config_entry(hass, config_entry):
"""Return gateway with a matching config entry ID."""
return hass.data[DECONZ_DOMAIN][config_entry.entry_id]
class DeconzGateway:
"""Manages a single deCONZ gateway."""
def __init__(self, hass, config_entry) -> None:
"""Initialize the system."""
self.hass = hass
self.config_entry = config_entry
self.api = None
self.available = True
self.ignore_state_updates = False
self.deconz_ids = {}
self.entities = {}
self.events = []
@property
def bridgeid(self) -> str:
"""Return the unique identifier of the gateway."""
return self.config_entry.unique_id
@property
def host(self) -> str:
"""Return the host of the gateway."""
return self.config_entry.data[CONF_HOST]
@property
def master(self) -> bool:
"""Gateway which is used with deCONZ services without defining id."""
return self.config_entry.options[CONF_MASTER_GATEWAY]
# Options
@property
def option_allow_clip_sensor(self) -> bool:
"""Allow loading clip sensor from gateway."""
return self.config_entry.options.get(
CONF_ALLOW_CLIP_SENSOR, DEFAULT_ALLOW_CLIP_SENSOR
)
@property
def option_allow_deconz_groups(self) -> bool:
"""Allow loading deCONZ groups from gateway."""
return self.config_entry.options.get(
CONF_ALLOW_DECONZ_GROUPS, DEFAULT_ALLOW_DECONZ_GROUPS
)
@property
def option_allow_new_devices(self) -> bool:
"""Allow automatic adding of new devices."""
return self.config_entry.options.get(
CONF_ALLOW_NEW_DEVICES, DEFAULT_ALLOW_NEW_DEVICES
)
# Signals
@property
def signal_reachable(self) -> str:
"""Gateway specific event to signal a change in connection status."""
return f"deconz-reachable-{self.bridgeid}"
@callback
def async_signal_new_device(self, device_type) -> str:
"""Gateway specific event to signal new device."""
new_device = {
NEW_GROUP: f"deconz_new_group_{self.bridgeid}",
NEW_LIGHT: f"deconz_new_light_{self.bridgeid}",
NEW_SCENE: f"deconz_new_scene_{self.bridgeid}",
NEW_SENSOR: f"deconz_new_sensor_{self.bridgeid}",
}
return new_device[device_type]
# Callbacks
@callback
def async_connection_status_callback(self, available) -> None:
"""Handle signals of gateway connection status."""
self.available = available
self.ignore_state_updates = False
async_dispatcher_send(self.hass, self.signal_reachable, True)
@callback
def async_add_device_callback(
self, device_type, device=None, force: bool = False
) -> None:
"""Handle event of new device creation in deCONZ."""
if not force and not self.option_allow_new_devices:
return
args = []
if device is not None and not isinstance(device, list):
args.append([device])
async_dispatcher_send(
self.hass,
self.async_signal_new_device(device_type),
*args, # Don't send device if None, it would override default value in listeners
)
async def async_update_device_registry(self) -> None:
"""Update device registry."""
device_registry = await self.hass.helpers.device_registry.async_get_registry()
# Host device
device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id,
connections={(CONNECTION_NETWORK_MAC, self.api.config.mac)},
)
# Gateway service
device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id,
identifiers={(DECONZ_DOMAIN, self.api.config.bridgeid)},
manufacturer="Dresden Elektronik",
model=self.api.config.modelid,
name=self.api.config.name,
sw_version=self.api.config.swversion,
via_device=(CONNECTION_NETWORK_MAC, self.api.config.mac),
)
async def async_setup(self) -> bool:
"""Set up a deCONZ gateway."""
try:
self.api = await get_gateway(
self.hass,
self.config_entry.data,
self.async_add_device_callback,
self.async_connection_status_callback,
)
except CannotConnect as err:
raise ConfigEntryNotReady from err
except AuthenticationRequired as err:
raise ConfigEntryAuthFailed from err
self.hass.config_entries.async_setup_platforms(self.config_entry, PLATFORMS)
await async_setup_events(self)
self.api.start()
self.config_entry.add_update_listener(self.async_config_entry_updated)
return True
@staticmethod
async def async_config_entry_updated(hass, entry) -> None:
"""Handle signals of config entry being updated.
This is a static method because a class method (bound method), can not be used with weak references.
Causes for this is either discovery updating host address or config entry options changing.
"""
gateway = get_gateway_from_config_entry(hass, entry)
if gateway.api.host != gateway.host:
gateway.api.close()
gateway.api.host = gateway.host
gateway.api.start()
return
await gateway.options_updated()
async def options_updated(self):
"""Manage entities affected by config entry options."""
deconz_ids = []
if self.option_allow_clip_sensor:
self.async_add_device_callback(NEW_SENSOR)
else:
deconz_ids += [
sensor.deconz_id
for sensor in self.api.sensors.values()
if sensor.type.startswith("CLIP")
]
if self.option_allow_deconz_groups:
self.async_add_device_callback(NEW_GROUP)
else:
deconz_ids += [group.deconz_id for group in self.api.groups.values()]
entity_registry = await self.hass.helpers.entity_registry.async_get_registry()
for entity_id, deconz_id in self.deconz_ids.items():
if deconz_id in deconz_ids and entity_registry.async_is_registered(
entity_id
):
                # Removing an entity from the entity registry will also remove
                # it from Home Assistant
entity_registry.async_remove(entity_id)
@callback
def shutdown(self, event) -> None:
"""Wrap the call to deconz.close.
Used as an argument to EventBus.async_listen_once.
"""
self.api.close()
async def async_reset(self):
"""Reset this gateway to default state."""
self.api.async_connection_status_callback = None
self.api.close()
await self.hass.config_entries.async_unload_platforms(
self.config_entry, PLATFORMS
)
async_unload_events(self)
self.deconz_ids = {}
return True
async def get_gateway(
hass, config, async_add_device_callback, async_connection_status_callback
) -> DeconzSession:
"""Create a gateway object and verify configuration."""
session = aiohttp_client.async_get_clientsession(hass)
deconz = DeconzSession(
session,
config[CONF_HOST],
config[CONF_PORT],
config[CONF_API_KEY],
async_add_device=async_add_device_callback,
connection_status=async_connection_status_callback,
)
try:
with async_timeout.timeout(10):
await deconz.initialize()
return deconz
except errors.Unauthorized as err:
LOGGER.warning("Invalid key for deCONZ at %s", config[CONF_HOST])
raise AuthenticationRequired from err
except (asyncio.TimeoutError, errors.RequestError) as err:
LOGGER.error("Error connecting to deCONZ gateway at %s", config[CONF_HOST])
raise CannotConnect from err
|
|
# Copyright 2015-2019 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import datetime
import a_sync
import asynctest
import mock
import pytest
import paasta_tools.instance.kubernetes as pik
from paasta_tools import utils
from tests.conftest import Struct
from tests.conftest import wrap_value_in_task
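# A Struct-based stand-in for a Kubernetes pod API object, covering only the
# fields these tests exercise.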
@pytest.fixture
def mock_pod():
return Struct(
metadata=Struct(
owner_references=[Struct(kind="ReplicaSet", name="replicaset_1")],
name="pod_1",
namespace="paasta",
creation_timestamp=datetime.datetime(2021, 3, 6),
deletion_timestamp=None,
labels={
"paasta.yelp.com/git_sha": "aaa000",
"paasta.yelp.com/config_sha": "config000",
"paasta.yelp.com/service": "service",
"paasta.yelp.com/instance": "instance",
},
),
status=Struct(
pod_ip="1.2.3.4",
host_ip="4.3.2.1",
phase="Running",
reason=None,
message=None,
conditions=[
Struct(type="Ready", status="True",),
Struct(type="PodScheduled", status="True",),
],
container_statuses=[
Struct(
name="main_container",
restart_count=0,
state=Struct(
running=Struct(
reason="a_state_reason",
message="a_state_message",
started_at=datetime.datetime(2021, 3, 6),
),
waiting=None,
terminated=None,
),
last_state=Struct(
running=None,
waiting=None,
terminated=dict(
reason="a_last_state_reason",
message="a_last_state_message",
started_at=datetime.datetime(2021, 3, 4),
finished_at=datetime.datetime(2021, 3, 5),
),
),
),
],
),
spec=Struct(
containers=[
Struct(
name="main_container",
liveness_probe=Struct(
initial_delay_seconds=1,
failure_threshold=2,
period_seconds=3,
timeout_seconds=4,
http_get=Struct(port=8080, path="/healthcheck",),
),
)
]
),
)
def test_instance_types_integrity():
for it in pik.INSTANCE_TYPES:
assert it in utils.INSTANCE_TYPES
for it in pik.INSTANCE_TYPES_WITH_SET_STATE:
assert it in utils.INSTANCE_TYPES
def instance_status_kwargs():
return dict(
service="",
instance="",
instance_type="",
verbose=0,
include_smartstack=False,
include_envoy=False,
settings=mock.Mock(),
use_new=False,
)
@mock.patch("paasta_tools.instance.kubernetes.cr_status", autospec=True)
@mock.patch("paasta_tools.instance.kubernetes.kubernetes_status", autospec=True)
def test_instance_status_invalid_instance_type(mock_kubernetes_status, mock_cr_status):
kwargs = instance_status_kwargs()
with pytest.raises(RuntimeError) as excinfo:
pik.instance_status(**kwargs)
assert "Unknown instance type" in str(excinfo.value)
assert len(mock_cr_status.mock_calls) == 0
assert len(mock_kubernetes_status.mock_calls) == 0
@mock.patch("paasta_tools.instance.kubernetes.cr_status", autospec=True)
@mock.patch("paasta_tools.instance.kubernetes.kubernetes_status", autospec=True)
def test_instance_status_kubernetes_only(mock_kubernetes_status, mock_cr_status):
kwargs = instance_status_kwargs()
kwargs.update(instance_type="kubernetes")
pik.instance_status(**kwargs)
assert len(mock_cr_status.mock_calls) == 0
assert len(mock_kubernetes_status.mock_calls) == 1
@mock.patch("paasta_tools.instance.kubernetes.cr_status", autospec=True)
@mock.patch("paasta_tools.instance.kubernetes.kubernetes_status", autospec=True)
def test_instance_status_cr_only(mock_kubernetes_status, mock_cr_status):
kwargs = instance_status_kwargs()
kwargs.update(instance_type="flink")
pik.instance_status(**kwargs)
assert len(mock_cr_status.mock_calls) == 1
assert len(mock_kubernetes_status.mock_calls) == 0
@mock.patch("paasta_tools.instance.kubernetes.cr_status", autospec=True)
@mock.patch("paasta_tools.instance.kubernetes.kubernetes_status", autospec=True)
def test_instance_status_cr_and_kubernetes(mock_kubernetes_status, mock_cr_status):
kwargs = instance_status_kwargs()
kwargs.update(instance_type="cassandracluster")
pik.instance_status(**kwargs)
assert len(mock_cr_status.mock_calls) == 1
assert len(mock_kubernetes_status.mock_calls) == 1
def test_kubernetes_status():
with asynctest.patch(
"paasta_tools.instance.kubernetes.job_status", autospec=True,
), asynctest.patch(
"paasta_tools.kubernetes_tools.replicasets_for_service_instance", autospec=True,
) as mock_replicasets_for_service_instance, asynctest.patch(
"paasta_tools.kubernetes_tools.pods_for_service_instance", autospec=True,
) as mock_pods_for_service_instance, asynctest.patch(
"paasta_tools.kubernetes_tools.get_kubernetes_app_by_name", autospec=True,
), asynctest.patch(
"paasta_tools.instance.kubernetes.LONG_RUNNING_INSTANCE_TYPE_HANDLERS",
autospec=True,
) as mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS:
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS["flink"] = mock.Mock()
mock_pods_for_service_instance.return_value = []
mock_replicasets_for_service_instance.return_value = []
status = pik.kubernetes_status(
service="",
instance="",
verbose=0,
include_smartstack=False,
include_envoy=False,
instance_type="flink",
settings=mock.Mock(),
)
assert "app_count" in status
assert "evicted_count" in status
assert "bounce_method" in status
assert "desired_state" in status
class TestKubernetesStatusV2:
@pytest.fixture
def mock_pods_for_service_instance(self):
with asynctest.patch(
"paasta_tools.kubernetes_tools.pods_for_service_instance", autospec=True
) as mock_pods_for_service_instance:
yield mock_pods_for_service_instance
@pytest.fixture
def mock_replicasets_for_service_instance(self):
with asynctest.patch(
"paasta_tools.kubernetes_tools.replicasets_for_service_instance",
autospec=True,
) as mock_replicasets_for_service_instance:
yield mock_replicasets_for_service_instance
@pytest.fixture
def mock_mesh_status(self):
with asynctest.patch(
"paasta_tools.instance.kubernetes.mesh_status", autospec=True,
) as mock_mesh_status:
yield mock_mesh_status
@pytest.fixture
def mock_load_service_namespace_config(self):
with asynctest.patch(
"paasta_tools.kubernetes_tools.load_service_namespace_config",
autospec=True,
) as mock_load_service_namespace_config:
yield mock_load_service_namespace_config
@pytest.fixture
def mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS(self):
with asynctest.patch(
"paasta_tools.instance.kubernetes.LONG_RUNNING_INSTANCE_TYPE_HANDLERS",
autospec=True,
) as mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS:
yield mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS
@pytest.fixture
def mock_controller_revisions_for_service_instance(self):
with asynctest.patch(
"paasta_tools.kubernetes_tools.controller_revisions_for_service_instance",
autospec=True,
) as mock_controller_revisions_for_service_instance:
yield mock_controller_revisions_for_service_instance
@pytest.fixture
def mock_get_pod_event_messages(self):
with asynctest.patch(
"paasta_tools.instance.kubernetes.get_pod_event_messages", autospec=True,
) as mock_get_pod_event_messages:
yield mock_get_pod_event_messages
def test_replicaset(
self,
mock_replicasets_for_service_instance,
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS,
mock_load_service_namespace_config,
mock_pods_for_service_instance,
mock_mesh_status,
mock_get_pod_event_messages,
mock_pod,
):
mock_job_config = mock.Mock(get_persistent_volumes=mock.Mock(return_value=[]),)
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS[
"kubernetes"
].loader.return_value = mock_job_config
mock_replicasets_for_service_instance.return_value = [
Struct(
spec=Struct(replicas=1),
metadata=Struct(
name="replicaset_1",
creation_timestamp=datetime.datetime(2021, 3, 5),
deletion_timestamp=None,
labels={
"paasta.yelp.com/git_sha": "aaa000",
"paasta.yelp.com/config_sha": "config000",
},
),
),
]
mock_pods_for_service_instance.return_value = [mock_pod]
mock_load_service_namespace_config.return_value = {}
mock_job_config.get_registrations.return_value = ["service.instance"]
mock_get_pod_event_messages.return_value = []
status = pik.kubernetes_status_v2(
service="service",
instance="instance",
verbose=0,
include_smartstack=False,
include_envoy=False,
instance_type="kubernetes",
settings=mock.Mock(),
)
assert status == {
"app_name": mock_job_config.get_sanitised_deployment_name.return_value,
"desired_state": mock_job_config.get_desired_state.return_value,
"desired_instances": mock_job_config.get_instances.return_value,
"bounce_method": mock_job_config.get_bounce_method.return_value,
"versions": [
{
"type": "ReplicaSet",
"name": "replicaset_1",
"replicas": 1,
"ready_replicas": 0,
"create_timestamp": datetime.datetime(2021, 3, 5).timestamp(),
"git_sha": "aaa000",
"config_sha": "config000",
"pods": [
{
"name": "pod_1",
"ip": "1.2.3.4",
"create_timestamp": datetime.datetime(
2021, 3, 6
).timestamp(),
"delete_timestamp": None,
"host": "4.3.2.1",
"phase": "Running",
"reason": None,
"message": None,
"scheduled": True,
"ready": True,
"mesh_ready": None,
"events": [],
"containers": [
{
"healthcheck_grace_period": 1,
"healthcheck_cmd": {
"http_url": "http://1.2.3.4:8080/healthcheck"
},
"name": "main_container",
"restart_count": 0,
"state": "running",
"reason": "a_state_reason",
"message": "a_state_message",
"last_state": "terminated",
"last_reason": "a_last_state_reason",
"last_message": "a_last_state_message",
"last_duration": 86400.0,
"last_timestamp": datetime.datetime(
2021, 3, 4
).timestamp(),
"previous_tail_lines": None,
"timestamp": datetime.datetime(
2021, 3, 6
).timestamp(),
"tail_lines": {
"error_message": "",
"stderr": [],
"stdout": [],
},
},
],
},
],
}
],
}
def test_statefulset(
self,
mock_controller_revisions_for_service_instance,
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS,
mock_load_service_namespace_config,
mock_pods_for_service_instance,
mock_mesh_status,
mock_pod,
):
mock_job_config = mock.Mock(
get_persistent_volumes=mock.Mock(return_value=[mock.Mock]),
)
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS[
"kubernetes"
].loader.return_value = mock_job_config
mock_controller_revisions_for_service_instance.return_value = [
Struct(
metadata=Struct(
name="controller_revision_1",
creation_timestamp=datetime.datetime(2021, 4, 1),
labels={
"paasta.yelp.com/git_sha": "aaa000",
"paasta.yelp.com/config_sha": "config000",
},
),
),
]
mock_pod.metadata.owner_references = []
mock_pods_for_service_instance.return_value = [mock_pod]
with asynctest.patch(
"paasta_tools.instance.kubernetes.get_pod_status", autospec=True
) as mock_get_pod_status:
mock_get_pod_status.return_value = {}
status = pik.kubernetes_status_v2(
service="service",
instance="instance",
verbose=0,
include_smartstack=False,
include_envoy=False,
instance_type="kubernetes",
settings=mock.Mock(),
)
assert len(status["versions"]) == 1
assert status["versions"][0] == {
"name": "controller_revision_1",
"type": "ControllerRevision",
"replicas": 1,
"ready_replicas": 1,
"create_timestamp": datetime.datetime(2021, 4, 1).timestamp(),
"git_sha": "aaa000",
"config_sha": "config000",
"pods": [mock.ANY],
}
def test_event_timeout(
self,
mock_replicasets_for_service_instance,
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS,
mock_load_service_namespace_config,
mock_pods_for_service_instance,
mock_mesh_status,
mock_get_pod_event_messages,
mock_pod,
):
mock_job_config = mock.Mock(get_persistent_volumes=mock.Mock(return_value=[]),)
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS[
"kubernetes"
].loader.return_value = mock_job_config
mock_replicasets_for_service_instance.return_value = [
Struct(
spec=Struct(replicas=1),
metadata=Struct(
name="replicaset_1",
creation_timestamp=datetime.datetime(2021, 3, 5),
deletion_timestamp=None,
labels={
"paasta.yelp.com/git_sha": "aaa000",
"paasta.yelp.com/config_sha": "config000",
},
),
),
]
mock_pods_for_service_instance.return_value = [mock_pod]
mock_load_service_namespace_config.return_value = {}
mock_job_config.get_registrations.return_value = ["service.instance"]
mock_get_pod_event_messages.side_effect = asyncio.TimeoutError
status = pik.kubernetes_status_v2(
service="service",
instance="instance",
verbose=0,
include_smartstack=False,
include_envoy=False,
instance_type="kubernetes",
settings=mock.Mock(),
)
# Verify we did not throw an exception
assert status
assert all(
p["events"] == [{"error": "Could not retrieve events. Please try again."}]
for p in status["versions"][0]["pods"]
)
def test_pod_timeout(
self,
mock_replicasets_for_service_instance,
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS,
mock_load_service_namespace_config,
mock_pods_for_service_instance,
mock_mesh_status,
mock_get_pod_event_messages,
mock_pod,
):
mock_job_config = mock.Mock(get_persistent_volumes=mock.Mock(return_value=[]),)
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS[
"kubernetes"
].loader.return_value = mock_job_config
mock_replicasets_for_service_instance.return_value = [
Struct(
spec=Struct(replicas=1),
metadata=Struct(
name="replicaset_1",
creation_timestamp=datetime.datetime(2021, 3, 5),
deletion_timestamp=None,
labels={
"paasta.yelp.com/git_sha": "aaa000",
"paasta.yelp.com/config_sha": "config000",
},
),
),
]
mock_load_service_namespace_config.return_value = {}
mock_job_config.get_registrations.return_value = ["service.instance"]
mock_get_pod_event_messages.return_value = []
mock_pods_for_service_instance.side_effect = asyncio.TimeoutError
status = pik.kubernetes_status_v2(
service="service",
instance="instance",
verbose=0,
include_smartstack=False,
include_envoy=False,
instance_type="kubernetes",
settings=mock.Mock(),
)
# Verify we did not throw an exception
assert status
assert "Could not fetch instance data" in status["error_message"]
@mock.patch("paasta_tools.kubernetes_tools.get_kubernetes_app_by_name", autospec=True)
def test_job_status_include_replicaset_non_verbose(mock_get_kubernetes_app_by_name):
kstatus = {}
a_sync.block(
pik.job_status,
kstatus=kstatus,
client=mock.Mock(),
job_config=mock.Mock(),
pod_list=[],
replicaset_list=[mock.Mock(), mock.Mock(), mock.Mock()],
verbose=0,
namespace=mock.Mock(),
)
assert len(kstatus["replicasets"]) == 3
def test_kubernetes_status_include_smartstack():
with asynctest.patch(
"paasta_tools.instance.kubernetes.job_status", autospec=True,
), asynctest.patch(
"paasta_tools.kubernetes_tools.load_service_namespace_config", autospec=True
) as mock_load_service_namespace_config, asynctest.patch(
"paasta_tools.instance.kubernetes.mesh_status", autospec=True,
) as mock_mesh_status, asynctest.patch(
"paasta_tools.kubernetes_tools.replicasets_for_service_instance", autospec=True
) as mock_replicasets_for_service_instance, asynctest.patch(
"paasta_tools.kubernetes_tools.pods_for_service_instance", autospec=True,
) as mock_pods_for_service_instance, asynctest.patch(
"paasta_tools.kubernetes_tools.get_kubernetes_app_by_name", autospec=True,
), asynctest.patch(
"paasta_tools.instance.kubernetes.LONG_RUNNING_INSTANCE_TYPE_HANDLERS",
autospec=True,
) as mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS:
mock_load_service_namespace_config.return_value = {"proxy_port": 1234}
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS["flink"] = mock.Mock()
mock_pods_for_service_instance.return_value = []
mock_replicasets_for_service_instance.return_value = []
mock_service = mock.Mock()
status = pik.kubernetes_status(
service=mock_service,
instance="",
verbose=0,
include_smartstack=True,
include_envoy=False,
instance_type="flink",
settings=mock.Mock(),
)
assert (
mock_load_service_namespace_config.mock_calls[0][2]["service"]
is mock_service
)
assert mock_mesh_status.mock_calls[0][2]["service"] is mock_service
assert "app_count" in status
assert "evicted_count" in status
assert "bounce_method" in status
assert "desired_state" in status
def test_cr_status_bad_instance_type():
with pytest.raises(RuntimeError) as excinfo:
pik.cr_status(
service="",
instance="",
verbose=0,
instance_type="marathon",
kube_client=mock.Mock(),
)
assert "Unknown instance type" in str(excinfo.value)
@mock.patch("paasta_tools.kubernetes_tools.get_cr", autospec=True)
def test_cr_status_happy_path(mock_get_cr):
mock_status = mock.Mock()
mock_metadata = mock.Mock()
mock_return = dict(status=mock_status, metadata=mock_metadata)
mock_get_cr.return_value = mock_return
status = pik.cr_status(
service="",
instance="",
verbose=0,
instance_type="flink",
kube_client=mock.Mock(),
)
assert status == mock_return
def test_set_cr_desired_state_invalid_instance_type():
with pytest.raises(RuntimeError) as excinfo:
pik.set_cr_desired_state(
kube_client=mock.Mock(),
service=mock.Mock(),
instance=mock.Mock(),
instance_type="marathon",
desired_state=mock.Mock(),
)
assert "Unknown instance type" in str(excinfo.value)
@mock.patch("paasta_tools.kubernetes_tools.set_cr_desired_state", autospec=True)
def test_set_cr_desired_state_calls_k8s_tools(mock_set_cr_desired_state):
pik.set_cr_desired_state(
kube_client=mock.Mock(),
service=mock.Mock(),
instance=mock.Mock(),
instance_type="flink",
desired_state=mock.Mock(),
)
assert len(mock_set_cr_desired_state.mock_calls) == 1
def test_can_set_state():
for it in pik.INSTANCE_TYPES_WITH_SET_STATE:
assert pik.can_set_state(it)
assert not pik.can_set_state("marathon")
def test_can_handle():
for it in pik.INSTANCE_TYPES:
assert pik.can_handle(it)
assert not pik.can_handle("marathon")
def test_filter_actually_running_replicasets():
replicaset_list = [
mock.Mock(),
mock.Mock(),
mock.Mock(),
mock.Mock(),
]
# the `spec` kwarg is special to Mock so we have to set it this way.
replicaset_list[0].configure_mock(
**{"spec.replicas": 5, "status.ready_replicas": 5}
)
replicaset_list[1].configure_mock(
**{"spec.replicas": 5, "status.ready_replicas": 0}
)
replicaset_list[2].configure_mock(
**{"spec.replicas": 0, "status.ready_replicas": 0}
)
replicaset_list[3].configure_mock(
**{"spec.replicas": 0, "status.ready_replicas": 5}
)
expected = [
replicaset_list[0],
replicaset_list[1],
replicaset_list[3],
]
assert pik.filter_actually_running_replicasets(replicaset_list) == expected
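# (From the expectations above: a replicaset counts as "actually running" when
# either spec.replicas or status.ready_replicas is non-zero; only the entry
# with both set to 0 is filtered out.)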
@pytest.mark.asyncio
async def test_get_pod_status_mesh_ready(event_loop):
with asynctest.patch(
"paasta_tools.instance.kubernetes.get_pod_containers", autospec=True
) as mock_get_pod_containers, asynctest.patch(
"paasta_tools.kubernetes_tools.is_pod_scheduled", autospec=True
) as mock_is_pod_scheduled, asynctest.patch(
"paasta_tools.kubernetes_tools.get_pod_event_messages", autospec=True
) as mock_get_pod_event_messages:
mock_get_pod_containers.return_value = []
mock_get_pod_event_messages.return_value = []
mock_is_pod_scheduled.return_value = True
mock_pod = mock.MagicMock()
mock_pod.status.pod_ip = "1.2.3.4"
mock_ready_condition = mock.MagicMock()
mock_ready_condition.type = "Ready"
mock_ready_condition.status = "True"
mock_kube_client = mock.MagicMock()
mock_pod.status.conditions = [mock_ready_condition]
backends_task = wrap_value_in_task([{"address": "0.0.0.0"}])
status = await pik.get_pod_status(mock_pod, backends_task, mock_kube_client, 10)
assert status["ready"]
assert not status["mesh_ready"]
@pytest.mark.parametrize(
"include_smartstack,include_envoy,expected",
[
(True, True, ("smartstack", "envoy")),
(True, False, ("smartstack",)),
(False, True, ("envoy",)),
],
)
def test_kubernetes_mesh_status(
include_smartstack, include_envoy, expected,
):
with asynctest.patch(
"paasta_tools.kubernetes_tools.load_service_namespace_config", autospec=True
) as mock_load_service_namespace_config, asynctest.patch(
"paasta_tools.instance.kubernetes.mesh_status", autospec=True
) as mock_mesh_status, asynctest.patch(
"paasta_tools.kubernetes_tools.pods_for_service_instance", autospec=True
) as mock_pods_for_service_instance, asynctest.patch(
"paasta_tools.instance.kubernetes.LONG_RUNNING_INSTANCE_TYPE_HANDLERS",
{"flink": mock.Mock()},
autospec=False,
):
mock_load_service_namespace_config.return_value = {"proxy_port": 1234}
mock_pods_for_service_instance.return_value = ["pod_1"]
mock_job_config = pik.LONG_RUNNING_INSTANCE_TYPE_HANDLERS[
"flink"
].loader.return_value
mock_settings = mock.Mock()
kmesh = pik.kubernetes_mesh_status(
service="fake_service",
instance="fake_instance",
instance_type="flink",
settings=mock_settings,
include_smartstack=include_smartstack,
include_envoy=include_envoy,
)
assert len(kmesh) == len(expected)
for i in range(len(expected)):
mesh_type = expected[i]
assert kmesh.get(mesh_type) == mock_mesh_status.return_value
assert mock_mesh_status.call_args_list[i] == mock.call(
service="fake_service",
instance=mock_job_config.get_nerve_namespace.return_value,
job_config=mock_job_config,
service_namespace_config={"proxy_port": 1234},
pods_task=mock.ANY,
should_return_individual_backends=True,
settings=mock_settings,
service_mesh=getattr(pik.ServiceMesh, mesh_type.upper()),
)
_, kwargs = mock_mesh_status.call_args_list[i]
assert kwargs["pods_task"].result() == ["pod_1"]
@mock.patch(
"paasta_tools.kubernetes_tools.load_service_namespace_config", autospec=True
)
@mock.patch("paasta_tools.instance.kubernetes.mesh_status", autospec=True)
@mock.patch(
"paasta_tools.kubernetes_tools.pods_for_service_instance",
mock.Mock(return_value=("pod_1")),
autospec=False,
)
@mock.patch(
"paasta_tools.instance.kubernetes.LONG_RUNNING_INSTANCE_TYPE_HANDLERS",
{"flink": mock.Mock()},
autospec=False,
)
@pytest.mark.parametrize(
"include_mesh,inst_type,service_ns_conf,expected_msg",
[
(False, "flink", {"proxy_port": 1234}, "No mesh types"),
(True, "tron", {"proxy_port": 1234}, "not supported"),
(True, "flink", {}, "not configured"),
],
)
def test_kubernetes_mesh_status_error(
mock_mesh_status,
mock_load_service_namespace_config,
include_mesh,
inst_type,
service_ns_conf,
expected_msg,
):
mock_load_service_namespace_config.return_value = service_ns_conf
mock_settings = mock.Mock()
with pytest.raises(RuntimeError) as excinfo:
pik.kubernetes_mesh_status(
service="fake_service",
instance="fake_instance",
instance_type=inst_type,
settings=mock_settings,
include_smartstack=include_mesh,
include_envoy=include_mesh,
)
assert expected_msg in excinfo.value.args[0]
assert mock_mesh_status.call_args_list == []
def test_bounce_status():
with asynctest.patch(
"paasta_tools.instance.kubernetes.kubernetes_tools", autospec=True
) as mock_kubernetes_tools:
mock_config = mock_kubernetes_tools.load_kubernetes_service_config.return_value
mock_kubernetes_tools.get_kubernetes_app_deploy_status.return_value = (
"deploy_status",
"message",
)
mock_kubernetes_tools.get_active_shas_for_service.return_value = [
("aaa", "config_aaa"),
("bbb", "config_bbb"),
]
mock_settings = mock.Mock()
status = pik.bounce_status("fake_service", "fake_instance", mock_settings)
assert status == {
"expected_instance_count": mock_config.get_instances.return_value,
"desired_state": mock_config.get_desired_state.return_value,
"running_instance_count": mock_kubernetes_tools.get_kubernetes_app_by_name.return_value.status.ready_replicas,
"deploy_status": mock_kubernetes_tools.KubernetesDeployStatus.tostring.return_value,
"active_shas": [("aaa", "config_aaa"), ("bbb", "config_bbb"),],
"app_count": 2,
}
@pytest.mark.asyncio
async def test_get_pod_containers(mock_pod):
mock_client = mock.Mock()
with asynctest.patch(
"paasta_tools.instance.kubernetes.get_tail_lines_for_kubernetes_container",
side_effect=[["current"], ["previous"]],
autospec=None,
), mock.patch(
"paasta_tools.kubernetes_tools.recent_container_restart",
return_value=True,
autospec=None,
):
containers = await pik.get_pod_containers(mock_pod, mock_client, 10)
assert containers == [
dict(
name="main_container",
restart_count=0,
state="running",
reason="a_state_reason",
message="a_state_message",
last_state="terminated",
last_reason="a_last_state_reason",
last_message="a_last_state_message",
last_duration=86400.0,
last_timestamp=datetime.datetime(2021, 3, 4).timestamp(),
previous_tail_lines=["previous"],
timestamp=datetime.datetime(2021, 3, 6).timestamp(),
healthcheck_grace_period=1,
healthcheck_cmd={"http_url": "http://1.2.3.4:8080/healthcheck"},
tail_lines=["current"],
),
]
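# Note: Struct and wrap_value_in_task come from tests/conftest.py, which is not
# reproduced here. A rough sketch of what they are assumed to provide
# (hypothetical, shown only to make the fixtures above easier to read):
#
#     class Struct:
#         """Attribute-access wrapper used to fake Kubernetes API objects."""
#         def __init__(self, **entries):
#             self.__dict__.update(entries)
#
#     def wrap_value_in_task(value):
#         fut = asyncio.Future()
#         fut.set_result(value)
#         return fut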
|
|
# Argument Clinic
# Copyright 2012-2013 by Larry Hastings.
# Licensed to the PSF under a contributor agreement.
#
import builtins
import clinic
from clinic import DSLParser
import collections
import inspect
from test import support
import sys
import unittest
from unittest import TestCase
class FakeConverter:
def __init__(self, name, args):
self.name = name
self.args = args
class FakeConverterFactory:
def __init__(self, name):
self.name = name
def __call__(self, name, default, **kwargs):
return FakeConverter(self.name, kwargs)
class FakeConvertersDict:
def __init__(self):
self.used_converters = {}
def get(self, name, default):
return self.used_converters.setdefault(name, FakeConverterFactory(name))
clinic.Clinic.presets_text = ''
c = clinic.Clinic(language='C')
class FakeClinic:
def __init__(self):
self.converters = FakeConvertersDict()
self.legacy_converters = FakeConvertersDict()
self.language = clinic.CLanguage(None)
self.filename = None
self.block_parser = clinic.BlockParser('', self.language)
self.modules = collections.OrderedDict()
self.classes = collections.OrderedDict()
clinic.clinic = self
self.name = "FakeClinic"
self.line_prefix = self.line_suffix = ''
self.destinations = {}
self.add_destination("block", "buffer")
self.add_destination("file", "buffer")
self.add_destination("suppress", "suppress")
d = self.destinations.get
self.field_destinations = collections.OrderedDict((
('docstring_prototype', d('suppress')),
('docstring_definition', d('block')),
('methoddef_define', d('block')),
('impl_prototype', d('block')),
('parser_prototype', d('suppress')),
('parser_definition', d('block')),
('impl_definition', d('block')),
))
def get_destination(self, name):
d = self.destinations.get(name)
if not d:
sys.exit("Destination does not exist: " + repr(name))
return d
def add_destination(self, name, type, *args):
if name in self.destinations:
sys.exit("Destination already exists: " + repr(name))
self.destinations[name] = clinic.Destination(name, type, self, *args)
def is_directive(self, name):
return name == "module"
def directive(self, name, args):
self.called_directives[name] = args
_module_and_class = clinic.Clinic._module_and_class
class ClinicWholeFileTest(TestCase):
def test_eol(self):
# regression test:
# clinic's block parser didn't recognize
# the "end line" for the block if it
# didn't end in "\n" (as in, the last
# byte of the file was '/').
# so it would spit out an end line for you.
# and since you really already had one,
# the last line of the block got corrupted.
c = clinic.Clinic(clinic.CLanguage(None))
raw = "/*[clinic]\nfoo\n[clinic]*/"
cooked = c.parse(raw).splitlines()
end_line = cooked[2].rstrip()
# this assertion is redundant; it's here explicitly to catch
# the regression so we don't forget what it looked like
self.assertNotEqual(end_line, "[clinic]*/[clinic]*/")
self.assertEqual(end_line, "[clinic]*/")
class ClinicGroupPermuterTest(TestCase):
def _test(self, l, m, r, output):
computed = clinic.permute_optional_groups(l, m, r)
self.assertEqual(output, computed)
def test_range(self):
self._test([['start']], ['stop'], [['step']],
(
('stop',),
('start', 'stop',),
('start', 'stop', 'step',),
))
def test_add_window(self):
self._test([['x', 'y']], ['ch'], [['attr']],
(
('ch',),
('ch', 'attr'),
('x', 'y', 'ch',),
('x', 'y', 'ch', 'attr'),
))
def test_ludicrous(self):
self._test([['a1', 'a2', 'a3'], ['b1', 'b2']], ['c1'], [['d1', 'd2'], ['e1', 'e2', 'e3']],
(
('c1',),
('b1', 'b2', 'c1'),
('b1', 'b2', 'c1', 'd1', 'd2'),
('a1', 'a2', 'a3', 'b1', 'b2', 'c1'),
('a1', 'a2', 'a3', 'b1', 'b2', 'c1', 'd1', 'd2'),
('a1', 'a2', 'a3', 'b1', 'b2', 'c1', 'd1', 'd2', 'e1', 'e2', 'e3'),
))
def test_right_only(self):
self._test([], [], [['a'],['b'],['c']],
(
(),
('a',),
('a', 'b'),
('a', 'b', 'c')
))
def test_have_left_options_but_required_is_empty(self):
def fn():
clinic.permute_optional_groups(['a'], [], [])
self.assertRaises(AssertionError, fn)
class ClinicLinearFormatTest(TestCase):
def _test(self, input, output, **kwargs):
computed = clinic.linear_format(input, **kwargs)
self.assertEqual(output, computed)
def test_empty_strings(self):
self._test('', '')
def test_solo_newline(self):
self._test('\n', '\n')
def test_no_substitution(self):
self._test("""
abc
""", """
abc
""")
def test_empty_substitution(self):
self._test("""
abc
{name}
def
""", """
abc
def
""", name='')
def test_single_line_substitution(self):
self._test("""
abc
{name}
def
""", """
abc
GARGLE
def
""", name='GARGLE')
def test_multiline_substitution(self):
self._test("""
abc
{name}
def
""", """
abc
bingle
bungle
def
""", name='bingle\nbungle\n')
class InertParser:
def __init__(self, clinic):
pass
def parse(self, block):
pass
class CopyParser:
def __init__(self, clinic):
pass
def parse(self, block):
block.output = block.input
class ClinicBlockParserTest(TestCase):
def _test(self, input, output):
language = clinic.CLanguage(None)
blocks = list(clinic.BlockParser(input, language))
writer = clinic.BlockPrinter(language)
for block in blocks:
writer.print_block(block)
output = writer.f.getvalue()
assert output == input, "output != input!\n\noutput " + repr(output) + "\n\n input " + repr(input)
def round_trip(self, input):
return self._test(input, input)
def test_round_trip_1(self):
self.round_trip("""
verbatim text here
lah dee dah
""")
def test_round_trip_2(self):
self.round_trip("""
verbatim text here
lah dee dah
/*[inert]
abc
[inert]*/
def
/*[inert checksum: 7b18d017f89f61cf17d47f92749ea6930a3f1deb]*/
xyz
""")
def _test_clinic(self, input, output):
language = clinic.CLanguage(None)
c = clinic.Clinic(language)
c.parsers['inert'] = InertParser(c)
c.parsers['copy'] = CopyParser(c)
computed = c.parse(input)
self.assertEqual(output, computed)
def test_clinic_1(self):
self._test_clinic("""
verbatim text here
lah dee dah
/*[copy input]
def
[copy start generated code]*/
abc
/*[copy end generated code: output=03cfd743661f0797 input=7b18d017f89f61cf]*/
xyz
""", """
verbatim text here
lah dee dah
/*[copy input]
def
[copy start generated code]*/
def
/*[copy end generated code: output=7b18d017f89f61cf input=7b18d017f89f61cf]*/
xyz
""")
class ClinicParserTest(TestCase):
def test_trivial(self):
parser = DSLParser(FakeClinic())
block = clinic.Block("module os\nos.access")
parser.parse(block)
module, function = block.signatures
self.assertEqual("access", function.name)
self.assertEqual("os", module.name)
def test_ignore_line(self):
block = self.parse("#\nmodule os\nos.access")
module, function = block.signatures
self.assertEqual("access", function.name)
self.assertEqual("os", module.name)
def test_param(self):
function = self.parse_function("module os\nos.access\n path: int")
self.assertEqual("access", function.name)
self.assertEqual(2, len(function.parameters))
p = function.parameters['path']
self.assertEqual('path', p.name)
self.assertIsInstance(p.converter, clinic.int_converter)
def test_param_default(self):
function = self.parse_function("module os\nos.access\n follow_symlinks: bool = True")
p = function.parameters['follow_symlinks']
self.assertEqual(True, p.default)
def test_param_with_continuations(self):
function = self.parse_function("module os\nos.access\n follow_symlinks: \\\n bool \\\n =\\\n True")
p = function.parameters['follow_symlinks']
self.assertEqual(True, p.default)
def test_param_default_expression(self):
function = self.parse_function("module os\nos.access\n follow_symlinks: int(c_default='MAXSIZE') = sys.maxsize")
p = function.parameters['follow_symlinks']
self.assertEqual(sys.maxsize, p.default)
self.assertEqual("MAXSIZE", p.converter.c_default)
s = self.parse_function_should_fail("module os\nos.access\n follow_symlinks: int = sys.maxsize")
self.assertEqual(s, "Error on line 0:\nWhen you specify a named constant ('sys.maxsize') as your default value,\nyou MUST specify a valid c_default.\n")
def test_param_no_docstring(self):
function = self.parse_function("""
module os
os.access
follow_symlinks: bool = True
something_else: str = ''""")
p = function.parameters['follow_symlinks']
self.assertEqual(3, len(function.parameters))
self.assertIsInstance(function.parameters['something_else'].converter, clinic.str_converter)
def test_param_default_parameters_out_of_order(self):
s = self.parse_function_should_fail("""
module os
os.access
follow_symlinks: bool = True
something_else: str""")
self.assertEqual(s, """Error on line 0:
Can't have a parameter without a default ('something_else')
after a parameter with a default!
""")
def disabled_test_converter_arguments(self):
function = self.parse_function("module os\nos.access\n path: path_t(allow_fd=1)")
p = function.parameters['path']
self.assertEqual(1, p.converter.args['allow_fd'])
def test_function_docstring(self):
function = self.parse_function("""
module os
os.stat as os_stat_fn
path: str
Path to be examined
Perform a stat system call on the given path.""")
self.assertEqual("""
stat($module, /, path)
--
Perform a stat system call on the given path.
path
Path to be examined
""".strip(), function.docstring)
def test_explicit_parameters_in_docstring(self):
function = self.parse_function("""
module foo
foo.bar
x: int
Documentation for x.
y: int
This is the documentation for foo.
Okay, we're done here.
""")
self.assertEqual("""
bar($module, /, x, y)
--
This is the documentation for foo.
x
Documentation for x.
Okay, we're done here.
""".strip(), function.docstring)
def test_parser_regression_special_character_in_parameter_column_of_docstring_first_line(self):
function = self.parse_function("""
module os
os.stat
path: str
This/used to break Clinic!
""")
self.assertEqual("stat($module, /, path)\n--\n\nThis/used to break Clinic!", function.docstring)
def test_c_name(self):
function = self.parse_function("module os\nos.stat as os_stat_fn")
self.assertEqual("os_stat_fn", function.c_basename)
def test_return_converter(self):
function = self.parse_function("module os\nos.stat -> int")
self.assertIsInstance(function.return_converter, clinic.int_return_converter)
def test_star(self):
function = self.parse_function("module os\nos.access\n *\n follow_symlinks: bool = True")
p = function.parameters['follow_symlinks']
self.assertEqual(inspect.Parameter.KEYWORD_ONLY, p.kind)
self.assertEqual(0, p.group)
def test_group(self):
function = self.parse_function("module window\nwindow.border\n [\n ls : int\n ]\n /\n")
p = function.parameters['ls']
self.assertEqual(1, p.group)
def test_left_group(self):
function = self.parse_function("""
module curses
curses.addch
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
ch: char
Character to add.
[
attr: long
Attributes for the character.
]
/
""")
for name, group in (
('y', -1), ('x', -1),
('ch', 0),
('attr', 1),
):
p = function.parameters[name]
self.assertEqual(p.group, group)
self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)
self.assertEqual(function.docstring.strip(), """
addch([y, x,] ch, [attr])
y
Y-coordinate.
x
X-coordinate.
ch
Character to add.
attr
Attributes for the character.
""".strip())
def test_nested_groups(self):
function = self.parse_function("""
module curses
curses.imaginary
[
[
y1: int
Y-coordinate.
y2: int
Y-coordinate.
]
x1: int
X-coordinate.
x2: int
X-coordinate.
]
ch: char
Character to add.
[
attr1: long
Attributes for the character.
attr2: long
Attributes for the character.
attr3: long
Attributes for the character.
[
attr4: long
Attributes for the character.
attr5: long
Attributes for the character.
attr6: long
Attributes for the character.
]
]
/
""")
for name, group in (
('y1', -2), ('y2', -2),
('x1', -1), ('x2', -1),
('ch', 0),
('attr1', 1), ('attr2', 1), ('attr3', 1),
('attr4', 2), ('attr5', 2), ('attr6', 2),
):
p = function.parameters[name]
self.assertEqual(p.group, group)
self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)
self.assertEqual(function.docstring.strip(), """
imaginary([[y1, y2,] x1, x2,] ch, [attr1, attr2, attr3, [attr4, attr5,
attr6]])
y1
Y-coordinate.
y2
Y-coordinate.
x1
X-coordinate.
x2
X-coordinate.
ch
Character to add.
attr1
Attributes for the character.
attr2
Attributes for the character.
attr3
Attributes for the character.
attr4
Attributes for the character.
attr5
Attributes for the character.
attr6
Attributes for the character.
""".strip())
def parse_function_should_fail(self, s):
with support.captured_stdout() as stdout:
with self.assertRaises(SystemExit):
self.parse_function(s)
return stdout.getvalue()
def test_disallowed_grouping__two_top_groups_on_left(self):
s = self.parse_function_should_fail("""
module foo
foo.two_top_groups_on_left
[
group1 : int
]
[
group2 : int
]
param: int
""")
self.assertEqual(s,
('Error on line 0:\n'
'Function two_top_groups_on_left has an unsupported group configuration. (Unexpected state 2.b)\n'))
def test_disallowed_grouping__two_top_groups_on_right(self):
self.parse_function_should_fail("""
module foo
foo.two_top_groups_on_right
param: int
[
group1 : int
]
[
group2 : int
]
""")
def test_disallowed_grouping__parameter_after_group_on_right(self):
self.parse_function_should_fail("""
module foo
foo.parameter_after_group_on_right
param: int
[
[
group1 : int
]
group2 : int
]
""")
def test_disallowed_grouping__group_after_parameter_on_left(self):
self.parse_function_should_fail("""
module foo
foo.group_after_parameter_on_left
[
group2 : int
[
group1 : int
]
]
param: int
""")
def test_disallowed_grouping__empty_group_on_left(self):
self.parse_function_should_fail("""
module foo
foo.empty_group
[
[
]
group2 : int
]
param: int
""")
def test_disallowed_grouping__empty_group_on_right(self):
self.parse_function_should_fail("""
module foo
foo.empty_group
param: int
[
[
]
group2 : int
]
""")
def test_no_parameters(self):
function = self.parse_function("""
module foo
foo.bar
Docstring
""")
self.assertEqual("bar($module, /)\n--\n\nDocstring", function.docstring)
self.assertEqual(1, len(function.parameters)) # self!
def test_init_with_no_parameters(self):
function = self.parse_function("""
module foo
class foo.Bar "unused" "notneeded"
foo.Bar.__init__
Docstring
""", signatures_in_block=3, function_index=2)
# self is not in the signature
self.assertEqual("Bar()\n--\n\nDocstring", function.docstring)
# but it *is* a parameter
self.assertEqual(1, len(function.parameters))
def test_illegal_module_line(self):
self.parse_function_should_fail("""
module foo
foo.bar => int
/
""")
def test_illegal_c_basename(self):
self.parse_function_should_fail("""
module foo
foo.bar as 935
/
""")
def test_single_star(self):
self.parse_function_should_fail("""
module foo
foo.bar
*
*
""")
def test_parameters_required_after_star_without_initial_parameters_or_docstring(self):
self.parse_function_should_fail("""
module foo
foo.bar
*
""")
def test_parameters_required_after_star_without_initial_parameters_with_docstring(self):
self.parse_function_should_fail("""
module foo
foo.bar
*
Docstring here.
""")
def test_parameters_required_after_star_with_initial_parameters_without_docstring(self):
self.parse_function_should_fail("""
module foo
foo.bar
this: int
*
""")
def test_parameters_required_after_star_with_initial_parameters_and_docstring(self):
self.parse_function_should_fail("""
module foo
foo.bar
this: int
*
Docstring.
""")
def test_single_slash(self):
self.parse_function_should_fail("""
module foo
foo.bar
/
/
""")
def test_mix_star_and_slash(self):
self.parse_function_should_fail("""
module foo
foo.bar
x: int
y: int
*
z: int
/
""")
def test_parameters_not_permitted_after_slash_for_now(self):
self.parse_function_should_fail("""
module foo
foo.bar
/
x: int
""")
def test_function_not_at_column_0(self):
function = self.parse_function("""
module foo
foo.bar
x: int
Nested docstring here, goeth.
*
y: str
Not at column 0!
""")
self.assertEqual("""
bar($module, /, x, *, y)
--
Not at column 0!
x
Nested docstring here, goeth.
""".strip(), function.docstring)
def test_parser_regression_special_character_in_parameter_column_of_docstring_first_line(self):
function = self.parse_function("""
module os
os.stat
path: str
This/used to break Clinic!
""")
self.assertEqual("stat($module, /, path)\n--\n\nThis/used to break Clinic!", function.docstring)
def test_directive(self):
c = FakeClinic()
parser = DSLParser(c)
parser.flag = False
parser.directives['setflag'] = lambda : setattr(parser, 'flag', True)
block = clinic.Block("setflag")
parser.parse(block)
self.assertTrue(parser.flag)
def test_legacy_converters(self):
block = self.parse('module os\nos.access\n path: "s"')
module, function = block.signatures
self.assertIsInstance((function.parameters['path']).converter, clinic.str_converter)
def parse(self, text):
c = FakeClinic()
parser = DSLParser(c)
block = clinic.Block(text)
parser.parse(block)
return block
def parse_function(self, text, signatures_in_block=2, function_index=1):
block = self.parse(text)
s = block.signatures
self.assertEqual(len(s), signatures_in_block)
assert isinstance(s[0], clinic.Module)
assert isinstance(s[function_index], clinic.Function)
return s[function_index]
def test_scaffolding(self):
# test repr on special values
self.assertEqual(repr(clinic.unspecified), '<Unspecified>')
self.assertEqual(repr(clinic.NULL), '<Null>')
# test that fail fails
with support.captured_stdout() as stdout:
with self.assertRaises(SystemExit):
clinic.fail('The igloos are melting!', filename='clown.txt', line_number=69)
self.assertEqual(stdout.getvalue(), 'Error in file "clown.txt" on line 69:\nThe igloos are melting!\n')
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
"""
Module that performs extraction. For usage, refer to documentation for the class
'Extractor'. This module can also be executed directly,
e.g. 'extractor.py <input> <output>'.
"""
import argparse
import hashlib
import multiprocessing
import os
import shutil
import tempfile
import traceback
import magic
import binwalk
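# Example invocation (hypothetical paths; see main() at the bottom of this
# file for the full argument list):
#
#     ./extractor.py -nk firmware.zip images/
#
# This would skip kernel extraction (-nk) and write any extracted root
# filesystem from firmware.zip as a .tar.gz under images/.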
class Extractor(object):
"""
Class that extracts kernels and filesystems from firmware images, given an
input file or directory and output directory.
"""
# Directories that define the root of a UNIX filesystem, and the
# appropriate threshold condition
UNIX_DIRS = ["bin", "etc", "dev", "home", "lib", "mnt", "opt", "root",
"run", "sbin", "tmp", "usr", "var"]
UNIX_THRESHOLD = 4
# Lock to prevent concurrent access to the visited set. Unfortunately, it must
# be static because it cannot be pickled or passed as an instance attribute.
visited_lock = multiprocessing.Lock()
def __init__(self, indir, outdir=None, rootfs=True, kernel=True,
numproc=True, server=None, brand=None):
# Input firmware update file or directory
self._input = os.path.abspath(indir)
# Output firmware directory
self.output_dir = os.path.abspath(outdir) if outdir else None
# Whether to attempt to extract kernel
self.do_kernel = kernel
# Whether to attempt to extract root filesystem
self.do_rootfs = rootfs
# Brand of the firmware
self.brand = brand
# Hostname of SQL server
self.database = server
# Worker pool.
self._pool = multiprocessing.Pool() if numproc else None
# Set containing MD5 checksums of visited items
self.visited = set()
# List containing tagged items to extract as 2-tuple: (tag [e.g. MD5],
# path)
self._list = list()
def __getstate__(self):
"""
Eliminate attributes that should not be pickled.
"""
self_dict = self.__dict__.copy()
del self_dict["_pool"]
del self_dict["_list"]
return self_dict
@staticmethod
def io_dd(indir, offset, size, outdir):
"""
Given a path to a target file, extract size bytes from the specified offset
into the given output file.
"""
if not size:
return
with open(indir, "rb") as ifp:
with open(outdir, "wb") as ofp:
ifp.seek(offset, 0)
ofp.write(ifp.read(size))
@staticmethod
def magic(indata, mime=False):
"""
Performs file magic while maintaining compatibility with different
libraries.
"""
try:
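# (Compatibility note: the calls in this try block are assumed to target the
# bindings bundled with the "file" utility, which expose magic.open()/load();
# the AttributeError fallback below is assumed to cover the python-magic
# package, where magic.Magic(mime=...).from_file() is the equivalent call.)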
if mime:
mymagic = magic.open(magic.MAGIC_MIME_TYPE)
else:
mymagic = magic.open(magic.MAGIC_NONE)
mymagic.load()
except AttributeError:
mymagic = magic.Magic(mime)
mymagic.file = mymagic.from_file
return mymagic.file(indata)
@staticmethod
def io_md5(target):
"""
Computes the MD5 digest of a file, reading it in 64 KiB blocks.
"""
blocksize = 65536
hasher = hashlib.md5()
with open(target, 'rb') as ifp:
buf = ifp.read(blocksize)
while buf:
hasher.update(buf)
buf = ifp.read(blocksize)
return hasher.hexdigest()
@staticmethod
def io_rm(target):
"""
Attempts to recursively delete a directory.
"""
shutil.rmtree(target, ignore_errors=False, onerror=Extractor._io_err)
@staticmethod
def _io_err(function, path, excinfo):
"""
Internal error handler used by 'io_rm' to print out errors.
"""
print("!! %s: Cannot delete %s!\n%s" % (function, path, excinfo))
@staticmethod
def io_find_rootfs(start, recurse=True):
"""
Attempts to find a Linux root directory.
"""
# Recurse into single directory chains, e.g. jffs2-root/fs_1/.../
path = start
while (len(os.listdir(path)) == 1 and
os.path.isdir(os.path.join(path, os.listdir(path)[0]))):
path = os.path.join(path, os.listdir(path)[0])
# count number of unix-like directories
count = 0
for subdir in os.listdir(path):
if subdir in Extractor.UNIX_DIRS and \
os.path.isdir(os.path.join(path, subdir)):
count += 1
# check for extracted filesystem, otherwise update queue
if count >= Extractor.UNIX_THRESHOLD:
return (True, path)
# in some cases, multiple filesystems may be extracted, so recurse to
# find best one
if recurse:
for subdir in os.listdir(path):
if os.path.isdir(os.path.join(path, subdir)):
res = Extractor.io_find_rootfs(os.path.join(path, subdir),
False)
if res[0]:
return res
return (False, start)
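# Worked example (hypothetical layout): given start="/tmp/x/jffs2-root" whose
# single child "fs_1" contains bin/, etc/, usr/, var/ and www/, the loop above
# descends into fs_1, counts 4 entries from UNIX_DIRS (>= UNIX_THRESHOLD) and
# returns (True, "/tmp/x/jffs2-root/fs_1"); with only bin/ and www/ present it
# would recurse one level further and finally return (False, start).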
def extract(self):
"""
Perform extraction of firmware updates from input to tarballs in output
directory using a thread pool.
"""
if os.path.isdir(self._input):
for path, _, files in os.walk(self._input):
for item in files:
self._list.append(os.path.join(path, item))
elif os.path.isfile(self._input):
self._list.append(self._input)
if self.output_dir and not os.path.isdir(self.output_dir):
os.makedirs(self.output_dir)
if self._pool:
self._pool.map(self._extract_item, self._list)
else:
for item in self._list:
return self._extract_item(item)
def _extract_item(self, path):
"""
Wrapper function that creates an ExtractionItem and calls the extract()
method.
"""
e = ExtractionItem(self, path, 0)
e.extract()
return (e.tag, e.repeated)
class ExtractionItem(object):
"""
Class that encapsulates the state of a single item that is being extracted.
"""
# Maximum recursion breadth and depth
RECURSION_BREADTH = 5
RECURSION_DEPTH = 2
def __init__(self, extractor, path, depth, tag=None):
# Temporary directory
self.temp = None
# Recursion depth counter
self.depth = depth
# Reference to parent extractor object
self.extractor = extractor
# File path
self.item = path
# Database connection
if self.extractor.database:
import psycopg2
self.database = psycopg2.connect(database="firmware",
user="firmadyne",
password="firmadyne",
host=self.extractor.database)
else:
self.database = None
# Checksum
self.checksum = Extractor.io_md5(path)
# Tag
self.tag = tag if tag else self.generate_tag()
# Output file path and filename prefix
self.output = os.path.join(self.extractor.output_dir, self.tag) if \
self.extractor.output_dir else None
# Status, with terminate indicating early termination for this item
self.terminate = False
self.status = None
self.update_status()
self.repeated = False
def __del__(self):
if self.database:
self.database.close()
if self.temp:
self.printf(">> Cleaning up %s..." % self.temp)
Extractor.io_rm(self.temp)
def printf(self, fmt):
"""
Prints output string with appropriate depth indentation.
"""
print("\t" * self.depth + fmt)
def generate_tag(self):
"""
Generate the filename tag.
"""
if not self.database:
return os.path.basename(self.item) + "_" + self.checksum
try:
image_id = None
cur = self.database.cursor()
if self.extractor.brand:
brand = self.extractor.brand
else:
brand = os.path.relpath(self.item).split(os.path.sep)[0]
cur.execute("SELECT id FROM brand WHERE name=%s", (brand, ))
brand_id = cur.fetchone()
if not brand_id:
cur.execute("INSERT INTO brand (name) VALUES (%s) RETURNING id",
(brand, ))
brand_id = cur.fetchone()
if brand_id:
cur.execute("SELECT id FROM image WHERE hash=%s",
(self.checksum, ))
image_id = cur.fetchone()
if not image_id:
cur.execute("INSERT INTO image (filename, brand_id, hash) \
VALUES (%s, %s, %s) RETURNING id",
(os.path.basename(self.item), brand_id[0],
self.checksum))
image_id = cur.fetchone()
self.database.commit()
except BaseException:
traceback.print_exc()
self.database.rollback()
finally:
if cur:
cur.close()
if image_id:
self.printf(">> Database Image ID: %s" % image_id[0])
return str(image_id[0]) if \
image_id else os.path.basename(self.item) + "_" + self.checksum
def get_kernel_status(self):
"""
Get the flag corresponding to the kernel status.
"""
return self.status[0]
def get_rootfs_status(self):
"""
Get the flag corresponding to the root filesystem status.
"""
return self.status[1]
def update_status(self):
"""
Updates the status flags using the tag to determine completion status.
"""
kernel_done = os.path.isfile(self.get_kernel_path()) if \
self.extractor.do_kernel and self.output else \
not self.extractor.do_kernel
rootfs_done = os.path.isfile(self.get_rootfs_path()) if \
self.extractor.do_rootfs and self.output else \
not self.extractor.do_rootfs
self.status = (kernel_done, rootfs_done)
if self.database and kernel_done and self.extractor.do_kernel:
self.update_database("kernel_extracted", "True")
if self.database and rootfs_done and self.extractor.do_rootfs:
self.update_database("rootfs_extracted", "True")
return self.get_status()
def update_database(self, field, value):
"""
Update a given field in the database.
"""
ret = True
if self.database:
try:
cur = self.database.cursor()
cur.execute("UPDATE image SET " + field + "='" + value +
"' WHERE id=%s", (self.tag, ))
self.database.commit()
except BaseException:
ret = False
traceback.print_exc()
self.database.rollback()
finally:
if cur:
cur.close()
return ret
def get_status(self):
"""
Returns True if early termination was signaled or extraction is complete,
otherwise False.
"""
return self.terminate or all(self.status)
def get_kernel_path(self):
"""
Return the full path (including filename) to the output kernel file.
"""
return self.output + ".kernel" if self.output else None
def get_rootfs_path(self):
"""
Return the full path (including filename) to the output root filesystem
file.
"""
return self.output + ".tar.gz" if self.output else None
def extract(self):
"""
Perform the actual extraction of firmware updates, recursively. Returns
True if extraction is complete, otherwise False.
"""
self.printf("\n" + self.item.encode("utf-8", "replace").decode("utf-8"))
# check if item is complete
if self.get_status():
self.printf(">> Skipping: completed!")
self.repeated = True
return True
# check if exceeding recursion depth
if self.depth > ExtractionItem.RECURSION_DEPTH:
self.printf(">> Skipping: recursion depth %d" % self.depth)
return self.get_status()
# check if checksum is in visited set
self.printf(">> MD5: %s" % self.checksum)
with Extractor.visited_lock:
if self.checksum in self.extractor.visited:
self.printf(">> Skipping: %s..." % self.checksum)
return self.get_status()
else:
self.extractor.visited.add(self.checksum)
# check if filetype is blacklisted
if self._check_blacklist():
return self.get_status()
# create working directory
self.temp = tempfile.mkdtemp()
try:
self.printf(">> Tag: %s" % self.tag)
self.printf(">> Temp: %s" % self.temp)
self.printf(">> Status: Kernel: %s, Rootfs: %s, Do_Kernel: %s, \
Do_Rootfs: %s" % (self.get_kernel_status(),
self.get_rootfs_status(),
self.extractor.do_kernel,
self.extractor.do_rootfs))
for analysis in [self._check_archive, self._check_firmware,
self._check_kernel, self._check_rootfs,
self._check_compressed]:
# Move to temporary directory so binwalk does not write to input
os.chdir(self.temp)
# Update status only if analysis changed state
if analysis():
if self.update_status():
self.printf(">> Skipping: completed!")
return True
except Exception:
traceback.print_exc()
return False
def _check_blacklist(self):
"""
Check if this file is blacklisted for analysis based on file type.
"""
# First, use MIME-type to exclude large categories of files
filetype = Extractor.magic(self.item.encode("utf-8", "surrogateescape"),
mime=True)
if any(s in filetype for s in ["application/x-dosexec",
"application/pdf",
"application/msword",
"image/", "video/"]):
self.printf(">> Skipping: %s..." % filetype)
return True
# Next, check for specific file types that have MIME-type
# 'application/octet-stream'
filetype = Extractor.magic(self.item.encode("utf-8", "surrogateescape"))
if any(s in filetype for s in ["applet"]):
self.printf(">> Skipping: %s..." % filetype)
return True
# Finally, check for specific file extensions that would be incorrectly
# identified
if self.item.endswith(".dmg"):
self.printf(">> Skipping: %s..." % (self.item))
return True
return False
def _check_archive(self):
"""
If this file is an archive, recurse over its contents, unless it matches
an extracted root filesystem.
"""
return self._check_recursive("archive")
def _check_firmware(self):
"""
If this file is of a known firmware type, directly attempt to extract
the kernel and root filesystem.
"""
for module in binwalk.scan(self.item, "-y", "header", signature=True,
quiet=True):
for entry in module.results:
# uImage
if "uImage header" in entry.description:
if not self.get_kernel_status() and \
"OS Kernel Image" in entry.description:
kernel_offset = entry.offset + 64
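# Note: the "+ 64" above assumes the legacy uImage format, whose fixed
# header is 64 bytes long, so the kernel payload is taken to start
# immediately after it.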
kernel_size = 0
for stmt in entry.description.split(','):
if "image size:" in stmt:
kernel_size = int(''.join(
i for i in stmt if i.isdigit()), 10)
if kernel_size != 0 and kernel_offset + kernel_size \
<= os.path.getsize(self.item):
self.printf(">>>> %s" % entry.description)
tmp_fd, tmp_path = tempfile.mkstemp(dir=self.temp)
os.close(tmp_fd)
Extractor.io_dd(self.item, kernel_offset,
kernel_size, tmp_path)
kernel = ExtractionItem(self.extractor, tmp_path,
self.depth, self.tag)
return kernel.extract()
# elif "RAMDisk Image" in entry.description:
# self.printf(">>>> %s" % entry.description)
# self.printf(">>>> Skipping: RAMDisk / initrd")
# self.terminate = True
# return True
# TP-Link or TRX
elif not self.get_kernel_status() and \
not self.get_rootfs_status() and \
"rootfs offset: " in entry.description and \
"kernel offset: " in entry.description:
kernel_offset = 0
kernel_size = 0
rootfs_offset = 0
rootfs_size = 0
for stmt in entry.description.split(','):
if "kernel offset:" in stmt:
kernel_offset = int(stmt.split(':')[1], 16)
elif "kernel length:" in stmt:
kernel_size = int(stmt.split(':')[1], 16)
elif "rootfs offset:" in stmt:
rootfs_offset = int(stmt.split(':')[1], 16)
elif "rootfs length:" in stmt:
rootfs_size = int(stmt.split(':')[1], 16)
# compute sizes if only offsets provided
if kernel_offset != rootfs_size and kernel_size == 0 and \
rootfs_size == 0:
kernel_size = rootfs_offset - kernel_offset
rootfs_size = os.path.getsize(self.item) - rootfs_offset
# ensure that computed values are sensible
if (kernel_size > 0 and kernel_offset + kernel_size \
<= os.path.getsize(self.item)) and \
(rootfs_size != 0 and rootfs_offset + rootfs_size \
<= os.path.getsize(self.item)):
self.printf(">>>> %s" % entry.description)
tmp_fd, tmp_path = tempfile.mkstemp(dir=self.temp)
os.close(tmp_fd)
Extractor.io_dd(self.item, kernel_offset, kernel_size,
tmp_path)
kernel = ExtractionItem(self.extractor, tmp_path,
self.depth, self.tag)
kernel.extract()
tmp_fd, tmp_path = tempfile.mkstemp(dir=self.temp)
os.close(tmp_fd)
Extractor.io_dd(self.item, rootfs_offset, rootfs_size,
tmp_path)
rootfs = ExtractionItem(self.extractor, tmp_path,
self.depth, self.tag)
rootfs.extract()
return self.update_status()
return False
def _check_kernel(self):
"""
If this file contains a kernel version string, assume it is a kernel.
Only Linux kernels are currently extracted.
"""
if not self.get_kernel_status():
for module in binwalk.scan(self.item, "-y", "kernel",
signature=True, quiet=True):
for entry in module.results:
if "kernel version" in entry.description:
self.update_database("kernel_version",
entry.description)
if "Linux" in entry.description:
if self.get_kernel_path():
shutil.copy(self.item, self.get_kernel_path())
else:
self.extractor.do_kernel = False
self.printf(">>>> %s" % entry.description)
return True
# VxWorks, etc
else:
self.printf(">>>> Ignoring: %s" % entry.description)
return False
return False
return False
def _check_rootfs(self):
"""
If this file contains a known filesystem type, extract it.
"""
if not self.get_rootfs_status():
for module in binwalk.scan(self.item, "-e", "-r", "-y",
"filesystem", signature=True,
quiet=True):
for entry in module.results:
self.printf(">>>> %s" % entry.description)
break
if module.extractor.directory:
unix = Extractor.io_find_rootfs(module.extractor.directory)
if not unix[0]:
self.printf(">>>> Extraction failed!")
return False
self.printf(">>>> Found Linux filesystem in %s!" % unix[1])
if self.output:
shutil.make_archive(self.output, "gztar",
root_dir=unix[1])
else:
self.extractor.do_rootfs = False
return True
return False
def _check_compressed(self):
"""
If this file appears to be compressed, decompress it and recurse over
its contents.
"""
return self._check_recursive("compressed")
# treat both archived and compressed files using the same pathway. this is
# because certain files may appear as e.g. "xz compressed data" but still
# extract into a root filesystem.
def _check_recursive(self, fmt):
"""
Unified implementation for checking both "archive" and "compressed"
items.
"""
desc = None
# perform extraction
for module in binwalk.scan(self.item, "-e", "-r", "-y", fmt,
signature=True, quiet=True):
for entry in module.results:
# skip cpio/initrd files since they should be included with
# kernel
# if "cpio archive" in entry.description:
# self.printf(">> Skipping: cpio: %s" % entry.description)
# self.terminate = True
# return True
desc = entry.description
self.printf(">>>> %s" % entry.description)
break
if module.extractor.directory:
unix = Extractor.io_find_rootfs(module.extractor.directory)
# check for extracted filesystem, otherwise update queue
if unix[0]:
self.printf(">>>> Found Linux filesystem in %s!" % unix[1])
if self.output:
shutil.make_archive(self.output, "gztar",
root_dir=unix[1])
else:
self.extractor.do_rootfs = False
return True
else:
count = 0
self.printf(">> Recursing into %s ..." % fmt)
for root, _, files in os.walk(module.extractor.directory):
# sort both descending alphabetical and increasing
# length
files.sort()
files.sort(key=len)
# handle case where original file name is restored; put
# it to front of queue
if desc and "original file name:" in desc:
orig = None
for stmt in desc.split(","):
if "original file name:" in stmt:
orig = stmt.split("\"")[1]
if orig and orig in files:
files.remove(orig)
files.insert(0, orig)
for filename in files:
if count > ExtractionItem.RECURSION_BREADTH:
self.printf(">> Skipping: recursion breadth %d"\
% ExtractionItem.RECURSION_BREADTH)
self.terminate = True
return True
else:
new_item = ExtractionItem(self.extractor,
os.path.join(root,
filename),
self.depth + 1,
self.tag)
if new_item.extract():
# check that we are actually done before
# performing early termination. for example,
# we might decide to skip on one subitem,
# but we still haven't finished
if self.update_status():
return True
count += 1
return False
def main():
parser = argparse.ArgumentParser(description="Extracts filesystem and \
kernel from Linux-based firmware images")
parser.add_argument("input", action="store", help="Input file or directory")
parser.add_argument("output", action="store", nargs="?", default="images",
help="Output directory for extracted firmware")
parser.add_argument("-sql ", dest="sql", action="store", default=None,
help="Hostname of SQL server")
parser.add_argument("-nf", dest="rootfs", action="store_false",
default=True, help="Disable extraction of root \
filesystem (may decrease extraction time)")
parser.add_argument("-nk", dest="kernel", action="store_false",
default=True, help="Disable extraction of kernel \
(may decrease extraction time)")
parser.add_argument("-np", dest="parallel", action="store_false",
default=True, help="Disable parallel operation \
(may increase extraction time)")
parser.add_argument("-b", dest="brand", action="store", default=None,
help="Brand of the firmware image")
result = parser.parse_args()
extract = Extractor(result.input, result.output, result.rootfs,
result.kernel, result.parallel, result.sql,
result.brand)
extract.extract()
if __name__ == "__main__":
main()
|
|
from contextlib import contextmanager
import glob
import os
import time
import pytest
import logging
import subprocess
import typing
from cassandra import ConsistencyLevel, WriteTimeout, ReadTimeout
from cassandra.cluster import Session
from cassandra.query import SimpleStatement
from ccmlib.node import Node, handle_external_tool_process
from pytest import raises
from dtest import Tester, create_ks
from tools.assertions import assert_one
from tools.data import rows_to_list
from tools.jmxutils import JolokiaAgent, make_mbean
from tools.misc import retry_till_success
since = pytest.mark.since
logger = logging.getLogger(__name__)
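# Compile-check a byteman script with byteman's TestScript tool, using the jars from
# the node's install directory; the assertion fails with the tool's output if the
# script does not compile.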
def byteman_validate(node, script, verbose=False, opts=None):
opts = opts or []
cdir = node.get_install_dir()
byteman_cmd = []
byteman_cmd.append(os.path.join(os.environ['JAVA_HOME'],
'bin',
'java'))
byteman_cmd.append('-cp')
jars = [
glob.glob(os.path.join(cdir, 'build', 'lib', 'jars', 'byteman-[0-9]*.jar'))[0],
os.path.join(cdir, 'build', '*'),
]
byteman_cmd.append(':'.join(jars))
byteman_cmd.append('org.jboss.byteman.check.TestScript')
byteman_cmd.append('-p')
byteman_cmd.append(node.byteman_port)
if verbose and '-v' not in opts:
byteman_cmd.append('-v')
byteman_cmd.append(script)
# process = subprocess.Popen(byteman_cmd)
# out, err = process.communicate()
out = subprocess.check_output(byteman_cmd)
if (out is not None) and isinstance(out, bytes):
out = out.decode()
has_errors = 'ERROR' in out
if verbose and not has_errors:
        print(out)
assert not has_errors, "byteman script didn't compile\n" + out
class TestReadRepair(Tester):
@pytest.fixture(scope='function', autouse=True)
def fixture_set_cluster_settings(self, fixture_dtest_setup):
cluster = fixture_dtest_setup.cluster
cluster.populate(3)
# disable dynamic snitch to make replica selection deterministic
# when we use patient_exclusive_cql_connection, CL=1 and RF=n
cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
'endpoint_snitch': 'GossipingPropertyFileSnitch',
'dynamic_snitch': False})
for node in cluster.nodelist():
with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as snitch_file:
snitch_file.write("dc=datacenter1" + os.linesep)
snitch_file.write("rack=rack1" + os.linesep)
snitch_file.write("prefer_local=true" + os.linesep)
cluster.start(wait_for_binary_proto=True)
@since('3.0')
def test_alter_rf_and_run_read_repair(self):
"""
@jira_ticket CASSANDRA-10655
@jira_ticket CASSANDRA-10657
Test that querying only a subset of all the columns in a row doesn't confuse read-repair to avoid
the problem described in CASSANDRA-10655.
"""
# session is only used to setup & do schema modification. Actual data queries are done directly on
# each node, using an exclusive connection and CL.ONE
session = self.patient_cql_connection(self.cluster.nodelist()[0])
initial_replica, non_replicas = self.do_initial_setup(session)
# Execute a query at CL.ALL on one of the nodes which was *not* the initial replica. It should trigger a
# read repair and propagate the data to all 3 nodes.
# Note: result of the read repair contains only the selected column (a), not all columns
logger.debug("Executing 'SELECT a...' on non-initial replica to trigger read repair " + non_replicas[0].name)
read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
assert_one(read_repair_session, "SELECT a FROM alter_rf_test.t1 WHERE k=1", [1], cl=ConsistencyLevel.ALL)
# The read repair should have repaired the replicas, at least partially (see CASSANDRA-10655)
# verify by querying each replica in turn.
        value_skipping_disabled = self.cluster.version() < '3.4'
self.check_data_on_each_replica(expect_fully_repaired=value_skipping_disabled, initial_replica=initial_replica)
# Now query again at CL.ALL but this time selecting all columns, which should ensure that 'b' also gets repaired
query = "SELECT * FROM alter_rf_test.t1 WHERE k=1"
logger.debug("Executing 'SELECT *...' on non-initial replica to trigger read repair " + non_replicas[0].name)
assert_one(read_repair_session, query, [1, 1, 1], cl=ConsistencyLevel.ALL)
# Check each replica individually again now that we expect the data to be fully repaired
self.check_data_on_each_replica(expect_fully_repaired=True, initial_replica=initial_replica)
@since('2.1', max_version='3.11.x')
def test_read_repair_chance(self):
"""
@jira_ticket CASSANDRA-12368
"""
# session is only used to setup & do schema modification. Actual data queries are done directly on
# each node, using an exclusive connection and CL.ONE
session = self.patient_cql_connection(self.cluster.nodelist()[0])
initial_replica, non_replicas = self.do_initial_setup(session)
# To ensure read repairs are triggered, set the table property to 100%
logger.debug("Setting table read repair chance to 1")
session.execute("""ALTER TABLE alter_rf_test.t1 WITH read_repair_chance = 1;""")
# Execute a query at CL.ONE on one of the nodes which was *not* the initial replica. It should trigger a
# read repair because read_repair_chance == 1, and propagate the data to all 3 nodes.
# Note: result of the read repair contains only the selected column (a), not all columns, so we won't expect
# 'b' to have been fully repaired afterwards.
logger.debug("Executing 'SELECT a...' on non-initial replica to trigger read repair " + non_replicas[0].name)
read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
read_repair_session.execute(SimpleStatement("SELECT a FROM alter_rf_test.t1 WHERE k=1",
consistency_level=ConsistencyLevel.ONE))
# Query each replica individually to ensure that read repair was triggered. We should expect that only
# the initial replica has data for both the 'a' and 'b' columns. If the cluster is on > 3.4, the read repair
# should only have affected the selected column (CASSANDRA-10655), so the other two replicas should only have
# that data.
# Note: we need to temporarily set read_repair_chance to 0 while we perform this check.
logger.debug("Setting table read repair chance to 0 while we verify each replica's data")
session.execute("""ALTER TABLE alter_rf_test.t1 WITH read_repair_chance = 0;""")
# The read repair is run in the background, so we spin while checking that the repair has completed
        value_skipping_disabled = self.cluster.version() < '3.4'
retry_till_success(self.check_data_on_each_replica,
expect_fully_repaired=value_skipping_disabled,
initial_replica=initial_replica,
timeout=30,
bypassed_exception=NotRepairedException)
# Re-enable global read repair and perform another query on a non-replica. This time the query selects all
# columns so we also expect the value for 'b' to be repaired.
logger.debug("Setting table read repair chance to 1")
session.execute("""ALTER TABLE alter_rf_test.t1 WITH read_repair_chance = 1;""")
logger.debug("Executing 'SELECT *...' on non-initial replica to trigger read repair " + non_replicas[0].name)
read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
read_repair_session.execute(SimpleStatement("SELECT * FROM alter_rf_test.t1 WHERE k=1",
consistency_level=ConsistencyLevel.ONE))
# Query each replica again to ensure that second read repair was triggered. This time, we expect the
# data to be fully repaired (both 'a' and 'b' columns) by virtue of the query being 'SELECT *...'
# As before, we turn off read repair before doing this check.
logger.debug("Setting table read repair chance to 0 while we verify each replica's data")
session.execute("""ALTER TABLE alter_rf_test.t1 WITH read_repair_chance = 0;""")
retry_till_success(self.check_data_on_each_replica,
expect_fully_repaired=True,
initial_replica=initial_replica,
timeout=30,
bypassed_exception=NotRepairedException)
def do_initial_setup(self, session):
"""
Create a keyspace with rf=1 and a table containing a single row with 2 non-primary key columns.
Insert 1 row, placing the data on a single initial replica. Then, alter the keyspace to rf=3, but don't
repair. Tests will execute various reads on the replicas and assert the effects of read repair.
:param session: Used to perform the schema setup & insert the data
:return: a tuple containing the node which initially acts as the replica, and a list of the other two nodes
"""
# Disable speculative retry and [dclocal]read_repair in initial setup.
session.execute("""CREATE KEYSPACE alter_rf_test
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};""")
options = "speculative_retry='NONE'";
if self.cluster.version() < '4.0':
options = options + " AND read_repair_chance=0 AND dclocal_read_repair_chance=0"
session.execute("CREATE TABLE alter_rf_test.t1 (k int PRIMARY KEY, a int, b int) WITH " + options)
session.execute("INSERT INTO alter_rf_test.t1 (k, a, b) VALUES (1, 1, 1);")
# identify the initial replica and trigger a flush to ensure reads come from sstables
initial_replica, non_replicas = self.identify_initial_placement()
logger.debug("At RF=1 replica for data is " + initial_replica.name)
initial_replica.flush()
# Just some basic validation.
# At RF=1, it shouldn't matter which node we query, as the actual data should always come from the
# initial replica when reading at CL ONE
for n in self.cluster.nodelist():
logger.debug("Checking " + n.name)
session = self.patient_exclusive_cql_connection(n)
assert_one(session, "SELECT * FROM alter_rf_test.t1 WHERE k=1", [1, 1, 1], cl=ConsistencyLevel.ONE)
# Alter so RF=n but don't repair, calling tests will execute queries to exercise read repair,
# either at CL.ALL or after setting read_repair_chance to 100%.
logger.debug("Changing RF from 1 to 3")
session.execute("""ALTER KEYSPACE alter_rf_test
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};""")
return initial_replica, non_replicas
def identify_initial_placement(self):
"""
Identify which node in the 3 node cluster contains the specific key at the point that the test keyspace has
rf=1.
:return: tuple containing the initial replica, plus a list of the other 2 replicas.
"""
nodes = self.cluster.nodelist()
out, _, _ = nodes[0].nodetool("getendpoints alter_rf_test t1 1")
address = out.split('\n')[-2]
initial_replica = None
non_replicas = []
for node in nodes:
if node.address() == address:
initial_replica = node
else:
non_replicas.append(node)
assert initial_replica is not None, "Couldn't identify initial replica"
return initial_replica, non_replicas
def check_data_on_each_replica(self, expect_fully_repaired, initial_replica):
"""
Perform a SELECT * query at CL.ONE on each replica in turn. If expect_fully_repaired is True, we verify that
each replica returns the full row being queried. If not, then we only verify that the 'a' column has been
repaired.
"""
stmt = SimpleStatement("SELECT * FROM alter_rf_test.t1 WHERE k=1", consistency_level=ConsistencyLevel.ONE)
logger.debug("Checking all if read repair has completed on all replicas")
for n in self.cluster.nodelist():
logger.debug("Checking {n}, {x}expecting all columns"
.format(n=n.name, x="" if expect_fully_repaired or n == initial_replica else "not "))
session = self.patient_exclusive_cql_connection(n)
res = rows_to_list(session.execute(stmt))
logger.debug("Actual result: " + str(res))
expected = [[1, 1, 1]] if expect_fully_repaired or n == initial_replica else [[1, 1, None]]
if res != expected:
raise NotRepairedException()
@since('2.0')
def test_range_slice_query_with_tombstones(self):
"""
@jira_ticket CASSANDRA-8989
@jira_ticket CASSANDRA-9502
Range-slice queries with CL>ONE do unnecessary read-repairs.
        Reading from a table that contains a collection type, using the token function and with CL > ONE,
        causes overwhelming writes to the replicas.
        The behavior can be verified with tracing, by pattern matching on system_traces.events.activity.
"""
node1 = self.cluster.nodelist()[0]
session1 = self.patient_exclusive_cql_connection(node1)
session1.execute("CREATE KEYSPACE ks WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2}")
session1.execute("""
CREATE TABLE ks.cf (
key int primary key,
value double,
txt text
);
""")
for n in range(1, 2500):
str = "foo bar %d iuhiu iuhiu ihi" % n
session1.execute("INSERT INTO ks.cf (key, value, txt) VALUES (%d, %d, '%s')" % (n, n, str))
self.cluster.flush()
self.cluster.stop()
self.cluster.start(wait_for_binary_proto=True)
session1 = self.patient_exclusive_cql_connection(node1)
for n in range(1, 1000):
session1.execute("DELETE FROM ks.cf WHERE key = %d" % (n))
time.sleep(1)
node1.flush()
time.sleep(1)
query = SimpleStatement("SELECT * FROM ks.cf LIMIT 100", consistency_level=ConsistencyLevel.LOCAL_QUORUM)
future = session1.execute_async(query, trace=True)
future.result()
trace = future.get_query_trace(max_wait=120)
self.pprint_trace(trace)
for trace_event in trace.events:
# Step 1, find coordinator node:
activity = trace_event.description
assert "Appending to commitlog" not in activity
assert "Adding to cf memtable" not in activity
assert "Acquiring switchLock read lock" not in activity
@since('3.0')
def test_gcable_tombstone_resurrection_on_range_slice_query(self):
"""
@jira_ticket CASSANDRA-11427
        Before CASSANDRA-11427, range queries would trigger read repairs for purgeable tombstones on hosts
        that had already compacted those tombstones away. This resulted in constant streaming and compaction
        activity, sourced by a few nodes re-seeding purgeable tombstones and triggered, for example, by
        periodic jobs scanning the data range-wise.
"""
node1, node2, _ = self.cluster.nodelist()
session1 = self.patient_cql_connection(node1)
create_ks(session1, 'gcts', 3)
query = """
CREATE TABLE gcts.cf1 (
key text,
c1 text,
PRIMARY KEY (key, c1)
)
WITH gc_grace_seconds=0
AND compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
"""
session1.execute(query)
# create row tombstone
delete_stmt = SimpleStatement("DELETE FROM gcts.cf1 WHERE key = 'a'", consistency_level=ConsistencyLevel.ALL)
session1.execute(delete_stmt)
# flush single sstable with tombstone
node1.flush()
node2.flush()
# purge tombstones from node2 (gc grace 0)
node2.compact()
# execute range slice query, which should not trigger read-repair for purged TS
future = session1.execute_async(SimpleStatement("SELECT * FROM gcts.cf1", consistency_level=ConsistencyLevel.ALL), trace=True)
future.result()
trace = future.get_query_trace(max_wait=120)
self.pprint_trace(trace)
for trace_event in trace.events:
activity = trace_event.description
assert "Sending READ_REPAIR message" not in activity
def pprint_trace(self, trace):
"""Pretty print a trace"""
if logging.root.level == logging.DEBUG:
print(("-" * 40))
for t in trace.events:
print(("%s\t%s\t%s\t%s" % (t.source, t.source_elapsed, t.description, t.thread_name)))
print(("-" * 40))
def quorum(query_string):
return SimpleStatement(query_string=query_string, consistency_level=ConsistencyLevel.QUORUM)
kcv = lambda k, c, v: [k, c, v]
listify = lambda results: [list(r) for r in results]
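# Thin JMX wrapper (via Jolokia) around a node's read repair metrics. It is used as a
# context manager so the agent is started before, and stopped after, the assertions,
# e.g. `with StorageProxy(node) as sp: assert sp.blocking_read_repair == 0`.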
class StorageProxy(object):
def __init__(self, node):
assert isinstance(node, Node)
self.node = node
self.jmx = JolokiaAgent(node)
def start(self):
self.jmx.start()
def stop(self):
self.jmx.stop()
def _get_metric(self, metric):
mbean = make_mbean("metrics", type="ReadRepair", name=metric)
return self.jmx.read_attribute(mbean, "Count")
@property
def blocking_read_repair(self):
return self._get_metric("RepairedBlocking")
@property
def speculated_rr_read(self):
return self._get_metric("SpeculatedRead")
@property
def speculated_rr_write(self):
return self._get_metric("SpeculatedWrite")
def get_table_metric(self, keyspace, table, metric, attr="Count"):
mbean = make_mbean("metrics", keyspace=keyspace, scope=table, type="Table", name=metric)
return self.jmx.read_attribute(mbean, attr)
def __enter__(self):
""" For contextmanager-style usage. """
self.start()
return self
def __exit__(self, exc_type, value, traceback):
""" For contextmanager-style usage. """
self.stop()
class TestSpeculativeReadRepair(Tester):
@pytest.fixture(scope='function', autouse=True)
def fixture_set_cluster_settings(self, fixture_dtest_setup):
cluster = fixture_dtest_setup.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
'dynamic_snitch': False,
'write_request_timeout_in_ms': 500,
'read_request_timeout_in_ms': 500})
cluster.populate(3, install_byteman=True, debug=True)
byteman_validate(cluster.nodelist()[0], './byteman/read_repair/sorted_live_endpoints.btm', verbose=True)
cluster.start(wait_for_binary_proto=True, jvm_args=['-XX:-PerfDisableSharedMem'])
session = fixture_dtest_setup.patient_exclusive_cql_connection(cluster.nodelist()[0], timeout=2)
session.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}")
session.execute("CREATE TABLE ks.tbl (k int, c int, v int, primary key (k, c)) WITH speculative_retry = '250ms';")
def get_cql_connection(self, node, **kwargs):
return self.patient_exclusive_cql_connection(node, retry_policy=None, **kwargs)
@since('4.0')
def test_failed_read_repair(self):
"""
If none of the disagreeing nodes ack the repair mutation, the read should fail
"""
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)
session = self.get_cql_connection(node1, timeout=2)
session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))
node2.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
node3.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
node2.byteman_submit(['./byteman/read_repair/stop_rr_writes.btm'])
node3.byteman_submit(['./byteman/read_repair/stop_rr_writes.btm'])
with raises(WriteTimeout):
session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)"))
node2.byteman_submit(['./byteman/read_repair/sorted_live_endpoints.btm'])
session = self.get_cql_connection(node2)
with StorageProxy(node2) as storage_proxy:
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0
with raises(ReadTimeout):
session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
assert storage_proxy.blocking_read_repair > 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write > 0
@since('4.0')
def test_normal_read_repair(self):
""" test the normal case """
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)
session = self.get_cql_connection(node1, timeout=2)
session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))
node2.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
node3.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
session.execute("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)")
# re-enable writes
node2.byteman_submit(['-u', './byteman/read_repair/stop_writes.btm'])
node2.byteman_submit(['./byteman/read_repair/sorted_live_endpoints.btm'])
coordinator = node2
# Stop reads on coordinator in order to make sure we do not go through
# the messaging service for the local reads
with StorageProxy(node2) as storage_proxy, stop_reads(coordinator):
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0
session = self.get_cql_connection(coordinator)
expected = [kcv(1, 0, 1), kcv(1, 1, 2)]
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
assert listify(results) == expected
assert storage_proxy.blocking_read_repair == 1
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0
@since('4.0')
def test_speculative_data_request(self):
""" If one node doesn't respond to a full data request, it should query the other """
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)
session = self.get_cql_connection(node1, timeout=2)
session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))
node2.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
node3.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
session.execute("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)")
# re-enable writes
node2.byteman_submit(['-u', './byteman/read_repair/stop_writes.btm'])
node1.byteman_submit(['./byteman/read_repair/sorted_live_endpoints.btm'])
with StorageProxy(node1) as storage_proxy:
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0
session = self.get_cql_connection(node1)
node2.byteman_submit(['./byteman/read_repair/stop_data_reads.btm'])
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
assert listify(results) == [kcv(1, 0, 1), kcv(1, 1, 2)]
assert storage_proxy.blocking_read_repair == 1
assert storage_proxy.speculated_rr_read == 1
assert storage_proxy.speculated_rr_write == 0
@since('4.0')
def test_speculative_write(self):
""" if one node doesn't respond to a read repair mutation, it should be sent to the remaining node """
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)
session = self.get_cql_connection(node1, timeout=2)
session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))
node2.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
node3.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
session.execute("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)")
# re-enable writes on node 3, leave them off on node2
node2.byteman_submit(['./byteman/read_repair/stop_rr_writes.btm'])
node1.byteman_submit(['./byteman/read_repair/sorted_live_endpoints.btm'])
with StorageProxy(node1) as storage_proxy:
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0
session = self.get_cql_connection(node1)
expected = [kcv(1, 0, 1), kcv(1, 1, 2)]
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
assert listify(results) == expected
assert storage_proxy.blocking_read_repair == 1
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 1
@since('4.0')
def test_quorum_requirement(self):
"""
Even if we speculate on every stage, we should still only require a quorum of responses for success
"""
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)
session = self.get_cql_connection(node1, timeout=2)
session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))
node2.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
node3.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
session.execute("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)")
# re-enable writes
node2.byteman_submit(['-u', './byteman/read_repair/stop_writes.btm'])
node3.byteman_submit(['-u', './byteman/read_repair/stop_writes.btm'])
# force endpoint order
node1.byteman_submit(['./byteman/read_repair/sorted_live_endpoints.btm'])
# node2.byteman_submit(['./byteman/read_repair/stop_digest_reads.btm'])
node2.byteman_submit(['./byteman/read_repair/stop_data_reads.btm'])
node3.byteman_submit(['./byteman/read_repair/stop_rr_writes.btm'])
with StorageProxy(node1) as storage_proxy:
assert storage_proxy.get_table_metric("ks", "tbl", "SpeculativeRetries") == 0
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0
session = self.get_cql_connection(node1)
expected = [kcv(1, 0, 1), kcv(1, 1, 2)]
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
assert listify(results) == expected
assert storage_proxy.get_table_metric("ks", "tbl", "SpeculativeRetries") == 0
assert storage_proxy.blocking_read_repair == 1
assert storage_proxy.speculated_rr_read == 1
assert storage_proxy.speculated_rr_write == 1
@since('4.0')
def test_quorum_requirement_on_speculated_read(self):
"""
Even if we speculate on every stage, we should still only require a quorum of responses for success
"""
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)
session = self.get_cql_connection(node1, timeout=2)
session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))
node2.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
node3.byteman_submit(['./byteman/read_repair/stop_writes.btm'])
session.execute("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)")
# re-enable writes
node2.byteman_submit(['-u', './byteman/read_repair/stop_writes.btm'])
node3.byteman_submit(['-u', './byteman/read_repair/stop_writes.btm'])
# force endpoint order
node1.byteman_submit(['./byteman/read_repair/sorted_live_endpoints.btm'])
node2.byteman_submit(['./byteman/read_repair/stop_digest_reads.btm'])
node3.byteman_submit(['./byteman/read_repair/stop_data_reads.btm'])
node2.byteman_submit(['./byteman/read_repair/stop_rr_writes.btm'])
with StorageProxy(node1) as storage_proxy:
assert storage_proxy.get_table_metric("ks", "tbl", "SpeculativeRetries") == 0
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0
session = self.get_cql_connection(node1)
expected = [kcv(1, 0, 1), kcv(1, 1, 2)]
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
assert listify(results) == expected
assert storage_proxy.get_table_metric("ks", "tbl", "SpeculativeRetries") == 1
assert storage_proxy.blocking_read_repair == 1
assert storage_proxy.speculated_rr_read == 0 # there shouldn't be any replicas to speculate on
assert storage_proxy.speculated_rr_write == 1
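# Context manager helpers for the guarantee tests below: _byteman_cycle() validates
# the named byteman scripts, submits them on every node for the duration of the block,
# and unloads them again on exit; stop_writes()/stop_reads() choose which scripts to
# cycle.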
@contextmanager
def _byteman_cycle(nodes, scripts):
script_path = lambda name: './byteman/read_repair/' + name + '.btm'
for script in scripts:
byteman_validate(nodes[0], script_path(script))
for node in nodes:
assert isinstance(node, Node)
for name in scripts:
print(node.name)
node.byteman_submit([script_path(name)])
yield
for node in nodes:
for name in scripts:
print(node.name)
node.byteman_submit(['-u', script_path(name)])
@contextmanager
def stop_writes(*nodes, kind='all'):
assert kind in ('all', 'normal', 'repair')
normal = 'stop_writes'
repair = 'stop_rr_writes'
with _byteman_cycle(nodes, {'normal': [normal], 'repair': [repair], 'all': [normal, repair]}[kind]):
yield
@contextmanager
def stop_reads(*nodes, kind='all'):
data = 'stop_data_reads'
digest = 'stop_digest_reads'
with _byteman_cycle(nodes, {'data': [data], 'digest': [digest], 'all': [data, digest]}[kind]):
yield
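# kcvv() builds an expected [k, c, v1, v2] row for the two-value table used by the
# monotonicity and atomicity tests below.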
kcvv = lambda k, c, v1, v2: [k, c, v1, v2]
class TestReadRepairGuarantees(Tester):
@pytest.fixture(scope='function', autouse=True)
def fixture_set_cluster_settings(self, fixture_dtest_setup):
cluster = fixture_dtest_setup.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
'dynamic_snitch': False,
'write_request_timeout_in_ms': 500,
'read_request_timeout_in_ms': 500})
cluster.populate(3, install_byteman=True, debug=True).start(wait_for_binary_proto=True,
jvm_args=['-XX:-PerfDisableSharedMem'])
session = fixture_dtest_setup.patient_exclusive_cql_connection(cluster.nodelist()[0], timeout=2)
session.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}")
def get_cql_connection(self, node, **kwargs):
return self.patient_exclusive_cql_connection(node, retry_policy=None, **kwargs)
@since('4.0')
@pytest.mark.parametrize("repair_type,expect_monotonic",
(('blocking', True), ('none', False)),
ids=('blocking', 'none'))
def test_monotonic_reads(self, repair_type, expect_monotonic):
"""
tests how read repair provides, or breaks, read monotonicity
blocking read repair should maintain monotonic quorum reads, async and none should not
"""
assert repair_type in ('blocking', 'async', 'none')
node1, node2, node3 = self.cluster.nodelist()
session = self.get_cql_connection(node1, timeout=2)
ddl = "CREATE TABLE ks.tbl (k int, c int, v1 int, v2 int, primary key (k, c)) WITH read_repair = '" + repair_type + "';"
        print(ddl)
session.execute(ddl)
session.execute(quorum("INSERT INTO ks.tbl (k, c, v1, v2) VALUES (1, 0, 1, 1)"))
with stop_writes(node2, node3):
session.execute("INSERT INTO ks.tbl (k, c, v1, v2) VALUES (1, 0, 2, 2)")
with stop_reads(node3), stop_writes(node3):
if expect_monotonic:
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
else:
# if we don't expect monotonicity, read repair writes shouldn't block
with stop_writes(node2, kind='repair'):
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
assert listify(results) == [kcvv(1, 0, 2, 2)]
session = self.get_cql_connection(node3, timeout=2)
with stop_reads(node1):
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
if expect_monotonic:
assert listify(results) == [kcvv(1, 0, 2, 2)]
else:
assert listify(results) == [kcvv(1, 0, 1, 1)]
@since('4.0')
@pytest.mark.parametrize("repair_type,expect_atomic",
(('blocking', False), ('none', True)),
ids=('blocking', 'none'))
def test_atomic_writes(self, repair_type, expect_atomic):
"""
tests how read repair provides, or breaks, write atomicity
'none' read repair should maintain atomic writes, blocking and async should not
"""
assert repair_type in ('blocking', 'async', 'none')
node1, node2, node3 = self.cluster.nodelist()
session = self.get_cql_connection(node1, timeout=2)
ddl = "CREATE TABLE ks.tbl (k int, c int, v1 int, v2 int, primary key (k, c)) WITH read_repair = '" + repair_type + "';"
        print(ddl)
session.execute(ddl)
session.execute(quorum("INSERT INTO ks.tbl (k, c, v1, v2) VALUES (1, 0, 1, 1)"))
with stop_writes(node2, node3):
session.execute("INSERT INTO ks.tbl (k, c, v1, v2) VALUES (1, 0, 2, 2)")
with stop_reads(node3), stop_writes(node3):
results = session.execute(quorum("SELECT v1 FROM ks.tbl WHERE k=1"))
assert listify(results) == [[2]]
# make sure async read repair has a chance to write the repair value
if repair_type == 'async':
time.sleep(1)
session = self.get_cql_connection(node3, timeout=2)
with stop_reads(node1):
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
if expect_atomic:
assert listify(results) == [kcvv(1, 0, 1, 1)]
else:
assert listify(results) == [kcvv(1, 0, 2, 1)]
class NotRepairedException(Exception):
"""
    Raised to indicate that the data on a replica doesn't match what we'd expect if a
    specific read repair has run. See check_data_on_each_replica.
"""
pass
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from util import *
from pattern import graph
from pattern.graph import commonsense
#-------------------------------------------------------------------------
class TestUtilityFunctions(unittest.TestCase):
def setUp(self):
pass
def test_deepcopy(self):
        # Objects with a copy() method are responsible for deep-copying
        # themselves.
class MyObject:
def __init__(self, i):
self.i = i
def copy(self):
return MyObject(graph.deepcopy(self.i))
# Assert deep copy for different types.
for o1 in (
None, True, False,
"a", u"a",
1, 1.0, 1, complex(1),
list([1]), tuple([1]), set([1]), frozenset([1]),
dict(a=1), {frozenset(["a"]): 1}, {MyObject(1): 1},
MyObject(1)):
o2 = graph.deepcopy(o1)
if isinstance(o2, (list, tuple, set, dict, MyObject)):
self.assertTrue(id(o1) != id(o2))
print("pattern.graph.deepcopy()")
def test_unique(self):
# Assert list copy with unique items.
v = graph.unique([1, 1, 1])
self.assertEqual(len(v), 1)
self.assertEqual(v[0], 1)
print("pattern.graph.unique()")
def test_coordinates(self):
# Assert 2D coordinates.
x, y = graph.coordinates(10, 10, 100, 30)
self.assertAlmostEqual(x, 96.60, places=2)
self.assertAlmostEqual(y, 60.00, places=2)
print("pattern.graph.coordinates()")
#-------------------------------------------------------------------------
class TestNode(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph()
self.g.add_node("a", radius=5, stroke=(
0, 0, 0, 1), strokewidth=1, fill=None, text=(0, 0, 0, 1))
self.g.add_node("b", radius=5)
self.g.add_node("c", radius=5)
self.g.add_edge("a", "b")
self.g.add_edge("b", "c")
def test_node(self):
# Assert node properties.
n = self.g["a"]
self.assertTrue(isinstance(n, graph.Node))
self.assertTrue(n == self.g["a"])
self.assertTrue(n != self.g["b"])
self.assertTrue(n.graph == self.g)
self.assertTrue(n._distance == self.g.distance)
self.assertTrue(n.id == "a")
self.assertTrue(n.x == 0.0)
self.assertTrue(n.y == 0.0)
self.assertTrue(n.force.x == graph.Vector(0.0, 0.0).x)
self.assertTrue(n.force.y == graph.Vector(0.0, 0.0).y)
self.assertTrue(n.radius == 5)
self.assertTrue(n.fill == None)
self.assertTrue(n.stroke == (0, 0, 0, 1))
self.assertTrue(n.strokewidth == 1)
self.assertTrue(n.text.string == u"a")
self.assertTrue(n.text.width == 85)
self.assertTrue(n.text.fill == (0, 0, 0, 1))
self.assertTrue(n.text.fontsize == 11)
self.assertTrue(n.fixed == False)
self.assertTrue(n.weight == 0)
self.assertTrue(n.centrality == 0)
print("pattern.graph.Node")
def test_edge(self):
# Assert node edges.
n1 = self.g["a"]
n2 = self.g["b"]
self.assertTrue(n1.edges[0].node1.id == "a")
self.assertTrue(n1.edges[0].node2.id == "b")
self.assertTrue(n1.links[0].id == "b")
self.assertTrue(n1.links[0] == self.g.edges[0].node2)
self.assertTrue(n1.links.edge("b") == self.g.edges[0])
self.assertTrue(n1.links.edge(n2) == self.g.edges[0])
print("pattern.graph.Node.links")
print("pattern.graph.Node.edges")
def test_flatten(self):
# Assert node spreading activation.
n = self.g["a"]
self.assertTrue(set(n.flatten(depth=0)) == set([n]))
self.assertTrue(set(n.flatten(depth=1)) == set([n, n.links[0]]))
self.assertTrue(set(n.flatten(depth=2)) == set(self.g.nodes))
print("pattern.graph.Node.flatten()")
def test_text(self):
n = self.g.add_node("d", text=None)
self.assertTrue(n.text == None)
print("pattern.graph.Node.text")
#-------------------------------------------------------------------------
class TestEdge(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph()
self.g.add_node("a")
self.g.add_node("b")
self.g.add_edge("a", "b", weight=0.0, length=1.0,
type="is-a", stroke=(0, 0, 0, 1), strokewidth=1)
def test_edge(self):
# Assert edge properties.
e = self.g.edges[0]
self.assertTrue(isinstance(e, graph.Edge))
self.assertTrue(e.node1 == self.g["a"])
self.assertTrue(e.node2 == self.g["b"])
self.assertTrue(e.weight == 0.0)
self.assertTrue(e.length == 1.0)
self.assertTrue(e.type == "is-a")
self.assertTrue(e.stroke == (0, 0, 0, 1))
self.assertTrue(e.strokewidth == 1)
print("pattern.graph.Edge")
#-------------------------------------------------------------------------
class TestGraph(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph(layout=graph.SPRING, distance=10.0)
self.g.add_node("a")
self.g.add_node("b")
self.g.add_node("c")
self.g.add_edge("a", "b")
self.g.add_edge("b", "c")
def test_graph(self):
# Assert graph properties.
g = self.g.copy()
self.assertTrue(len(g.nodes) == 3)
self.assertTrue(len(g.edges) == 2)
self.assertTrue(g.distance == 10.0)
self.assertTrue(g.density == 2 / 3.0)
self.assertTrue(g.is_complete == False)
self.assertTrue(g.is_sparse == False)
self.assertTrue(g.is_dense == True)
self.assertTrue(g._adjacency == None)
self.assertTrue(isinstance(g.layout, graph.GraphLayout))
self.assertTrue(isinstance(g.layout, graph.GraphSpringLayout))
print("pattern.graph.Graph")
def test_graph_nodes(self):
# Assert graph nodes.
g = self.g.copy()
g.append(graph.Node, "d")
g.add_node("e", base=graph.Node, root=True)
self.assertTrue("d" in g)
self.assertTrue("e" in g)
self.assertTrue(g.root == g["e"])
self.assertTrue(g["e"] == g.node("e") == g.nodes[-1])
g.remove(g["d"])
g.remove(g["e"])
self.assertTrue("d" not in g)
self.assertTrue("e" not in g)
print("pattern.graph.Graph.add_node()")
def test_graph_edges(self):
# Assert graph edges.
g = self.g.copy()
v1 = g.add_edge("d", "e") # Automatically create Node(d) and Node(e).
v2 = g.add_edge("d", "e") # Yields existing edge.
v3 = g.add_edge("e", "d") # Opposite direction.
self.assertEqual(v1, v2)
self.assertEqual(v2, g.edge("d", "e"))
self.assertEqual(v3, g.edge("e", "d"))
self.assertEqual(g["d"].links.edge(g["e"]), v2)
self.assertEqual(g["e"].links.edge(g["d"]), v3)
g.remove(g["d"])
g.remove(g["e"])
# Edges d->e and e->d should now be removed automatically.
self.assertEqual(len(g.edges), 2)
print("pattern.graph.Graph.add_edge()")
def test_cache(self):
# Assert adjacency cache is flushed when nodes, edges or direction
# changes.
g = self.g.copy()
g.eigenvector_centrality()
self.assertEqual(g._adjacency[0]["a"], {})
self.assertEqual(g._adjacency[0]["b"]["a"], 1.0)
g.add_node("d")
g.add_node("e")
self.assertEqual(g._adjacency, None)
g.betweenness_centrality()
self.assertEqual(g._adjacency[0]["a"]["b"], 1.0)
self.assertEqual(g._adjacency[0]["b"]["a"], 1.0)
g.add_edge("d", "e", weight=0.0)
g.remove(g.node("d"))
g.remove(g.node("e"))
print("pattern.graph.Graph._adjacency")
def test_paths(self):
# Assert node paths.
g = self.g.copy()
self.assertEqual(g.paths("a", "c"), g.paths(g["a"], g["c"]))
self.assertEqual(g.paths("a", "c"), [[g["a"], g["b"], g["c"]]])
self.assertEqual(g.paths("a", "c", length=2), [])
# Assert node shortest paths.
g.add_edge("a", "c")
self.assertEqual(g.paths("a", "c", length=2), [[g["a"], g["c"]]])
self.assertEqual(g.shortest_path("a", "c"), [g["a"], g["c"]])
self.assertEqual(g.shortest_path("c", "a"), [g["c"], g["a"]])
self.assertEqual(g.shortest_path("c", "a", directed=True), None)
g.remove(g.edge("a", "c"))
g.add_node("d")
self.assertEqual(g.shortest_path("a", "d"), None)
self.assertEqual(g.shortest_paths("a")["b"], [g["a"], g["b"]])
self.assertEqual(g.shortest_paths("a")["c"], [g["a"], g["b"], g["c"]])
self.assertEqual(g.shortest_paths("a")["d"], None)
self.assertEqual(g.shortest_paths("c", directed=True)["a"], None)
g.remove(g["d"])
print("pattern.graph.Graph.paths()")
print("pattern.graph.Graph.shortest_path()")
print("pattern.graph.Graph.shortest_paths()")
def test_eigenvector_centrality(self):
# Assert eigenvector centrality.
self.assertEqual(self.g["a"]._weight, None)
v = self.g.eigenvector_centrality()
self.assertTrue(isinstance(v["a"], float))
self.assertTrue(v["a"] == v[self.g.node("a")])
self.assertTrue(v["a"] < v["c"])
self.assertTrue(v["b"] < v["c"])
print("pattern.graph.Graph.eigenvector_centrality()")
def test_betweenness_centrality(self):
# Assert betweenness centrality.
self.assertEqual(self.g["a"]._centrality, None)
v = self.g.betweenness_centrality()
self.assertTrue(isinstance(v["a"], float))
self.assertTrue(v["a"] == v[self.g.node("a")])
self.assertTrue(v["a"] < v["b"])
self.assertTrue(v["c"] < v["b"])
print("pattern.graph.Graph.betweenness_centrality()")
def test_sorted(self):
# Assert graph node sorting
o1 = self.g.sorted(order=graph.WEIGHT, threshold=0.0)
o2 = self.g.sorted(order=graph.CENTRALITY, threshold=0.0)
self.assertEqual(o1[0], self.g["c"])
self.assertEqual(o2[0], self.g["b"])
print("pattern.graph.Graph.sorted()")
def test_prune(self):
# Assert leaf pruning.
g = self.g.copy()
g.prune(1)
self.assertEqual(len(g), 1)
self.assertEqual(g.nodes, [g["b"]])
print("pattern.graph.Graph.prune()")
def test_fringe(self):
# Assert leaf fetching.
g = self.g.copy()
self.assertEqual(g.fringe(0), [g["a"], g["c"]])
# FIXME the ordering is variable in python3
self.assertEqual(set(g.fringe(1)), set([g["a"], g["b"], g["c"]]))
print("pattern.graph.Graph.fringe()")
def test_split(self):
        # Assert subgraph splitting.
self.assertTrue(isinstance(self.g.split(), list))
self.assertTrue(isinstance(self.g.split()[0], graph.Graph))
print("pattern.graph.Graph.split()")
def test_update(self):
# Assert node position after updating layout algorithm.
self.g.update()
for n in self.g.nodes:
self.assertTrue(n.x != 0)
self.assertTrue(n.y != 0)
self.g.layout.reset()
for n in self.g.nodes:
self.assertTrue(n.x == 0)
self.assertTrue(n.y == 0)
print("pattern.graph.Graph.update()")
def test_copy(self):
# Assert deep copy of Graph.
g1 = self.g
g2 = self.g.copy()
self.assertTrue(set(g1) == set(g2)) # Same node id's.
self.assertTrue(id(g1["a"]) != id(g2["b"])) # Different node objects.
g3 = self.g.copy(nodes=[self.g["a"], self.g["b"]])
g3 = self.g.copy(nodes=["a", "b"])
        self.assertEqual(len(g3.nodes), 2)
        self.assertEqual(len(g3.edges), 1)
# Assert copy with subclasses of Node and Edge.
class MyNode(graph.Node):
pass
class MyEdge(graph.Edge):
pass
g4 = graph.Graph()
g4.append(MyNode, "a")
g4.append(MyNode, "b")
g4.append(MyEdge, "a", "b")
g4 = g4.copy()
self.assertTrue(isinstance(g4.nodes[0], MyNode))
self.assertTrue(isinstance(g4.edges[0], MyEdge))
print("pattern.graph.Graph.copy()")
#-------------------------------------------------------------------------
class TestGraphLayout(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph(layout=graph.SPRING, distance=10.0)
self.g.add_node("a")
self.g.add_node("b")
self.g.add_node("c")
self.g.add_edge("a", "b")
self.g.add_edge("b", "c")
def test_layout(self):
# Assert GraphLayout properties.
gl = graph.GraphLayout(graph=self.g)
self.assertTrue(gl.graph == self.g)
self.assertTrue(gl.bounds == (0, 0, 0, 0))
self.assertTrue(gl.iterations == 0)
gl.update()
self.assertTrue(gl.iterations == 1)
print("pattern.graph.GraphLayout")
class TestGraphSpringLayout(TestGraphLayout):
def test_layout(self):
# Assert GraphSpringLayout properties.
gl = self.g.layout
self.assertTrue(gl.graph == self.g)
self.assertTrue(gl.k == 4.0)
self.assertTrue(gl.force == 0.01)
self.assertTrue(gl.repulsion == 50)
self.assertTrue(gl.bounds == (0, 0, 0, 0))
self.assertTrue(gl.iterations == 0)
gl.update()
self.assertTrue(gl.iterations == 1)
self.assertTrue(gl.bounds[0] < 0)
self.assertTrue(gl.bounds[1] < 0)
self.assertTrue(gl.bounds[2] > 0)
self.assertTrue(gl.bounds[3] > 0)
print("pattern.graph.GraphSpringLayout")
def test_distance(self):
# Assert 2D distance.
n1 = graph.Node()
n2 = graph.Node()
n1.x = -100
n2.x = +100
d = self.g.layout._distance(n1, n2)
self.assertEqual(d, (200.0, 0.0, 200.0, 40000.0))
print("pattern.graph.GraphSpringLayout._distance")
def test_repulsion(self):
# Assert repulsive node force.
gl = self.g.layout
d1 = gl._distance(self.g["a"], self.g["c"])[2]
gl.update()
d2 = gl._distance(self.g["a"], self.g["c"])[2]
self.assertTrue(d2 > d1)
self.g.layout.reset()
print("pattern.graph.GraphSpringLayout._repulse()")
def test_attraction(self):
# Assert attractive edge force.
gl = self.g.layout
self.g["a"].x = -100
self.g["b"].y = +100
d1 = gl._distance(self.g["a"], self.g["b"])[2]
gl.update()
d2 = gl._distance(self.g["a"], self.g["b"])[2]
self.assertTrue(d2 < d1)
print("pattern.graph.GraphSpringLayout._attract()")
#-------------------------------------------------------------------------
class TestGraphTraversal(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph()
self.g.add_edge("a", "b", weight=0.5)
self.g.add_edge("a", "c")
self.g.add_edge("b", "d")
self.g.add_edge("d", "e")
self.g.add_node("x")
def test_search(self):
# Assert depth-first vs. breadth-first search.
def visit(node):
a.append(node)
def traversable(node, edge):
if edge.node2.id == "e":
return False
g = self.g
a = []
graph.depth_first_search(g["a"], visit, traversable)
self.assertEqual(a, [g["a"], g["b"], g["d"], g["c"]])
a = []
graph.breadth_first_search(g["a"], visit, traversable)
self.assertEqual(a, [g["a"], g["b"], g["c"], g["d"]])
print("pattern.graph.depth_first_search()")
print("pattern.graph.breadth_first_search()")
def test_paths(self):
# Assert depth-first all paths.
g = self.g.copy()
g.add_edge("a", "d")
for id1, id2, length, path in (
("a", "a", 1, [["a"]]),
("a", "d", 3, [["a", "d"], ["a", "b", "d"]]),
("a", "d", 2, [["a", "d"]]),
("a", "d", 1, []),
("a", "x", 1, [])):
p = graph.paths(g, id1, id2, length)
self.assertEqual(p, path)
print("pattern.graph.paths()")
def test_edges(self):
# Assert path of nodes to edges.
g = self.g
p = [g["a"], g["b"], g["d"], g["x"]]
e = list(graph.edges(p))
self.assertEqual(e, [g.edge("a", "b"), g.edge("b", "d"), None])
print("pattern.graph.edges()")
def test_adjacency(self):
# Assert adjacency map with different settings.
a = [
graph.adjacency(self.g),
graph.adjacency(self.g, directed=True),
graph.adjacency(self.g, directed=True, reversed=True),
graph.adjacency(self.g, stochastic=True),
graph.adjacency(self.g, heuristic=lambda id1, id2: 0.1),
]
for i in range(len(a)):
a[i] = sorted((id1, sorted((id2, round(w, 2))
for id2, w in p.items())) for id1, p in a[i].items())
self.assertEqual(a[0], [
("a", [("b", 0.75), ("c", 1.0)]),
("b", [("a", 0.75), ("d", 1.0)]),
("c", [("a", 1.0)]),
("d", [("b", 1.0), ("e", 1.0)]),
("e", [("d", 1.0)]),
("x", [])])
self.assertEqual(a[1], [
("a", [("b", 0.75), ("c", 1.0)]),
("b", [("d", 1.0)]),
("c", []),
("d", [("e", 1.0)]),
("e", []),
("x", [])])
self.assertEqual(a[2], [
("a", []),
("b", [("a", 0.75)]),
("c", [("a", 1.0)]),
("d", [("b", 1.0)]),
("e", [("d", 1.0)]),
("x", [])])
self.assertEqual(a[3], [
("a", [("b", 0.43), ("c", 0.57)]),
("b", [("a", 0.43), ("d", 0.57)]),
("c", [("a", 1.0)]),
("d", [("b", 0.5), ("e", 0.5)]),
("e", [("d", 1.0)]),
("x", [])])
self.assertEqual(a[4], [
("a", [("b", 0.85), ("c", 1.1)]),
("b", [("a", 0.85), ("d", 1.1)]),
("c", [("a", 1.1)]),
("d", [("b", 1.1), ("e", 1.1)]),
("e", [("d", 1.1)]),
("x", [])])
print("pattern.graph.adjacency()")
def test_dijkstra_shortest_path(self):
# Assert Dijkstra's algorithm (node1 -> node2).
g = self.g.copy()
g.add_edge("d", "a")
for id1, id2, heuristic, directed, path in (
("a", "d", None, False, ["a", "d"]),
("a", "d", None, True, ["a", "b", "d"]),
("a", "d", lambda id1, id2: id1 == "d" and id2 == "a" and 1 or 0, False, ["a", "b", "d"])):
p = graph.dijkstra_shortest_path(g, id1, id2, heuristic, directed)
self.assertEqual(p, path)
print("pattern.graph.dijkstra_shortest_path()")
def test_dijkstra_shortest_paths(self):
# Assert Dijkstra's algorithm (node1 -> all).
g = self.g.copy()
g.add_edge("d", "a")
a = [
graph.dijkstra_shortest_paths(g, "a"),
graph.dijkstra_shortest_paths(g, "a", directed=True),
graph.dijkstra_shortest_paths(
g, "a", heuristic=lambda id1, id2: id1 == "d" and id2 == "a" and 1 or 0)
]
for i in range(len(a)):
a[i] = sorted(a[i].items())
self.assertEqual(a[0], [
("a", ["a"]),
("b", ["a", "b"]),
("c", ["a", "c"]),
("d", ["a", "d"]),
("e", ["a", "d", "e"]),
("x", None)])
self.assertEqual(a[1], [
("a", ["a"]),
("b", ["a", "b"]),
("c", ["a", "c"]),
("d", ["a", "b", "d"]),
("e", ["a", "b", "d", "e"]),
("x", None)])
self.assertEqual(a[2], [
("a", ["a"]),
("b", ["a", "b"]),
("c", ["a", "c"]),
("d", ["a", "b", "d"]),
("e", ["a", "b", "d", "e"]),
("x", None)])
print("pattern.graph.dijkstra_shortest_paths()")
def test_floyd_warshall_all_pairs_distance(self):
# Assert all pairs path distance.
p1 = graph.floyd_warshall_all_pairs_distance(self.g)
p2 = sorted((id1, sorted((id2, round(w, 2))
for id2, w in p.items())) for id1, p in p1.items())
self.assertEqual(p2, [
("a", [("a", 0.00), ("b", 0.75),
("c", 1.00), ("d", 1.75), ("e", 2.75)]),
("b", [("a", 0.75), ("b", 0.00),
("c", 1.75), ("d", 1.00), ("e", 2.00)]),
("c", [("a", 1.00), ("b", 1.75),
("c", 2.00), ("d", 2.75), ("e", 3.75)]),
("d", [("a", 1.75), ("b", 1.00),
("c", 2.75), ("d", 0.00), ("e", 1.00)]),
("e", [("a", 2.75), ("b", 2.00),
("c", 3.75), ("d", 1.00), ("e", 2.00)]),
("x", [])])
# Assert predecessor tree.
self.assertEqual(
graph.predecessor_path(p1.predecessors, "a", "d"), ["a", "b", "d"])
print("pattern.graph.floyd_warshall_all_pairs_distance()")
#-------------------------------------------------------------------------
class TestGraphPartitioning(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph()
self.g.add_edge("a", "b", weight=0.5)
self.g.add_edge("a", "c")
self.g.add_edge("b", "d")
self.g.add_edge("d", "e")
self.g.add_edge("x", "y")
self.g.add_node("z")
def test_union(self):
self.assertEqual(graph.union([1, 2], [2, 3]), [1, 2, 3])
def test_intersection(self):
self.assertEqual(graph.intersection([1, 2], [2, 3]), [2])
def test_difference(self):
self.assertEqual(graph.difference([1, 2], [2, 3]), [1])
def test_partition(self):
# Assert unconnected subgraph partitioning.
g = graph.partition(self.g)
self.assertTrue(len(g) == 3)
self.assertTrue(isinstance(g[0], graph.Graph))
        self.assertEqual(sorted(g[0].keys()), ["a", "b", "c", "d", "e"])
        self.assertEqual(sorted(g[1].keys()), ["x", "y"])
        self.assertEqual(sorted(g[2].keys()), ["z"])
print("pattern.graph.partition()")
def test_clique(self):
# Assert node cliques.
v = graph.clique(self.g, "a")
self.assertEqual(v, ["a", "b"])
self.g.add_edge("b", "c")
v = graph.clique(self.g, "a")
self.assertEqual(v, ["a", "b", "c"])
v = graph.cliques(self.g, 2)
self.assertEqual(
v, [["a", "b", "c"], ["b", "d"], ["d", "e"], ["x", "y"]])
print("pattern.graph.clique()")
print("pattern.graph.cliques()")
#-------------------------------------------------------------------------
class TestGraphMaintenance(unittest.TestCase):
def setUp(self):
pass
def test_unlink(self):
# Assert remove all edges to/from Node(a).
g = graph.Graph()
g.add_edge("a", "b")
g.add_edge("a", "c")
graph.unlink(g, g["a"])
self.assertTrue(len(g.edges) == 0)
# Assert remove edges between Node(a) and Node(b)
g = graph.Graph()
g.add_edge("a", "b")
g.add_edge("a", "c")
graph.unlink(g, g["a"], "b")
self.assertTrue(len(g.edges) == 1)
print("pattern.graph.unlink()")
def test_redirect(self):
# Assert transfer connections of Node(a) to Node(d).
g = graph.Graph()
g.add_edge("a", "b")
g.add_edge("c", "a")
g.add_node("d")
graph.redirect(g, g["a"], "d")
self.assertTrue(len(g["a"].edges) == 0)
self.assertTrue(len(g["d"].edges) == 2)
self.assertTrue(g.edge("d", "c").node1 == g["c"])
print("pattern.graph.redirect()")
def test_cut(self):
# Assert unlink Node(b) and redirect a->c and a->d.
g = graph.Graph()
g.add_edge("a", "b")
g.add_edge("b", "c")
g.add_edge("b", "d")
graph.cut(g, g["b"])
self.assertTrue(len(g["b"].edges) == 0)
self.assertTrue(g.edge("a", "c") is not None)
self.assertTrue(g.edge("a", "d") is not None)
print("pattern.graph.cut()")
def test_insert(self):
g = graph.Graph()
g.add_edge("a", "b")
g.add_node("c")
graph.insert(g, g["c"], g["a"], g["b"])
self.assertTrue(g.edge("a", "b") is None)
self.assertTrue(g.edge("a", "c") is not None)
self.assertTrue(g.edge("c", "b") is not None)
print("pattern.graph.insert()")
#-------------------------------------------------------------------------
class TestGraphCommonsense(unittest.TestCase):
def setUp(self):
pass
def test_halo(self):
# Assert concept halo (e.g., latent related concepts).
g = commonsense.Commonsense()
v = [concept.id for concept in g["rose"].halo]
self.assertTrue("red" in v)
self.assertTrue("romance" in v)
# Concept.properties is the list of properties (adjectives) in the
# halo.
v = g["rose"].properties
self.assertTrue("red" in v)
self.assertTrue("romance" not in v)
print("pattern.graph.commonsense.Concept.halo")
print("pattern.graph.commonsense.Concept.properties")
def test_field(self):
# Assert semantic field (e.g., concept taxonomy).
g = commonsense.Commonsense()
v = [concept.id for concept in g.field("color")]
self.assertTrue("red" in v)
self.assertTrue("green" in v)
self.assertTrue("blue" in v)
print("pattern.graph.commonsense.Commonsense.field()")
def test_similarity(self):
# Assert that tiger is more similar to lion than to spoon
# (which is common sense).
g = commonsense.Commonsense()
w1 = g.similarity("tiger", "lion")
w2 = g.similarity("tiger", "spoon")
self.assertTrue(w1 > w2)
print("pattern.graph.commonsense.Commonsense.similarity()")
#-------------------------------------------------------------------------
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.interactive.interactive_context."""
import builtins
import os
import shutil
import tempfile
import textwrap
from typing import Any, Dict, List
from unittest import mock
import jinja2
import nbformat
import tensorflow as tf
from tfx import types
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.orchestration.experimental.interactive import interactive_context
from tfx.orchestration.experimental.interactive import standard_visualizations
from tfx.orchestration.launcher.in_process_component_launcher import InProcessComponentLauncher
from tfx.types import component_spec
from tfx.types import standard_artifacts
from tfx.utils import telemetry_utils
class InteractiveContextTest(tf.test.TestCase):
def setUp(self):
super().setUp()
builtins.__dict__['__IPYTHON__'] = True
self._tmpdir = None
def tearDown(self):
if self._tmpdir:
shutil.rmtree(self._tmpdir, ignore_errors=True)
super().tearDown()
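  # Writes a small notebook fixture to disk. The %%skip_for_export cell and the
  # %some_line_magic line are expected to be stripped by export_to_pipeline() in the
  # export tests below.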
def _setupTestNotebook(self, notebook_name='test_notebook.ipynb'):
notebook = nbformat.v4.new_notebook(
cells=[
nbformat.v4.new_markdown_cell(source='A markdown cell.'),
nbformat.v4.new_code_cell(source='foo = 1'),
nbformat.v4.new_markdown_cell(source='Another markdown cell.'),
nbformat.v4.new_code_cell(source=textwrap.dedent('''\
%%skip_for_export
!pip install something
!ls
x = 1
y = 2
print('this cell should not be exported')''')),
nbformat.v4.new_code_cell(source=textwrap.dedent('''\
def bar():
%some_line_magic print('this line should not be exported')
a = "hello"
b = "world"
return a + b''')),
nbformat.v4.new_code_cell(source=textwrap.dedent('''\
def baz():
c = "nyan"
d = "cat"
return c + d''')),
]
)
self._tmpdir = tempfile.mkdtemp()
self._exportdir = tempfile.mkdtemp()
self._notebook_fp = os.path.join(self._tmpdir, notebook_name)
nbformat.write(notebook, self._notebook_fp)
def testRequiresIPythonExecutes(self):
self.foo_called = False
def foo():
self.foo_called = True
interactive_context.requires_ipython(foo)()
self.assertTrue(self.foo_called)
def testRequiresIPythonNoOp(self):
del builtins.__dict__['__IPYTHON__']
self.foo_called = False
def foo():
self.foo_called = True
interactive_context.requires_ipython(foo)()
self.assertFalse(self.foo_called)
def testBasicRun(self):
class _FakeComponentSpec(types.ComponentSpec):
PARAMETERS = {}
INPUTS = {}
OUTPUTS = {}
class _FakeExecutor(base_executor.BaseExecutor):
CALLED = False
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
_FakeExecutor.CALLED = True
class _FakeComponent(base_component.BaseComponent):
SPEC_CLASS = _FakeComponentSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(_FakeExecutor)
def __init__(self, spec: types.ComponentSpec):
super().__init__(spec=spec)
c = interactive_context.InteractiveContext()
component = _FakeComponent(_FakeComponentSpec())
c.run(component)
self.assertTrue(_FakeExecutor.CALLED)
def testRunMethodRequiresIPython(self):
del builtins.__dict__['__IPYTHON__']
c = interactive_context.InteractiveContext()
self.assertIsNone(c.run(None))
def testUnresolvedChannel(self):
class _FakeComponentSpec(types.ComponentSpec):
PARAMETERS = {}
INPUTS = {
'input':
component_spec.ChannelParameter(type=standard_artifacts.Examples)
}
OUTPUTS = {}
class _FakeExecutor(base_executor.BaseExecutor):
CALLED = False
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
_FakeExecutor.CALLED = True
class _FakeComponent(base_component.BaseComponent):
SPEC_CLASS = _FakeComponentSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(_FakeExecutor)
def __init__(self, spec: types.ComponentSpec):
super().__init__(spec=spec)
c = interactive_context.InteractiveContext()
foo = types.Channel(type=standard_artifacts.Examples).set_artifacts(
[standard_artifacts.Examples()])
component = _FakeComponent(_FakeComponentSpec(input=foo))
with self.assertRaisesRegex(ValueError, 'Unresolved input channel'):
c.run(component)
@mock.patch.object(jinja2.Environment, 'get_template',
return_value=jinja2.Template('{{ notebook_content }}'))
def testExportToPipeline(self, mock_get_template):
self._setupTestNotebook()
c = interactive_context.InteractiveContext()
export_filepath = os.path.join(self._exportdir, 'exported_pipeline.py')
c.export_to_pipeline(notebook_filepath=self._notebook_fp,
export_filepath=export_filepath,
runner_type='beam')
with open(export_filepath, 'r') as exported_pipeline:
code = exported_pipeline.read()
self.assertEqual(code, textwrap.dedent('''\
foo = 1
def bar():
a = "hello"
b = "world"
return a + b
def baz():
c = "nyan"
d = "cat"
return c + d'''))
def testExportToPipelineRaisesErrorInvalidRunnerType(self):
self._setupTestNotebook()
c = interactive_context.InteractiveContext()
export_filepath = os.path.join(self._exportdir, 'exported_pipeline.py')
with self.assertRaisesRegex(ValueError, 'runner_type'):
c.export_to_pipeline(notebook_filepath=self._notebook_fp,
export_filepath=export_filepath,
runner_type='foobar')
@mock.patch('tfx.orchestration.experimental.interactive.'
'standard_visualizations.ExampleAnomaliesVisualization.display')
def testShow(self, *unused_mocks):
context = interactive_context.InteractiveContext()
mock_object = mock.MagicMock()
standard_visualizations.ExampleAnomaliesVisualization.display = mock_object
mock_object.assert_not_called()
artifact = standard_artifacts.ExampleAnomalies()
context.show(
types.Channel(type=standard_artifacts.ExampleAnomalies).set_artifacts(
[artifact]))
mock_object.assert_called_with(artifact)
@mock.patch('tfx.orchestration.launcher.in_process_component_launcher.'
'InProcessComponentLauncher.create')
def testTelemetry(self, mock_launcher_create):
class _FakeLauncher:
def __init__(self):
self.recorded_labels = []
def launch(self):
self.recorded_labels = telemetry_utils.make_beam_labels_args()
return mock.MagicMock()
class _FakeComponentSpec(types.ComponentSpec):
PARAMETERS = {}
INPUTS = {}
OUTPUTS = {}
class _FakeExecutor(base_executor.BaseExecutor):
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
pass
class _FakeComponent(base_component.BaseComponent):
SPEC_CLASS = _FakeComponentSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(_FakeExecutor)
def __init__(self):
super().__init__(spec=_FakeComponentSpec())
# Set up a fake launcher.
fake_launcher = _FakeLauncher()
mock_launcher_create.side_effect = [fake_launcher]
InProcessComponentLauncher.create = mock_launcher_create
context = interactive_context.InteractiveContext()
context.run(_FakeComponent())
self.assertIn('--labels tfx_runner=interactivecontext',
' '.join(fake_launcher.recorded_labels))
if __name__ == '__main__':
tf.test.main()
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.utils.http import urlencode
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
from horizon.workflows import views
INDEX_URL = reverse('horizon:admin:access_and_security:index')
NAMESPACE = "horizon:admin:access_and_security:floating_ips"
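# The tests below resolve workflow URLs from this namespace, e.g.
# reverse('%s:associate' % NAMESPACE) for the floating IP "associate" workflow.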
class FloatingIpViewTests(test.TestCase):
@test.create_stubs({api.network: ('floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate(self):
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
self.mox.ReplayAll()
url = reverse('%s:associate' % NAMESPACE)
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
# Verify that our "associated" floating IP isn't in the choices list.
self.assertTrue(self.floating_ips.first() not in choices)
@test.create_stubs({api.network: ('floating_ip_target_list',
'floating_ip_target_get_by_instance',
'tenant_floating_ip_list',)})
def test_associate_with_instance_id(self):
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_target_get_by_instance(
IsA(http.HttpRequest), 'TEST-ID', self.servers.list()) \
.AndReturn('TEST-ID')
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
self.mox.ReplayAll()
base_url = reverse('%s:associate' % NAMESPACE)
params = urlencode({'instance_id': 'TEST-ID'})
url = '?'.join([base_url, params])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
# Verify that our "associated" floating IP isn't in the choices list.
self.assertTrue(self.floating_ips.first() not in choices)
@test.create_stubs({api.network: ('floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_with_port_id(self):
targets = [api.nova.FloatingIpTarget(s) for s in self.servers.list()]
targets[0].port_id = '101'
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(targets)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
self.mox.ReplayAll()
base_url = reverse('%s:associate' % NAMESPACE)
params = urlencode({'port_id': '101'})
url = '?'.join([base_url, params])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
# Verify that our "associated" floating IP isn't in the choices list.
self.assertTrue(self.floating_ips.first() not in choices)
@test.create_stubs({api.network: ('floating_ip_associate',
'floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_post(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id)
self.mox.ReplayAll()
form_data = {'instance_id': server.id,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.network: ('floating_ip_associate',
'floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_post_with_redirect(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id)
self.mox.ReplayAll()
next = reverse("horizon:admin:instances:index")
form_data = {'instance_id': server.id,
'next': next,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, next)
@test.create_stubs({api.network: ('floating_ip_associate',
'floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_post_with_exception(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
form_data = {'instance_id': server.id,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('server_list',),
api.network: ('floating_ip_disassociate',
'floating_ip_supported',
'tenant_floating_ip_get',
'tenant_floating_ip_list',),
api.neutron: ('is_extension_supported',)})
def test_disassociate_post(self):
floating_ip = self.floating_ips.first()
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.network.floating_ip_disassociate(IsA(http.HttpRequest),
floating_ip.id)
self.mox.ReplayAll()
action = "floating_ips__disassociate__%s" % floating_ip.id
res = self.client.post(INDEX_URL, {"action": action})
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('server_list',),
api.network: ('floating_ip_disassociate',
'floating_ip_supported',
'tenant_floating_ip_get',
'tenant_floating_ip_list',),
api.neutron: ('is_extension_supported',)})
def test_disassociate_post_with_exception(self):
floating_ip = self.floating_ips.first()
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.network.floating_ip_disassociate(IsA(http.HttpRequest),
floating_ip.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
action = "floating_ips__disassociate__%s" % floating_ip.id
res = self.client.post(INDEX_URL, {"action": action})
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.network: ('floating_ip_supported',
'tenant_floating_ip_list',
'security_group_list',
'floating_ip_pools_list',),
api.nova: ('keypair_list',
'server_list',),
quotas: ('tenant_quota_usages',),
api.base: ('is_service_enabled',)})
def test_allocate_button_attributes(self):
keypairs = self.keypairs.list()
floating_ips = self.floating_ips.list()
floating_pools = self.pools.list()
quota_data = self.quota_usages.first()
quota_data['floating_ips']['available'] = 10
sec_groups = self.security_groups.list()
api.network.floating_ip_supported(
IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_ips)
api.network.security_group_list(
IsA(http.HttpRequest)).MultipleTimes()\
.AndReturn(sec_groups)
api.network.floating_ip_pools_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_pools)
api.nova.keypair_list(
IsA(http.HttpRequest)) \
.AndReturn(keypairs)
api.nova.server_list(
IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)).MultipleTimes() \
.AndReturn(quota_data)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'network').MultipleTimes() \
.AndReturn(True)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'ec2').MultipleTimes() \
.AndReturn(False)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL +
"?tab=access_security_tabs__floating_ips_tab")
allocate_action = self.getAndAssertTableAction(res, 'floating_ips',
'allocate')
self.assertEqual(set(['ajax-modal']), set(allocate_action.classes))
self.assertEqual('Allocate IP To Project',
six.text_type(allocate_action.verbose_name))
self.assertIsNone(allocate_action.policy_rules)
url = 'horizon:admin:access_and_security:floating_ips:allocate'
self.assertEqual(url, allocate_action.url)
@test.create_stubs({api.network: ('floating_ip_supported',
'tenant_floating_ip_list',
'security_group_list',
'floating_ip_pools_list',),
api.nova: ('keypair_list',
'server_list',),
quotas: ('tenant_quota_usages',),
api.base: ('is_service_enabled',)})
def test_allocate_button_disabled_when_quota_exceeded(self):
keypairs = self.keypairs.list()
floating_ips = self.floating_ips.list()
floating_pools = self.pools.list()
quota_data = self.quota_usages.first()
quota_data['floating_ips']['available'] = 0
sec_groups = self.security_groups.list()
api.network.floating_ip_supported(
IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_ips)
api.network.security_group_list(
IsA(http.HttpRequest)).MultipleTimes()\
.AndReturn(sec_groups)
api.network.floating_ip_pools_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_pools)
api.nova.keypair_list(
IsA(http.HttpRequest)) \
.AndReturn(keypairs)
api.nova.server_list(
IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)).MultipleTimes() \
.AndReturn(quota_data)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'network').MultipleTimes() \
.AndReturn(True)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'ec2').MultipleTimes() \
.AndReturn(False)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL +
"?tab=access_security_tabs__floating_ips_tab")
allocate_action = self.getAndAssertTableAction(res, 'floating_ips',
'allocate')
self.assertTrue('disabled' in allocate_action.classes,
'The create button should be disabled')
self.assertEqual('Allocate IP To Project (Quota exceeded)',
six.text_type(allocate_action.verbose_name))
class FloatingIpNeutronViewTests(FloatingIpViewTests):
def setUp(self):
super(FloatingIpViewTests, self).setUp()
self._floating_ips_orig = self.floating_ips
self.floating_ips = self.floating_ips_uuid
def tearDown(self):
self.floating_ips = self._floating_ips_orig
super(FloatingIpViewTests, self).tearDown()
@test.create_stubs({api.nova: ('tenant_quota_get', 'flavor_list',
'server_list'),
api.network: ('floating_ip_pools_list',
'floating_ip_supported',
'security_group_list',
'tenant_floating_ip_list'),
api.neutron: ('is_extension_supported',
'tenant_quota_get',
'network_list',
'router_list',
'subnet_list'),
api.base: ('is_service_enabled',),
api.cinder: ('is_volume_service_enabled',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_correct_quotas_displayed(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
api.cinder.is_volume_service_enabled(IsA(http.HttpRequest)) \
.AndReturn(False)
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(True)
api.base.is_service_enabled(IsA(http.HttpRequest), 'compute') \
.MultipleTimes().AndReturn(True)
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.quotas.first())
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
search_opts = {'tenant_id': self.request.user.tenant_id}
api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts,
all_tenants=True) \
.AndReturn([servers, False])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(self.neutron_quotas.first())
api.neutron.router_list(IsA(http.HttpRequest)) \
.AndReturn(self.routers.list())
api.neutron.subnet_list(IsA(http.HttpRequest), shared=False) \
.AndReturn(self.subnets.list())
api.neutron.subnet_list(IsA(http.HttpRequest), shared=True) \
.AndReturn(list())
api.neutron.network_list(IsA(http.HttpRequest), shared=False) \
.AndReturn(self.networks.list())
api.neutron.network_list(IsA(http.HttpRequest), shared=True) \
.AndReturn(list())
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.floating_ips.list())
api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
.AndReturn(self.pools.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.security_groups.list())
self.mox.ReplayAll()
url = reverse('%s:allocate' % NAMESPACE)
res = self.client.get(url)
self.assertEqual(res.context['usages']['floating_ips']['quota'],
self.neutron_quotas.first().get('floatingip').limit)
@test.create_stubs({api.nova: ('tenant_quota_get', 'flavor_list',
'server_list'),
api.network: ('floating_ip_pools_list',
'floating_ip_supported',
'security_group_list',
'tenant_floating_ip_list'),
api.neutron: ('is_extension_supported',
'tenant_quota_get',
'network_list',
'router_list',
'subnet_list'),
api.base: ('is_service_enabled',),
api.cinder: ('is_volume_service_enabled',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_correct_quotas_displayed_shared_networks(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
api.cinder.is_volume_service_enabled(IsA(http.HttpRequest)) \
.AndReturn(False)
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(True)
api.base.is_service_enabled(IsA(http.HttpRequest), 'compute') \
.MultipleTimes().AndReturn(True)
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.quotas.first())
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
search_opts = {'tenant_id': self.request.user.tenant_id}
api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts,
all_tenants=True) \
.AndReturn([servers, False])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(self.neutron_quotas.first())
api.neutron.router_list(IsA(http.HttpRequest)) \
.AndReturn(self.routers.list())
api.neutron.subnet_list(IsA(http.HttpRequest), shared=False) \
.AndReturn(list())
api.neutron.subnet_list(IsA(http.HttpRequest), shared=True) \
.AndReturn(self.subnets.list())
api.neutron.network_list(IsA(http.HttpRequest), shared=False) \
.AndReturn(list())
api.neutron.network_list(IsA(http.HttpRequest), shared=True) \
.AndReturn(self.networks.list())
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.floating_ips.list())
api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
.AndReturn(self.pools.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.security_groups.list())
self.mox.ReplayAll()
url = reverse('%s:allocate' % NAMESPACE)
res = self.client.get(url)
self.assertEqual(res.context['usages']['floating_ips']['quota'],
self.neutron_quotas.first().get('floatingip').limit)
|
|
VERSION_ANY = 0
CRC_POLY = 0xEDB88320
MFS_ROOT_ID = 1
MFS_NAME_MAX = 255
MFS_MAX_FILE_SIZE = 0x20000000000
# 1.6.21
VERSION = 0x010615
CHUNKSIZE = 1<<26
GETDIR_FLAG_WITHATTR = 0x01
GETDIR_FLAG_ADDTOCACHE = 0x02
GETDIR_FLAG_DIRCACHE = 0x04
#type for readdir command
TYPE_FILE = 'f'
TYPE_SYMLINK = 'l'
TYPE_DIRECTORY = 'd'
TYPE_FIFO = 'q'
TYPE_BLOCKDEV = 'b'
TYPE_CHARDEV = 'c'
TYPE_SOCKET = 's'
TYPE_TRASH = 't'
TYPE_RESERVED = 'r'
TYPE_UNKNOWN = '?'
# status code
STATUS_OK = 0 # OK
ERROR_EPERM = 1 # Operation not permitted
ERROR_ENOTDIR = 2 # Not a directory
ERROR_ENOENT = 3 # No such file or directory
ERROR_EACCES = 4 # Permission denied
ERROR_EEXIST = 5 # File exists
ERROR_EINVAL = 6 # Invalid argument
ERROR_ENOTEMPTY = 7 # Directory not empty
ERROR_CHUNKLOST = 8 # Chunk lost
ERROR_OUTOFMEMORY = 9 # Out of memory
ERROR_INDEXTOOBIG = 10 # Index too big
ERROR_LOCKED = 11 # Chunk locked
ERROR_NOCHUNKSERVERS = 12 # No chunk servers
ERROR_NOCHUNK = 13 # No such chunk
ERROR_CHUNKBUSY = 14 # Chunk is busy
ERROR_REGISTER = 15 # Incorrect register BLOB
ERROR_NOTDONE = 16 # None of chunk servers performed requested operation
ERROR_NOTOPENED = 17 # File not opened
ERROR_NOTSTARTED = 18 # Write not started
ERROR_WRONGVERSION = 19 # Wrong chunk version
ERROR_CHUNKEXIST = 20 # Chunk already exists
ERROR_NOSPACE = 21 # No space left
ERROR_IO = 22 # IO error
ERROR_BNUMTOOBIG = 23 # Incorrect block number
ERROR_WRONGSIZE = 24 # Incorrect size
ERROR_WRONGOFFSET = 25 # Incorrect offset
ERROR_CANTCONNECT = 26 # Can't connect
ERROR_WRONGCHUNKID = 27 # Incorrect chunk id
ERROR_DISCONNECTED = 28 # Disconnected
ERROR_CRC = 29 # CRC error
ERROR_DELAYED = 30 # Operation delayed
ERROR_CANTCREATEPATH = 31 # Can't create path
ERROR_MISMATCH = 32 # Data mismatch
ERROR_EROFS = 33 # Read-only file system
ERROR_QUOTA = 34 # Quota exceeded
ERROR_BADSESSIONID = 35 # Bad session id
ERROR_NOPASSWORD = 36 # Password is needed
ERROR_BADPASSWORD = 37 # Incorrect password
ERROR_MAX = 38
# flags: "flags" field in "CUTOMA_FUSE_AQUIRE"
WANT_READ = 1
WANT_WRITE = 2
AFTER_CREATE = 4
# flags: "setmask" field in "CUTOMA_FUSE_SETATTR"
# SET_GOAL_FLAG,SET_DELETE_FLAG are no longer supported
# SET_LENGTH_FLAG,SET_OPENED_FLAG are deprecated
# instead of using FUSE_SETATTR with SET_GOAL_FLAG use FUSE_SETGOAL command
# instead of using FUSE_SETATTR with SET_DELETE_FLAG use FUSE_SETTRASH_TIMEOUT command
# instead of using FUSE_SETATTR with SET_LENGTH_FLAG/SET_OPENED_FLAG use FUSE_TRUNCATE command
SET_GOAL_FLAG = 1 << 0
SET_MODE_FLAG = 1 << 1
SET_UID_FLAG = 1 << 2
SET_GID_FLAG = 1 << 3
SET_LENGTH_FLAG = 1 << 4
SET_MTIME_FLAG = 1 << 5
SET_ATIME_FLAG = 1 << 6
SET_OPENED_FLAG = 1 << 7
SET_DELETE_FLAG = 1 << 8
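# Illustrative example: a setmask that changes mode, uid and gid in a single
# FUSE_SETATTR request ORs the relevant bits together:
#   setmask = SET_MODE_FLAG | SET_UID_FLAG | SET_GID_FLAG # == 0x0E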
ANTOAN_NOP = 0
# CHUNKSERVER <-> CLIENT/CHUNKSERVER
CUTOCS_READ = 200
# chunkid:64 version:32 offset:32 size:32
CSTOCU_READ_STATUS = 201
# chunkid:64 status:8
CSTOCU_READ_DATA = 202
# chunkid:64 blocknum:16 offset:16 size:32 crc:32 size*[ databyte:8 ]
CUTOCS_WRITE = 210
# chunkid:64 version:32 N*[ ip:32 port:16 ]
CSTOCU_WRITE_STATUS = 211
# chunkid:64 writeid:32 status:8
CUTOCS_WRITE_DATA = 212
# chunkid:64 writeid:32 blocknum:16 offset:16 size:32 crc:32 size*[ databyte:8 ]
CUTOCS_WRITE_FINISH = 213
# chunkid:64 version:32
#ANY <-> CHUNKSERVER
ANTOCS_CHUNK_CHECKSUM = 300
# chunkid:64 version:32
CSTOAN_CHUNK_CHECKSUM = 301
# chunkid:64 version:32 checksum:32
# chunkid:64 version:32 status:8
ANTOCS_CHUNK_CHECKSUM_TAB = 302
# chunkid:64 version:32
CSTOAN_CHUNK_CHECKSUM_TAB = 303
# chunkid:64 version:32 1024*[checksum:32]
# chunkid:64 version:32 status:8
# CLIENT <-> MASTER
# old attr record:
# type:8 flags:8 mode:16 uid:32 gid:32 atime:32 mtime:32 ctime:32 length:64
# total: 32B (1+1+2+4+4+4+4+4+8)
#
# flags: ---DGGGG
# |\--/
# | \------ goal
# \--------- delete immediately
# new attr record:
# type:8 mode:16 uid:32 gid:32 atime:32 mtime:32 ctime:32 nlink:32 length:64
# total: 35B
#
# mode: FFFFMMMMMMMMMMMM
# \--/\----------/
# \ \------- mode
# \-------------- flags
#
# in case of BLOCKDEV and CHARDEV instead of 'length:64' at the end there is 'major:16 minor:16 empty:32'
# NAME type:
# ( leng:8 data:lengB )
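# Illustrative sketch (an assumption, not part of the original protocol file):
# one way to unpack the 35-byte "new attr record" documented above, assuming
# network (big-endian) byte order; the helper name is hypothetical.
def _unpack_new_attr_demo(attr):
    import struct
    # type:8 mode:16 uid:32 gid:32 atime:32 mtime:32 ctime:32 nlink:32 length:64
    return struct.unpack(">BHIIIIIIQ", attr) # 1 + 2 + 6*4 + 8 == 35 bytes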
FUSE_REGISTER_BLOB_NOACL = "kFh9mdZsR84l5e675v8bi54VfXaXSYozaU3DSz9AsLLtOtKipzb9aQNkxeOISx64"
# CUTOMA:
# clientid:32 [ version:32 ]
# MATOCU:
# clientid:32
# status:8
FUSE_REGISTER_BLOB_TOOLS_NOACL = "kFh9mdZsR84l5e675v8bi54VfXaXSYozaU3DSz9AsLLtOtKipzb9aQNkxeOISx63"
# CUTOMA:
# -
# MATOCU:
# status:8
FUSE_REGISTER_BLOB_ACL = "DjI1GAQDULI5d2YjA26ypc3ovkhjvhciTQVx3CS4nYgtBoUcsljiVpsErJENHaw0"
REGISTER_GETRANDOM = 1
# rcode==1: generate random blob
# CUTOMA:
# rcode:8
# MATOCU:
# randomblob:32B
REGISTER_NEWSESSION = 2
# rcode==2: first register
# CUTOMA:
# rcode:8 version:32 ileng:32 info:ilengB pleng:32 path:plengB [ passcode:16B ]
# MATOCU:
# sessionid:32 sesflags:8 rootuid:32 rootgid:32
# status:8
REGISTER_RECONNECT = 3
# rcode==3: mount reconnect
# CUTOMA:
# rcode:8 sessionid:32 version:32
# MATOCU:
# status:8
REGISTER_TOOLS = 4
# rcode==4: tools connect
# CUTOMA:
# rcode:8 sessionid:32 version:32
# MATOCU:
# status:8
REGISTER_NEWMETASESSION = 5
# rcode==5: first register
# CUTOMA:
# rcode:8 version:32 ileng:32 info:ilengB [ passcode:16B ]
# MATOCU:
# sessionid:32 sesflags:8
# status:8
CUTOMA_FUSE_REGISTER = 400
# blob:64B ... (depends on blob - see blob descriptions above)
MATOCU_FUSE_REGISTER = 401
# depends on blob - see blob descriptions above
CUTOMA_FUSE_STATFS = 402
# msgid:32 -
MATOCU_FUSE_STATFS = 403
# msgid:32 totalspace:64 availspace:64 trashspace:64 inodes:32
CUTOMA_FUSE_ACCESS = 404
# msgid:32 inode:32 uid:32 gid:32 modemask:8
MATOCU_FUSE_ACCESS = 405
# msgid:32 status:8
CUTOMA_FUSE_LOOKUP = 406
# msgid:32 inode:32 name:NAME uid:32 gid:32
MATOCU_FUSE_LOOKUP = 407
# msgid:32 status:8
# msgid:32 inode:32 attr:35B
CUTOMA_FUSE_GETATTR = 408
# msgid:32 inode:32
# msgid:32 inode:32 uid:32 gid:32
MATOCU_FUSE_GETATTR = 409
# msgid:32 status:8
# msgid:32 attr:35B
CUTOMA_FUSE_SETATTR = 410
# msgid:32 inode:32 uid:32 gid:32 setmask:8 attr:32B - compatibility with very old version
# msgid:32 inode:32 uid:32 gid:32 setmask:16 attr:32B - compatibility with old version
# msgid:32 inode:32 uid:32 gid:32 setmask:8 attrmode:16 attruid:32 attrgid:32 attratime:32 attrmtime:32
MATOCU_FUSE_SETATTR = 411
# msgid:32 status:8
# msgid:32 attr:35B
CUTOMA_FUSE_READLINK = 412
# msgid:32 inode:32
MATOCU_FUSE_READLINK = 413
# msgid:32 status:8
# msgid:32 length:32 path:lengthB
CUTOMA_FUSE_SYMLINK = 414
# msgid:32 inode:32 name:NAME length:32 path:lengthB uid:32 gid:32
MATOCU_FUSE_SYMLINK = 415
# msgid:32 status:8
# msgid:32 inode:32 attr:35B
CUTOMA_FUSE_MKNOD = 416
# msgid:32 inode:32 name:NAME type:8 mode:16 uid:32 gid:32 rdev:32
MATOCU_FUSE_MKNOD = 417
# msgid:32 status:8
# msgid:32 inode:32 attr:35B
CUTOMA_FUSE_MKDIR = 418
# msgid:32 inode:32 name:NAME mode:16 uid:32 gid:32
MATOCU_FUSE_MKDIR = 419
# msgid:32 status:8
# msgid:32 inode:32 attr:35B
CUTOMA_FUSE_UNLINK = 420
# msgid:32 inode:32 name:NAME uid:32 gid:32
MATOCU_FUSE_UNLINK = 421
# msgid:32 status:8
CUTOMA_FUSE_RMDIR = 422
# msgid:32 inode:32 name:NAME uid:32 gid:32
MATOCU_FUSE_RMDIR = 423
# msgid:32 status:8
CUTOMA_FUSE_RENAME = 424
# msgid:32 inode_src:32 name_src:NAME inode_dst:32 name_dst:NAME uid:32 gid:32
MATOCU_FUSE_RENAME = 425
# msgid:32 status:8
CUTOMA_FUSE_LINK = 426
# msgid:32 inode:32 inode_dst:32 name_dst:NAME uid:32 gid:32
MATOCU_FUSE_LINK = 427
# msgid:32 status:8
# msgid:32 inode:32 attr:35B
CUTOMA_FUSE_GETDIR = 428
# msgid:32 inode:32 uid:32 gid:32 - old version (works like new version with flags==0)
# msgid:32 inode:32 uid:32 gid:32 flags:8
MATOCU_FUSE_GETDIR = 429
# msgid:32 status:8
# msgid:32 N*[ name:NAME inode:32 type:8 ] - when GETDIR_FLAG_WITHATTR in flags is not set
# msgid:32 N*[ name:NAME inode:32 attr:35B ] - when GETDIR_FLAG_WITHATTR in flags is set
CUTOMA_FUSE_OPEN = 430
# msgid:32 inode:32 uid:32 gid:32 flags:8
MATOCU_FUSE_OPEN = 431
# msgid:32 status:8
# since 1.6.9 if no error:
# msgid:32 attr:35B
CUTOMA_FUSE_READ_CHUNK = 432
# msgid:32 inode:32 chunkindx:32
MATOCU_FUSE_READ_CHUNK = 433
# msgid:32 status:8
# msgid:32 length:64 chunkid:64 version:32 N*[ip:32 port:16]
# msgid:32 length:64 srcs:8 srcs*[chunkid:64 version:32 ip:32 port:16] - not implemented
CUTOMA_FUSE_WRITE_CHUNK = 434 # it creates, duplicates or sets new version of chunk if necessary
# msgid:32 inode:32 chunkindx:32
MATOCU_FUSE_WRITE_CHUNK = 435
# msgid:32 status:8
# msgid:32 length:64 chunkid:64 version:32 N*[ip:32 port:16]
CUTOMA_FUSE_WRITE_CHUNK_END = 436
# msgid:32 chunkid:64 inode:32 length:64
MATOCU_FUSE_WRITE_CHUNK_END = 437
# msgid:32 status:8
CUTOMA_FUSE_APPEND = 438
# msgid:32 inode:32 srcinode:32 uid:32 gid:32 - append to existing element
MATOCU_FUSE_APPEND = 439
# msgid:32 status:8
CUTOMA_FUSE_CHECK = 440
# msgid:32 inode:32
MATOCU_FUSE_CHECK = 441
# msgid:32 status:8
# msgid:32 N*[ copies:8 chunks:16 ]
CUTOMA_FUSE_GETTRASHTIME = 442
# msgid:32 inode:32 gmode:8
MATOCU_FUSE_GETTRASHTIME = 443
# msgid:32 status:8
# msgid:32 tdirs:32 tfiles:32 tdirs*[ trashtime:32 dirs:32 ] tfiles*[ trashtime:32 files:32 ]
CUTOMA_FUSE_SETTRASHTIME = 444
# msgid:32 inode:32 uid:32 trashtimeout:32 smode:8
MATOCU_FUSE_SETTRASHTIME = 445
# msgid:32 status:8
# msgid:32 changed:32 notchanged:32 notpermitted:32
CUTOMA_FUSE_GETGOAL = 446
# msgid:32 inode:32 gmode:8
MATOCU_FUSE_GETGOAL = 447
# msgid:32 status:8
# msgid:32 gdirs:8 gfiles:8 gdirs*[ goal:8 dirs:32 ] gfiles*[ goal:8 files:32 ]
CUTOMA_FUSE_SETGOAL = 448
# msgid:32 inode:32 uid:32 goal:8 smode:8
MATOCU_FUSE_SETGOAL = 449
# msgid:32 status:8
# msgid:32 changed:32 notchanged:32 notpermitted:32
CUTOMA_FUSE_GETTRASH = 450
# msgid:32
MATOCU_FUSE_GETTRASH = 451
# msgid:32 status:8
# msgid:32 N*[ name:NAME inode:32 ]
CUTOMA_FUSE_GETDETACHEDATTR = 452
# msgid:32 inode:32 dtype:8
MATOCU_FUSE_GETDETACHEDATTR = 453
# msgid:32 status:8
# msgid:32 attr:35B
CUTOMA_FUSE_GETTRASHPATH = 454
# msgid:32 inode:32
MATOCU_FUSE_GETTRASHPATH = 455
# msgid:32 status:8
# msgid:32 length:32 path:lengthB
CUTOMA_FUSE_SETTRASHPATH = 456
# msgid:32 inode:32 length:32 path:lengthB
MATOCU_FUSE_SETTRASHPATH = 457
# msgid:32 status:8
CUTOMA_FUSE_UNDEL = 458
# msgid:32 inode:32
MATOCU_FUSE_UNDEL = 459
# msgid:32 status:8
CUTOMA_FUSE_PURGE = 460
# msgid:32 inode:32
MATOCU_FUSE_PURGE = 461
# msgid:32 status:8
CUTOMA_FUSE_GETDIRSTATS = 462
# msgid:32 inode:32
MATOCU_FUSE_GETDIRSTATS = 463
# msgid:32 status:8
# msgid:32 inodes:32 dirs:32 files:32 ugfiles:32 mfiles:32 chunks:32 ugchunks:32 mchunks:32 length:64 size:64 gsize:64
CUTOMA_FUSE_TRUNCATE = 464
# msgid:32 inode:32 [opened:8] uid:32 gid:32 opened:8 length:64
MATOCU_FUSE_TRUNCATE = 465
# msgid:32 status:8
# msgid:32 attr:35B
CUTOMA_FUSE_REPAIR = 466
# msgid:32 inode:32 uid:32 gid:32
MATOCU_FUSE_REPAIR = 467
# msgid:32 status:8
# msgid:32 notchanged:32 erased:32 repaired:32
CUTOMA_FUSE_SNAPSHOT = 468
# msgid:32 inode:32 inode_dst:32 name_dst:NAME uid:32 gid:32 canoverwrite:8
MATOCU_FUSE_SNAPSHOT = 469
# msgid:32 status:8
CUTOMA_FUSE_GETRESERVED = 470
# msgid:32
MATOCU_FUSE_GETRESERVED = 471
# msgid:32 status:8
# msgid:32 N*[ name:NAME inode:32 ]
CUTOMA_FUSE_GETEATTR = 472
# msgid:32 inode:32 gmode:8
MATOCU_FUSE_GETEATTR = 473
# msgid:32 status:8
# msgid:32 eattrdirs:8 eattrfiles:8 eattrdirs*[ eattr:8 dirs:32 ] eattrfiles*[ eattr:8 files:32 ]
CUTOMA_FUSE_SETEATTR = 474
# msgid:32 inode:32 uid:32 eattr:8 smode:8
MATOCU_FUSE_SETEATTR = 475
# msgid:32 status:8
# msgid:32 changed:32 notchanged:32 notpermitted:32
CUTOMA_FUSE_QUOTACONTROL = 476
# msgid:32 inode:32 qflags:8 - delete quota
# msgid:32 inode:32 qflags:8 sinodes:32 slength:64 ssize:64 srealsize:64 hinodes:32 hlength:64 hsize:64 hrealsize:64 - set quota
MATOCU_FUSE_QUOTACONTROL = 477
# msgid:32 status:8
# msgid:32 qflags:8 sinodes:32 slength:64 ssize:64 srealsize:64 hinodes:32 hlength:64 hsize:64 hrealsize:64 curinodes:32 curlength:64 cursize:64 currealsize:64
CUTOMA_FUSE_DIR_REMOVED = 490
# msgid:32 N*[ inode:32 ]
MATOCU_FUSE_NOTIFY_ATTR = 491
# msgid:32 N*[ parent:32 inode:32 attr:35B ]
MATOCU_FUSE_NOTIFY_DIR = 492
# msgid:32 N*[ inode:32 ]
# special - reserved (opened) inodes - keep opened files.
CUTOMA_FUSE_RESERVED_INODES = 499
# N*[inode:32]
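# Note: for the paired FUSE request/reply commands above, the reply code is the
# request code plus one (e.g. CUTOMA_FUSE_LOOKUP = 406, MATOCU_FUSE_LOOKUP = 407);
# the notification codes 490-492 and CUTOMA_FUSE_RESERVED_INODES (499) do not
# follow that pairing.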
errtab = [
"OK",
"Operation not permitted",
"Not a directory",
"No such file or directory",
"Permission denied",
"File exists",
"Invalid argument",
"Directory not empty",
"Chunk lost",
"Out of memory",
"Index too big",
"Chunk locked",
"No chunk servers",
"No such chunk",
"Chunk is busy",
"Incorrect register BLOB",
"None of chunk servers performed requested operation",
"File not opened",
"Write not started",
"Wrong chunk version",
"Chunk already exists",
"No space left",
"IO error",
"Incorrect block number",
"Incorrect size",
"Incorrect offset",
"Can't connect",
"Incorrect chunk id",
"Disconnected",
"CRC error",
"Operation delayed",
"Can't create path",
"Data mismatch",
"Read-only file system",
"Quota exceeded",
"Bad session id",
"Password is needed",
"Incorrect password",
"Unknown MFS error",
]
def mfs_strerror(code):
if code > ERROR_MAX:
code = ERROR_MAX
return errtab[code]
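# Example: mfs_strerror(ERROR_ENOENT) returns "No such file or directory";
# any status code above ERROR_MAX is clamped and maps to "Unknown MFS error".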
S_IFMT = 0170000 # type of file
S_IFIFO = 0010000 # named pipe (fifo)
S_IFCHR = 0020000 # character special
S_IFDIR = 0040000 # directory
S_IFBLK = 0060000 # block special
S_IFREG = 0100000 # regular
S_IFLNK = 0120000 # symbolic link
S_IFSOCK = 0140000 # socket
S_IFWHT = 0160000 # whiteout
S_ISUID = 0004000 # set user id on execution
S_ISGID = 0002000 # set group id on execution
S_ISVTX = 0001000 # save swapped text even after use
S_IRUSR = 0000400 # read permission, owner
S_IWUSR = 0000200 # write permission, owner
S_IXUSR = 0000100 # execute/search permission, owner
|
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Litecoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages"""
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import CBlockHeader, MIN_VERSION_SUPPORTED, msg_addr, msg_block, MSG_BLOCK, msg_blocktxn, msg_cmpctblock, msg_feefilter, msg_getaddr, msg_getblocks, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_mempool, msg_ping, msg_pong, msg_reject, msg_sendcmpct, msg_sendheaders, msg_tx, MSG_TX, MSG_TYPE_MASK, msg_verack, msg_version, NODE_NETWORK, NODE_WITNESS, sha256
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
}
MAGIC_BYTES = {
"mainnet": b"\xfb\xc0\xb6\xdb", # mainnet
"testnet4": b"\xfd\xd2\xc8\xf1", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
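# Wire framing used by P2PConnection below: every message on the socket is
#   magic(4) + command(12, NUL-padded) + payload length(4, little-endian)
#   + checksum(4, first four bytes of double-SHA256 of the payload) + payload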
class P2PConnection(asyncio.Protocol):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
This class contains no logic for handling the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# The underlying transport of the connection.
# Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
self._transport = None
@property
def is_connected(self):
return self._transport is not None
def peer_connect(self, dstaddr, dstport, net="regtest"):
assert not self.is_connected
self.dstaddr = dstaddr
self.dstport = dstport
# The initial message to send after the connection was made:
self.on_connection_send_msg = None
self.recvbuf = b""
self.network = net
logger.debug('Connecting to Litecoin Node: %s:%d' % (self.dstaddr, self.dstport))
loop = NetworkThread.network_event_loop
conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
return conn_gen
def peer_disconnect(self):
# Connection could have already been closed by other end.
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
# Connection and disconnection methods
def connection_made(self, transport):
"""asyncio callback when a connection is opened."""
assert not self._transport
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None # Never used again
self.on_open()
def connection_lost(self, exc):
"""asyncio callback when a connection is closed."""
if exc:
logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
else:
logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
self._transport = None
self.recvbuf = b""
self.on_close()
# Socket read methods
def data_received(self, t):
"""asyncio callback when data is read from the socket."""
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command not in MESSAGEMAP:
raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def send_message(self, message):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
if not self.is_connected:
raise IOError('Not connected')
self._log_message("send", message)
tmsg = self._build_message(message)
def maybe_write():
if not self._transport:
return
# Python <3.4.4 does not have is_closing, so we have to check for
# its existence explicitly as long as Bitcoin Core supports all
# Python 3.4 versions.
if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
return
self._transport.write(tmsg)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
# Class utility methods
def _build_message(self, message):
"""Build a serialized P2P message"""
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a Litecoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self):
super().__init__()
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.on_connection_send_msg = vt # Will be sent soon after connection_made
return create_conn
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
self.verack_received = True
def on_version(self, message):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than or equal to {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
last_headers = self.last_message.get('headers')
if not last_headers:
return False
return last_headers.headers[0].rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
"""Waits for a getdata message.
Receiving any getdata message will satisfy the predicate. The last_message["getdata"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block/tx has been requested."""
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
"""Waits for a getheaders message.
Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested."""
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
network_event_loop = None
def __init__(self):
super().__init__(name="NetworkThread")
# There is only one event loop; no more than one NetworkThread may be created.
assert not self.network_event_loop
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
"""Start the network thread."""
self.network_event_loop.run_forever()
def close(self, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
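# Typical lifecycle (illustrative): a test constructs NetworkThread() and calls
# start() before opening any P2P connections, then calls close() during
# teardown once the peers have been disconnected.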
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
def __init__(self):
super().__init__()
self.reject_code_received = None
self.reject_reason_received = None
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
"""Search back through our block store for the locator, and reply with a headers message if found."""
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
maxheaders = 2000
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-maxheaders - 1:-1]
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def on_reject(self, message):
"""Store reject reason and code for testing."""
self.reject_code_received = message.code
self.reject_reason_received = message.reason
def send_blocks_and_test(self, blocks, node, *, success=True, request_block=True, reject_code=None, reject_reason=None, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if request_block is True: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_code and reject_reason are set: assert that the correct reject message is received"""
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
self.send_message(msg_headers([CBlockHeader(blocks[-1])]))
if request_block:
wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
if success:
wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
if reject_code is not None:
wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
if reject_reason is not None:
wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_code=None, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_code and reject_reason are set: assert that the correct reject message is received."""
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for tx in txs:
self.tx_store[tx.sha256] = tx
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = node.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
if reject_code is not None:
wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
if reject_reason is not None:
wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
|
|
from itertools import product
from operator import eq, ne
import warnings
import numpy as np
from toolz import take
from catalyst.lib.labelarray import LabelArray
from catalyst.testing import check_arrays, parameter_space, CatalystTestCase
from catalyst.testing.predicates import assert_equal
from catalyst.utils.compat import unicode
def rotN(l, N):
"""
Rotate a list of elements.
Pulls N elements off the front of the list and appends them to the end.
>>> rotN(['a', 'b', 'c', 'd'], 2)
['c', 'd', 'a', 'b']
>>> rotN(['a', 'b', 'c', 'd'], 3)
['d', 'a', 'b', 'c']
"""
assert len(l) >= N, "Can't rotate list by more than its length."
return l[N:] + l[:N]
def all_ufuncs():
ufunc_type = type(np.isnan)
return (f for f in vars(np).values() if isinstance(f, ufunc_type))
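# Illustrative: all_ufuncs() yields every numpy ufunc object exposed on the
# top-level numpy namespace (np.add, np.isnan, np.sqrt, ...).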
class LabelArrayTestCase(CatalystTestCase):
@classmethod
def init_class_fixtures(cls):
super(LabelArrayTestCase, cls).init_class_fixtures()
cls.rowvalues = row = ['', 'a', 'b', 'ab', 'a', '', 'b', 'ab', 'z']
cls.strs = np.array([rotN(row, i) for i in range(3)], dtype=object)
def test_fail_on_direct_construction(self):
# See http://docs.scipy.org/doc/numpy-1.10.0/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray # noqa
with self.assertRaises(TypeError) as e:
np.ndarray.__new__(LabelArray, (5, 5))
self.assertEqual(
str(e.exception),
"Direct construction of LabelArrays is not supported."
)
@parameter_space(
__fail_fast=True,
compval=['', 'a', 'z', 'not in the array'],
shape=[(27,), (3, 9), (3, 3, 3)],
array_astype=(bytes, unicode, object),
missing_value=('', 'a', 'not in the array', None),
)
def test_compare_to_str(self,
compval,
shape,
array_astype,
missing_value):
strs = self.strs.reshape(shape).astype(array_astype)
if missing_value is None:
# As of numpy 1.9.2, object array != None returns just False
# instead of an array, with a deprecation warning saying the
# behavior will change in the future. Work around that by just
# using the ufunc.
notmissing = np.not_equal(strs, missing_value)
else:
if not isinstance(missing_value, array_astype):
missing_value = array_astype(missing_value, 'utf-8')
notmissing = (strs != missing_value)
arr = LabelArray(strs, missing_value=missing_value)
if not isinstance(compval, array_astype):
compval = array_astype(compval, 'utf-8')
# arr.missing_value should behave like NaN.
check_arrays(
arr == compval,
(strs == compval) & notmissing,
)
check_arrays(
arr != compval,
(strs != compval) & notmissing,
)
np_startswith = np.vectorize(lambda elem: elem.startswith(compval))
check_arrays(
arr.startswith(compval),
np_startswith(strs) & notmissing,
)
np_endswith = np.vectorize(lambda elem: elem.endswith(compval))
check_arrays(
arr.endswith(compval),
np_endswith(strs) & notmissing,
)
np_contains = np.vectorize(lambda elem: compval in elem)
check_arrays(
arr.has_substring(compval),
np_contains(strs) & notmissing,
)
@parameter_space(
__fail_fast=True,
f=[
lambda s: str(len(s)),
lambda s: s[0],
lambda s: ''.join(reversed(s)),
lambda s: '',
]
)
def test_map(self, f):
data = np.array(
[['E', 'GHIJ', 'HIJKLMNOP', 'DEFGHIJ'],
['CDE', 'ABCDEFGHIJKLMNOPQ', 'DEFGHIJKLMNOPQRS', 'ABCDEFGHIJK'],
['DEFGHIJKLMNOPQR', 'DEFGHI', 'DEFGHIJ', 'FGHIJK'],
['EFGHIJKLM', 'EFGHIJKLMNOPQRS', 'ABCDEFGHI', 'DEFGHIJ']],
dtype=object,
)
la = LabelArray(data, missing_value=None)
numpy_transformed = np.vectorize(f)(data)
la_transformed = la.map(f).as_string_array()
assert_equal(numpy_transformed, la_transformed)
@parameter_space(missing=['A', None])
def test_map_ignores_missing_value(self, missing):
data = np.array([missing, 'B', 'C'], dtype=object)
la = LabelArray(data, missing_value=missing)
def increment_char(c):
return chr(ord(c) + 1)
result = la.map(increment_char)
expected = LabelArray([missing, 'C', 'D'], missing_value=missing)
assert_equal(result.as_string_array(), expected.as_string_array())
@parameter_space(
__fail_fast=True,
f=[
lambda s: 0,
lambda s: 0.0,
lambda s: object(),
]
)
def test_map_requires_f_to_return_a_string_or_none(self, f):
la = LabelArray(self.strs, missing_value=None)
with self.assertRaises(TypeError):
la.map(f)
def test_map_can_only_return_none_if_missing_value_is_none(self):
# Should work.
la = LabelArray(self.strs, missing_value=None)
result = la.map(lambda x: None)
check_arrays(
result,
LabelArray(np.full_like(self.strs, None), missing_value=None),
)
la = LabelArray(self.strs, missing_value="__MISSING__")
with self.assertRaises(TypeError):
la.map(lambda x: None)
@parameter_space(
__fail_fast=True,
missing_value=('', 'a', 'not in the array', None),
)
def test_compare_to_str_array(self, missing_value):
strs = self.strs
shape = strs.shape
arr = LabelArray(strs, missing_value=missing_value)
if missing_value is None:
# As of numpy 1.9.2, object array != None returns just False
# instead of an array, with a deprecation warning saying the
# behavior will change in the future. Work around that by just
# using the ufunc.
notmissing = np.not_equal(strs, missing_value)
else:
notmissing = (strs != missing_value)
check_arrays(arr.not_missing(), notmissing)
check_arrays(arr.is_missing(), ~notmissing)
# The arrays are equal everywhere, but comparisons against the
# missing_value should always produce False
check_arrays(strs == arr, notmissing)
check_arrays(strs != arr, np.zeros_like(strs, dtype=bool))
def broadcastable_row(value, dtype):
return np.full((shape[0], 1), value, dtype=strs.dtype)
def broadcastable_col(value, dtype):
return np.full((1, shape[1]), value, dtype=strs.dtype)
# Test comparison between arr and a like-shaped 2D array, a column
# vector, and a row vector.
for comparator, dtype, value in product((eq, ne),
(bytes, unicode, object),
set(self.rowvalues)):
check_arrays(
comparator(arr, np.full_like(strs, value)),
comparator(strs, value) & notmissing,
)
check_arrays(
comparator(arr, broadcastable_row(value, dtype=dtype)),
comparator(strs, value) & notmissing,
)
check_arrays(
comparator(arr, broadcastable_col(value, dtype=dtype)),
comparator(strs, value) & notmissing,
)
@parameter_space(
__fail_fast=True,
slice_=[
0, 1, -1,
slice(None),
slice(0, 0),
slice(0, 3),
slice(1, 4),
slice(0),
slice(None, 1),
slice(0, 4, 2),
(slice(None), 1),
(slice(None), slice(None)),
(slice(None), slice(1, 2)),
]
)
def test_slicing_preserves_attributes(self, slice_):
arr = LabelArray(self.strs.reshape((9, 3)), missing_value='')
sliced = arr[slice_]
self.assertIsInstance(sliced, LabelArray)
self.assertIs(sliced.categories, arr.categories)
self.assertIs(sliced.reverse_categories, arr.reverse_categories)
self.assertIs(sliced.missing_value, arr.missing_value)
def test_infer_categories(self):
"""
Test that categories are inferred in sorted order if they're not
explicitly passed.
"""
arr1d = LabelArray(self.strs, missing_value='')
codes1d = arr1d.as_int_array()
self.assertEqual(arr1d.shape, self.strs.shape)
self.assertEqual(arr1d.shape, codes1d.shape)
categories = arr1d.categories
unique_rowvalues = set(self.rowvalues)
# There should be an entry in categories for each unique row value, and
# each integer stored in the data array should be an index into
# categories.
self.assertEqual(list(categories), sorted(set(self.rowvalues)))
self.assertEqual(
set(codes1d.ravel()),
set(range(len(unique_rowvalues)))
)
for idx, value in enumerate(arr1d.categories):
check_arrays(
self.strs == value,
arr1d.as_int_array() == idx,
)
# It should be equivalent to pass the same set of categories manually.
arr1d_explicit_categories = LabelArray(
self.strs,
missing_value='',
categories=arr1d.categories,
)
check_arrays(arr1d, arr1d_explicit_categories)
for shape in (9, 3), (3, 9), (3, 3, 3):
strs2d = self.strs.reshape(shape)
arr2d = LabelArray(strs2d, missing_value='')
codes2d = arr2d.as_int_array()
self.assertEqual(arr2d.shape, shape)
check_arrays(arr2d.categories, categories)
for idx, value in enumerate(arr2d.categories):
check_arrays(strs2d == value, codes2d == idx)
def test_reject_ufuncs(self):
"""
The internal values of a LabelArray should be opaque to numpy ufuncs.
Test that all ufuncs fail.
"""
l = LabelArray(self.strs, '')
ints = np.arange(len(l))
with warnings.catch_warnings():
# Some ufuncs return NotImplemented, but warn that they will fail
# in the future. Both outcomes are fine, so ignore the warnings.
warnings.filterwarnings(
'ignore',
message="unorderable dtypes.*",
category=DeprecationWarning,
)
warnings.filterwarnings(
'ignore',
message="elementwise comparison failed.*",
category=FutureWarning,
)
for func in all_ufuncs():
# Different ufuncs vary between returning NotImplemented and
# raising a TypeError when provided with unknown dtypes.
# This is a bit unfortunate, but still better than silently
# accepting an int array.
try:
if func.nin == 1:
ret = func(l)
elif func.nin == 2:
ret = func(l, ints)
else:
self.fail("Who added a ternary ufunc !?!")
except TypeError:
pass
else:
self.assertIs(ret, NotImplemented)
@parameter_space(
__fail_fast=True,
val=['', 'a', 'not in the array', None],
missing_value=['', 'a', 'not in the array', None],
)
def test_setitem_scalar(self, val, missing_value):
arr = LabelArray(self.strs, missing_value=missing_value)
if not arr.has_label(val):
self.assertTrue(
(val == 'not in the array')
or (val is None and missing_value is not None)
)
for slicer in [(0, 0), (0, 1), 1]:
with self.assertRaises(ValueError):
arr[slicer] = val
return
arr[0, 0] = val
self.assertEqual(arr[0, 0], val)
arr[0, 1] = val
self.assertEqual(arr[0, 1], val)
arr[1] = val
if val == missing_value:
self.assertTrue(arr.is_missing()[1].all())
else:
self.assertTrue((arr[1] == val).all())
self.assertTrue((arr[1].as_string_array() == val).all())
arr[:, -1] = val
if val == missing_value:
self.assertTrue(arr.is_missing()[:, -1].all())
else:
self.assertTrue((arr[:, -1] == val).all())
self.assertTrue((arr[:, -1].as_string_array() == val).all())
arr[:] = val
if val == missing_value:
self.assertTrue(arr.is_missing().all())
else:
self.assertFalse(arr.is_missing().any())
self.assertTrue((arr == val).all())
def test_setitem_array(self):
arr = LabelArray(self.strs, missing_value=None)
orig_arr = arr.copy()
# Write a row.
self.assertFalse(
(arr[0] == arr[1]).all(),
"This test doesn't test anything because rows 0"
" and 1 are already equal!"
)
arr[0] = arr[1]
for i in range(arr.shape[1]):
self.assertEqual(arr[0, i], arr[1, i])
# Write a column.
self.assertFalse(
(arr[:, 0] == arr[:, 1]).all(),
"This test doesn't test anything because columns 0"
" and 1 are already equal!"
)
arr[:, 0] = arr[:, 1]
for i in range(arr.shape[0]):
self.assertEqual(arr[i, 0], arr[i, 1])
# Write the whole array.
arr[:] = orig_arr
check_arrays(arr, orig_arr)
@staticmethod
def check_roundtrip(arr):
assert_equal(
arr.as_string_array(),
LabelArray(
arr.as_string_array(),
arr.missing_value,
).as_string_array(),
)
@staticmethod
def create_categories(width, plus_one):
length = int(width / 8) + plus_one
return [
''.join(cs)
for cs in take(
2 ** width + plus_one,
product([chr(c) for c in range(256)], repeat=length),
)
]
def test_narrow_code_storage(self):
create_categories = self.create_categories
check_roundtrip = self.check_roundtrip
# uint8
categories = create_categories(8, plus_one=False)
arr = LabelArray(
[],
missing_value=categories[0],
categories=categories,
)
self.assertEqual(arr.itemsize, 1)
check_roundtrip(arr)
# uint8 inference
arr = LabelArray(categories, missing_value=categories[0])
self.assertEqual(arr.itemsize, 1)
check_roundtrip(arr)
# just over uint8
categories = create_categories(8, plus_one=True)
arr = LabelArray(
[],
missing_value=categories[0],
categories=categories,
)
self.assertEqual(arr.itemsize, 2)
check_roundtrip(arr)
# fits in uint16
categories = create_categories(16, plus_one=False)
arr = LabelArray(
[], missing_value=categories[0],
categories=categories,
)
self.assertEqual(arr.itemsize, 2)
check_roundtrip(arr)
# uint16 inference
arr = LabelArray(categories, missing_value=categories[0])
self.assertEqual(arr.itemsize, 2)
check_roundtrip(arr)
# just over uint16
categories = create_categories(16, plus_one=True)
arr = LabelArray(
[],
missing_value=categories[0],
categories=categories,
)
self.assertEqual(arr.itemsize, 4)
check_roundtrip(arr)
# uint32 inference
arr = LabelArray(categories, missing_value=categories[0])
self.assertEqual(arr.itemsize, 4)
check_roundtrip(arr)
# NOTE: we could do this for 32 and 64; however, no one has enough RAM
# or time for that.
def test_narrow_condense_back_to_valid_size(self):
categories = ['a'] * (2 ** 8 + 1)
arr = LabelArray(categories, missing_value=categories[0])
assert_equal(arr.itemsize, 1)
self.check_roundtrip(arr)
# longer than int16 but still fits when deduped
categories = self.create_categories(16, plus_one=False)
categories.append(categories[0])
arr = LabelArray(categories, missing_value=categories[0])
assert_equal(arr.itemsize, 2)
self.check_roundtrip(arr)
def test_map_shrinks_code_storage_if_possible(self):
arr = LabelArray(
# Drop the last value so we fit in a uint16 with None as a missing
# value.
self.create_categories(16, plus_one=False)[:-1],
missing_value=None,
)
self.assertEqual(arr.itemsize, 2)
def either_A_or_B(s):
return ('A', 'B')[sum(ord(c) for c in s) % 2]
result = arr.map(either_A_or_B)
self.assertEqual(set(result.categories), {'A', 'B', None})
self.assertEqual(result.itemsize, 1)
assert_equal(
np.vectorize(either_A_or_B)(arr.as_string_array()),
result.as_string_array(),
)
def test_map_never_increases_code_storage_size(self):
# This tests a pathological case where a user maps an impure function
# that returns a different label on every invocation, which in a naive
# implementation could cause us to need to **increase** the size of our
# codes after a map.
#
# This doesn't happen, however, because we guarantee that the user's
# mapping function will be called on each unique category exactly once,
# which means we can never increase the number of categories in the
# LabelArray after mapping.
# Using all but one of the categories so that we still fit in a uint8
# with an extra category for None as a missing value.
categories = self.create_categories(8, plus_one=False)[:-1]
larger_categories = self.create_categories(16, plus_one=False)
# Double the length of the categories so that we have to increase the
# required size after our map.
categories_twice = categories + categories
arr = LabelArray(categories_twice, missing_value=None)
assert_equal(arr.itemsize, 1)
gen_unique_categories = iter(larger_categories)
def new_string_every_time(c):
# Return a new unique category every time so that every result is
# different.
return next(gen_unique_categories)
result = arr.map(new_string_every_time)
# Result should still be of size 1.
assert_equal(result.itemsize, 1)
# Result should be the first `len(categories)` entries from the larger
# categories, repeated twice.
expected = LabelArray(
larger_categories[:len(categories)] * 2,
missing_value=None,
)
assert_equal(result.as_string_array(), expected.as_string_array())
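# Illustrative note (an assumption drawn from the comment above, not part of the
# original tests): because map is applied once per unique category rather than
# once per element, the call above invokes new_string_every_time only
# len(categories) times for the 2 * len(categories)-element array, so the result
# can never hold more categories than the input and the uint8 code width holds.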
def manual_narrow_condense_back_to_valid_size_slow(self):
"""This test is really slow so we don't want it run by default.
"""
# tests that we don't try to create an 'int24' (which is meaningless)
categories = self.create_categories(24, plus_one=False)
categories.append(categories[0])
arr = LabelArray(categories, missing_value=categories[0])
assert_equal(arr.itemsize, 4)
self.check_roundtrip(arr)
|
|
from zeroos.core0.client import Client as core0_client
import time, re
from unittest import TestCase
class Client(TestCase):
def __init__(self, ip, password):
self.client = core0_client(ip, password=password)
def stdout(self, resource):
return resource.get().stdout.replace('\n', '').lower()
def bash(self, cmd):
response = self.client.bash(cmd).get()
return response
def get_nodes_cpus(self):
info = self.client.info.cpu()
cpuInfo = []
for processor in info:
cpuInfo.append(processor)
return cpuInfo
def get_nodes_nics(self):
r = self.client.bash('ip -br a').get().stdout
nics = [x.split()[0] for x in r.splitlines()]
nicInfo = []
for nic in nics:
if '@' in nic:
nic = nic[:nic.index('@')]
addrs = self.client.bash('ip -br a show "{}"'.format(nic)).get()
addrs = addrs.stdout.splitlines()[0].split()[2:]
mtu = int(self.stdout(self.client.bash('cat /sys/class/net/{}/mtu'.format(nic))))
hardwareaddr = self.stdout(self.client.bash('cat /sys/class/net/{}/address'.format(nic)))
if hardwareaddr == '00:00:00:00:00:00':
hardwareaddr = ''
addrs = [x for x in addrs]
if addrs == []:
addrs = None
tmp = {"name": nic, "hardwareaddr": hardwareaddr, "mtu": mtu, "addrs": addrs}
nicInfo.append(tmp)
return nicInfo
def get_node_bridges(self):
bridgesInfo = []
nics = self.client.bash('ls /sys/class/net').get().stdout.splitlines()
for nic in nics:
status = self.client.bash('cat /sys/class/net/{}/operstate'.format(nic)).get().stdout.strip()
bridge = {"name": nic, "status": status}
bridgesInfo.append(bridge)
return bridgesInfo
def get_nodes_mem(self):
lines = self.client.bash('cat /proc/meminfo').get().stdout.splitlines()
memInfo = {'available': 0, 'buffers': 0, 'cached': 0,
'inactive': 0, 'total': 0}
for line in lines:
line = line.replace('\t', '').strip()
key = line[:line.find(':')].lower()
value = line[line.find(':') + 2:line.find('kB')].strip()
if 'mem' == key[:3]:
key = key[3:]
if key in memInfo.keys():
memInfo[key] = int(value) * 1024
return memInfo
def get_nodes_info(self):
hostname = self.client.system('uname -n').get().stdout.strip()
krn_name = self.client.system('uname -s').get().stdout.strip().lower()
return {"hostname": hostname, "os": krn_name}
def get_nodes_disks(self):
disks_info = []
disks = self.client.disk.list()['blockdevices']
for disk in disks:
disk_type = None
disk_parts = []
if 'children' in disk.keys():
for part in disk['children']:
disk_parts.append({
"name": '/dev/{}'.format(part['name']),
"size": int(int(part['size']) / 1073741824),
"partuuid": part['partuuid'] or '',
"label": part['label'] or '',
"fstype": part['fstype'] or ''
})
if int(disk['rota']):
if int(disk['size']) > (1073741824 * 1024 * 7):  # larger than 7 TiB (size is in bytes)
disk_type = 'archive'
else:
disk_type = 'hdd'
else:
if 'nvme' in disk['name']:
disk_type = 'nvme'
else:
disk_type = 'ssd'
disks_info.append({
"device": '/dev/{}'.format(disk['name']),
"size": int(int(disk['size']) / 1073741824),
"type": disk_type,
"partitions": disk_parts
})
return disks_info
def get_jobs_list(self):
jobs = self.client.job.list()
gridjobs = []
for job in jobs:
temp = {}  # fresh dict per job so appended entries stay independent
temp['id'] = job['cmd']['id']
if job['cmd']['arguments']:
if ('name' in job['cmd']['arguments'].keys()):
temp['name'] = job['cmd']['arguments']['name']
temp['starttime'] = job['starttime']
gridjobs.append(temp)
return gridjobs
def get_node_state(self):
state = self.client.json('core.state', {})
del state['cpu']
return state
def start_job(self):
job_id = self.client.system("tailf /etc/nsswitch.conf").id
jobs = self.client.job.list()
for job in jobs:
if job['cmd']['id'] == job_id:
return job_id
return False
def start_process(self):
self.client.system("tailf /etc/nsswitch.conf")
processes = self.get_processes_list()
for process in processes:
if "/etc/nsswitch.conf" in process['cmdline']:
return process['pid']
return False
def getFreeDisks(self):
freeDisks = []
disks = self.client.disk.list()['blockdevices']
for disk in disks:
if 'children' not in disk.keys():
disktype = None
diskSize = int(disk['size']) / (1024 * 1024 * 1024)
if disk['rota'] == '1':
if int(diskSize) > (1024 * 7):
disktype = 'archive'
else:
disktype = 'hdd'
else:
if 'nvme' in disk['kname']:
disktype = 'nvme'
else:
disktype = 'ssd'
current_disk = {'type':disktype, 'name':'/dev/{}'.format(disk['kname']), 'size':int(diskSize)}
freeDisks.append(current_disk)
return freeDisks
def get_processes_list(self):
processes = self.client.process.list()
return processes
def get_container_client(self, container_name):
container = self.client.container.find(container_name)
self.assertTrue(container, "Can't get container for the given container name")
container_id = list(container.keys())[0]
container_client = self.client.container.client(int(container_id))
return container_client
def get_container_info(self, container_id):
container = (self.client.container.find(container_id))
self.assertTrue(container, "Can't get container for the given container name")
container_id = list(container.keys())[0]
container_info = {}
golden_data = self.client.container.list().get(str(container_id), None)
self.assertTrue(golden_data, "No Golden data")
golden_value = golden_data['container']
container_info['nics'] = (
[{i: nic[i] for i in nic if i != 'hwaddr'} for nic in golden_value['arguments']['nics']])
container_info['ports'] = (['%s:%s' % (key, value) for key, value in golden_value['arguments']['port'].items()])
container_info['hostNetworking'] = golden_value['arguments']['host_network']
container_info['hostname'] = golden_value['arguments']['hostname']
container_info['flist'] = golden_value['arguments']['root']
container_info['storage'] = golden_value['arguments']['storage']
return container_info
def get_container_job_list(self, container_name):
container_id = list(self.client.container.find(container_name).keys())[0]
golden_values = []
container = self.client.container.client(int(container_id))
container_data = container.job.list()
# cannot compare directly, as job.list is itself considered a job and has a different id every time it is called
for i, golden_value in enumerate(container_data[:]):
if golden_value.get('command', "") == 'job.list':
container_data.pop(i)
continue
golden_values.append((golden_value['cmd']['id'], golden_value['starttime']))
return set(golden_values)
def wait_on_container_update(self, container_name, timeout, removed):
for _ in range(timeout):
if removed:
if not self.client.container.find(container_name):
return True
else:
if self.client.container.find(container_name):
return True
time.sleep(1)
return False
def wait_on_container_job_update(self, container_name, job_id, timeout, removed):
container_id = int(list(self.client.container.find(container_name).keys())[0])
container = self.client.container.client(container_id)
for _ in range(timeout):
if removed:
if job_id not in [item['cmd']['id'] for item in container.job.list()]:
return True
else:
if job_id in [item['cmd']['id'] for item in container.job.list()]:
return True
time.sleep(1)
return False
def get_client_zt_ip(self, client):
nics = client.info.nic()
nic = [nic for nic in nics if 'zt' in nic['name']]
if not nic:
self.assertTrue(nic, 'No NIC found')
address = nic[0]['addrs'][0]['addr']
if not address:
self.lg.info('can\'t find zerotier network interface')
self.assertTrue(address, 'No address found')
ip = address[:address.find('/')]
return ip
def get_container_bridge_ip(self, client, ip_range):
nics = client.info.nic()
full_ip_range = self.get_ip_range(ip_range)
self.assertTrue(nics, 'No NICs found')
print(nics)
ip = ''
for nic in nics:
addresses = [x['addr'] for x in nic['addrs'] if x['addr'][:x['addr'].find('/')] in full_ip_range]
if addresses:
address = addresses[0]
ip = address[:address.find('/')]
return ip
def check_container_vlan_vxlan_ip(self, client, cidr_ip):
nics = client.info.nic()
for nic in nics:
address = [x['addr'] for x in nic['addrs'] if x['addr'][:x['addr'].find('/')] == cidr_ip]
if address:
return True
return False
def create_ovs_container(self):
containers = self.client.container.find('ovs')
ovs_exist = [key for key, value in containers.items()]
if not ovs_exist:
ovs_flist = "https://hub.gig.tech/gig-official-apps/ovs.flist"
ovs = int(self.client.container.create(ovs_flist, host_network=True, tags=['ovs'], privileged=True).get())
ovs_client = self.client.container.client(ovs)
time.sleep(2)
ovs_client.json('ovs.bridge-add', {"bridge": "backplane"})
ovs_client.json('ovs.vlan-ensure', {'master': 'backplane', 'vlan': 2000, 'name': 'vxbackend'})
def get_ip_range(self, ip_range):
base = re.findall(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.", ip_range[0])[0]
start = int(ip_range[0].split('.')[3])
end = int(ip_range[1].split('.')[3])
for i in range(start+1, end):
ip_range.append(base+str(i))
return ip_range
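# Illustrative sketch (not part of the original helper): get_ip_range expands an
# inclusive [start, end] pair sharing a /24 prefix into the intermediate
# addresses, keeping the two endpoints at the front of the list. Assuming the
# implementation above:
#
# get_ip_range(['10.0.0.10', '10.0.0.13'])
# -> ['10.0.0.10', '10.0.0.13', '10.0.0.11', '10.0.0.12']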
|
|
"""
Files Pipeline
See documentation in topics/media-pipeline.rst
"""
import hashlib
import os
import os.path
import time
import logging
from email.utils import parsedate_tz, mktime_tz
from six.moves.urllib.parse import urlparse
from collections import defaultdict
import six
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from twisted.internet import defer, threads
from scrapy.pipelines.media import MediaPipeline
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.http import Request
from scrapy.utils.misc import md5sum
from scrapy.utils.log import failure_to_exc_info
logger = logging.getLogger(__name__)
class FileException(Exception):
"""General media error exception"""
class FSFilesStore(object):
def __init__(self, basedir):
if '://' in basedir:
basedir = basedir.split('://', 1)[1]
self.basedir = basedir
self._mkdir(self.basedir)
self.created_directories = defaultdict(set)
def persist_file(self, path, buf, info, meta=None, headers=None):
absolute_path = self._get_filesystem_path(path)
self._mkdir(os.path.dirname(absolute_path), info)
with open(absolute_path, 'wb') as f:
f.write(buf.getvalue())
def stat_file(self, path, info):
absolute_path = self._get_filesystem_path(path)
try:
last_modified = os.path.getmtime(absolute_path)
except os.error:  # file does not exist yet or cannot be stat-ed
return {}
with open(absolute_path, 'rb') as f:
checksum = md5sum(f)
return {'last_modified': last_modified, 'checksum': checksum}
def _get_filesystem_path(self, path):
path_comps = path.split('/')
return os.path.join(self.basedir, *path_comps)
def _mkdir(self, dirname, domain=None):
seen = self.created_directories[domain] if domain else set()
if dirname not in seen:
if not os.path.exists(dirname):
os.makedirs(dirname)
seen.add(dirname)
class S3FilesStore(object):
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = None
POLICY = 'public-read'
HEADERS = {
'Cache-Control': 'max-age=172800',
}
def __init__(self, uri):
assert uri.startswith('s3://')
self.bucket, self.prefix = uri[5:].split('/', 1)
def stat_file(self, path, info):
def _onsuccess(boto_key):
checksum = boto_key.etag.strip('"')
last_modified = boto_key.last_modified
modified_tuple = parsedate_tz(last_modified)
modified_stamp = int(mktime_tz(modified_tuple))
return {'checksum': checksum, 'last_modified': modified_stamp}
return self._get_boto_key(path).addCallback(_onsuccess)
def _get_boto_bucket(self):
from boto.s3.connection import S3Connection
# disable ssl (is_secure=False) because of this python bug:
# http://bugs.python.org/issue5103
c = S3Connection(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, is_secure=False)
return c.get_bucket(self.bucket, validate=False)
def _get_boto_key(self, path):
b = self._get_boto_bucket()
key_name = '%s%s' % (self.prefix, path)
return threads.deferToThread(b.get_key, key_name)
def persist_file(self, path, buf, info, meta=None, headers=None):
"""Upload file to S3 storage"""
b = self._get_boto_bucket()
key_name = '%s%s' % (self.prefix, path)
k = b.new_key(key_name)
if meta:
for metakey, metavalue in six.iteritems(meta):
k.set_metadata(metakey, str(metavalue))
h = self.HEADERS.copy()
if headers:
h.update(headers)
buf.seek(0)
return threads.deferToThread(k.set_contents_from_string, buf.getvalue(),
headers=h, policy=self.POLICY)
class FilesPipeline(MediaPipeline):
"""Abstract pipeline that implement the file downloading
This pipeline tries to minimize network transfers and file processing,
doing stat of the files and determining if file is new, uptodate or
expired.
`new` files are those that pipeline never processed and needs to be
downloaded from supplier site the first time.
`uptodate` files are the ones that the pipeline processed and are still
valid files.
`expired` files are those that pipeline already processed but the last
modification was made long time ago, so a reprocessing is recommended to
refresh it in case of change.
"""
MEDIA_NAME = "file"
EXPIRES = 90
STORE_SCHEMES = {
'': FSFilesStore,
'file': FSFilesStore,
's3': S3FilesStore,
}
DEFAULT_FILES_URLS_FIELD = 'file_urls'
DEFAULT_FILES_RESULT_FIELD = 'files'
def __init__(self, store_uri, download_func=None):
if not store_uri:
raise NotConfigured
self.store = self._get_store(store_uri)
super(FilesPipeline, self).__init__(download_func=download_func)
@classmethod
def from_settings(cls, settings):
s3store = cls.STORE_SCHEMES['s3']
s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
cls.FILES_URLS_FIELD = settings.get('FILES_URLS_FIELD', cls.DEFAULT_FILES_URLS_FIELD)
cls.FILES_RESULT_FIELD = settings.get('FILES_RESULT_FIELD', cls.DEFAULT_FILES_RESULT_FIELD)
cls.EXPIRES = settings.getint('FILES_EXPIRES', 90)
store_uri = settings['FILES_STORE']
return cls(store_uri)
def _get_store(self, uri):
if os.path.isabs(uri): # to support win32 paths like: C:\\some\dir
scheme = 'file'
else:
scheme = urlparse(uri).scheme
store_cls = self.STORE_SCHEMES[scheme]
return store_cls(uri)
def media_to_download(self, request, info):
def _onsuccess(result):
if not result:
return # returning None forces download
last_modified = result.get('last_modified', None)
if not last_modified:
return # returning None forces download
age_seconds = time.time() - last_modified
age_days = age_seconds / 60 / 60 / 24
if age_days > self.EXPIRES:
return # returning None forces download
referer = request.headers.get('Referer')
logger.debug(
'File (uptodate): Downloaded %(medianame)s from %(request)s '
'referred in <%(referer)s>',
{'medianame': self.MEDIA_NAME, 'request': request,
'referer': referer},
extra={'spider': info.spider}
)
self.inc_stats(info.spider, 'uptodate')
checksum = result.get('checksum', None)
return {'url': request.url, 'path': path, 'checksum': checksum}
path = self.file_path(request, info=info)
dfd = defer.maybeDeferred(self.store.stat_file, path, info)
dfd.addCallbacks(_onsuccess, lambda _: None)
dfd.addErrback(
lambda f:
logger.error(self.__class__.__name__ + '.store.stat_file',
exc_info=failure_to_exc_info(f),
extra={'spider': info.spider})
)
return dfd
def media_failed(self, failure, request, info):
if not isinstance(failure.value, IgnoreRequest):
referer = request.headers.get('Referer')
logger.warning(
'File (unknown-error): Error downloading %(medianame)s from '
'%(request)s referred in <%(referer)s>: %(exception)s',
{'medianame': self.MEDIA_NAME, 'request': request,
'referer': referer, 'exception': failure.value},
extra={'spider': info.spider}
)
raise FileException
def media_downloaded(self, response, request, info):
referer = request.headers.get('Referer')
if response.status != 200:
logger.warning(
'File (code: %(status)s): Error downloading file from '
'%(request)s referred in <%(referer)s>',
{'status': response.status,
'request': request, 'referer': referer},
extra={'spider': info.spider}
)
raise FileException('download-error')
if not response.body:
logger.warning(
'File (empty-content): Empty file from %(request)s referred '
'in <%(referer)s>: no-content',
{'request': request, 'referer': referer},
extra={'spider': info.spider}
)
raise FileException('empty-content')
status = 'cached' if 'cached' in response.flags else 'downloaded'
logger.debug(
'File (%(status)s): Downloaded file from %(request)s referred in '
'<%(referer)s>',
{'status': status, 'request': request, 'referer': referer},
extra={'spider': info.spider}
)
self.inc_stats(info.spider, status)
try:
path = self.file_path(request, response=response, info=info)
checksum = self.file_downloaded(response, request, info)
except FileException as exc:
logger.warning(
'File (error): Error processing file from %(request)s '
'referred in <%(referer)s>: %(errormsg)s',
{'request': request, 'referer': referer, 'errormsg': str(exc)},
extra={'spider': info.spider}, exc_info=True
)
raise
except Exception as exc:
logger.error(
'File (unknown-error): Error processing file from %(request)s '
'referred in <%(referer)s>',
{'request': request, 'referer': referer},
exc_info=True, extra={'spider': info.spider}
)
raise FileException(str(exc))
return {'url': request.url, 'path': path, 'checksum': checksum}
def inc_stats(self, spider, status):
spider.crawler.stats.inc_value('file_count', spider=spider)
spider.crawler.stats.inc_value('file_status_count/%s' % status, spider=spider)
### Overridable Interface
def get_media_requests(self, item, info):
return [Request(x) for x in item.get(self.FILES_URLS_FIELD, [])]
def file_downloaded(self, response, request, info):
path = self.file_path(request, response=response, info=info)
buf = BytesIO(response.body)
self.store.persist_file(path, buf, info)
checksum = md5sum(buf)
return checksum
def item_completed(self, results, item, info):
if isinstance(item, dict) or self.FILES_RESULT_FIELD in item.fields:
item[self.FILES_RESULT_FIELD] = [x for ok, x in results if ok]
return item
def file_path(self, request, response=None, info=None):
## start of deprecation warning block (can be removed in the future)
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('FilesPipeline.file_key(url) method is deprecated, please use '
'file_path(request, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from file_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if file_key() method has been overridden
if not hasattr(self.file_key, '_base'):
_warn()
return self.file_key(url)
## end of deprecation warning block
media_guid = hashlib.sha1(url).hexdigest() # change to request.url after deprecation
media_ext = os.path.splitext(url)[1] # change to request.url after deprecation
return 'full/%s%s' % (media_guid, media_ext)
# deprecated
def file_key(self, url):
return self.file_path(url)
file_key._base = True
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Enable 'with' statements in Python 2.5
from __future__ import with_statement
import optparse
import os.path
import shutil
import subprocess
import sys
import time
import traceback
ARCH_MAP = {
'32': {
'gyp_arch': 'ia32',
'scons_platform': 'x86-32',
},
'64': {
'gyp_arch': 'x64',
'scons_platform': 'x86-64',
},
'arm': {
'gyp_arch': 'arm',
'scons_platform': 'arm',
},
}
def GetHostPlatform():
sys_platform = sys.platform.lower()
if sys_platform.startswith('linux'):
return 'linux'
elif sys_platform in ('win', 'win32', 'windows', 'cygwin'):
return 'win'
elif sys_platform in ('darwin', 'mac'):
return 'mac'
else:
raise Exception('Can not determine the platform!')
def SetDefaultContextAttributes(context):
"""
Set default values for the attributes needed by the SCons function, so that
SCons can be run without needing ParseStandardCommandLine
"""
context['platform'] = GetHostPlatform()
context['mode'] = 'opt'
context['default_scons_mode'] = ['opt-host', 'nacl']
context['clang'] = False
context['asan'] = False
context['pnacl'] = False
context['use_glibc'] = False
context['use_breakpad_tools'] = False
context['max_jobs'] = 8
def ParseStandardCommandLine(context):
"""
The standard buildbot scripts require 3 arguments to run. The first
argument (dbg/opt) controls if the build is a debug or a release build. The
second argument (32/64) controls the machine architecture being targeted.
The third argument (newlib/glibc) controls which C library we're using for
the nexes. Different buildbots may have different sets of arguments.
"""
parser = optparse.OptionParser()
parser.add_option('-n', '--dry-run', dest='dry_run', default=False,
action='store_true', help='Do not execute any commands.')
parser.add_option('--inside-toolchain', dest='inside_toolchain',
default=bool(os.environ.get('INSIDE_TOOLCHAIN')),
action='store_true', help='Inside toolchain build.')
parser.add_option('--clang', dest='clang', default=False,
action='store_true', help='Build trusted code with Clang.')
parser.add_option('--coverage', dest='coverage', default=False,
action='store_true',
help='Build and test for code coverage.')
parser.add_option('--validator', dest='validator', default=False,
action='store_true',
help='Only run validator regression test')
parser.add_option('--asan', dest='asan', default=False,
action='store_true', help='Build trusted code with ASan.')
parser.add_option('--step-suffix', metavar='SUFFIX', default='',
help='Append SUFFIX to buildbot step names.')
parser.add_option('--no-gyp', dest='no_gyp', default=False,
action='store_true', help='Do not run the gyp build')
parser.add_option('--use-breakpad-tools', dest='use_breakpad_tools',
default=False, action='store_true',
help='Use breakpad tools for testing')
options, args = parser.parse_args()
if len(args) != 3:
parser.error('Expected 3 arguments: mode arch clib')
# The three positional arguments are mode, arch and clib.
mode, arch, clib = args
if mode not in ('dbg', 'opt', 'coverage'):
parser.error('Invalid mode %r' % mode)
if arch not in ARCH_MAP:
parser.error('Invalid arch %r' % arch)
if clib not in ('newlib', 'glibc', 'pnacl'):
parser.error('Invalid clib %r' % clib)
# TODO(ncbray) allow a command-line override
platform = GetHostPlatform()
context['platform'] = platform
context['mode'] = mode
context['arch'] = arch
# ASan is Clang, so set the flag to simplify other checks.
context['clang'] = options.clang or options.asan
context['validator'] = options.validator
context['asan'] = options.asan
# TODO(ncbray) turn derived values into methods.
context['gyp_mode'] = {
'opt': 'Release',
'dbg': 'Debug',
'coverage': 'Debug'}[mode]
context['gyp_arch'] = ARCH_MAP[arch]['gyp_arch']
context['gyp_vars'] = []
if context['clang']:
context['gyp_vars'].append('clang=1')
if context['asan']:
context['gyp_vars'].append('asan=1')
context['default_scons_platform'] = ARCH_MAP[arch]['scons_platform']
context['default_scons_mode'] = ['nacl']
# Only Linux can build trusted code on ARM.
# TODO(mcgrathr): clean this up somehow
if arch != 'arm' or platform == 'linux':
context['default_scons_mode'] += [mode + '-host']
context['use_glibc'] = clib == 'glibc'
context['pnacl'] = clib == 'pnacl'
context['max_jobs'] = 8
context['dry_run'] = options.dry_run
context['inside_toolchain'] = options.inside_toolchain
context['step_suffix'] = options.step_suffix
context['no_gyp'] = options.no_gyp
context['coverage'] = options.coverage
context['use_breakpad_tools'] = options.use_breakpad_tools
# Don't run gyp on coverage builds.
if context['coverage']:
context['no_gyp'] = True
for key, value in sorted(context.config.items()):
print '%s=%s' % (key, value)
def EnsureDirectoryExists(path):
"""
Create a directory if it does not already exist.
Does not mask failures, but there really shouldn't be any.
"""
if not os.path.exists(path):
os.makedirs(path)
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
def Retry(op, *args):
# Windows seems to be prone to having commands that delete files or
# directories fail. We currently do not have a complete understanding why,
# and as a workaround we simply retry the command a few times.
# It appears that file locks are hanging around longer than they should. This
# may be a secondary effect of processes hanging around longer than they
# should. This may be because when we kill a browser sel_ldr does not exit
# immediately, etc.
# Virus checkers can also accidentally prevent files from being deleted, but
# that shouldn't be a problem on the bots.
if GetHostPlatform() == 'win':
count = 0
while True:
try:
op(*args)
break
except Exception:
print "FAILED: %s %s" % (op.__name__, repr(args))
count += 1
if count < 5:
print "RETRY: %s %s" % (op.__name__, repr(args))
time.sleep(pow(2, count))
else:
# Don't mask the exception.
raise
else:
op(*args)
def _RemoveDirectory(path):
print 'Removing %s' % path
if os.path.exists(path):
shutil.rmtree(path)
print ' Succeeded.'
else:
print ' Path does not exist, nothing to do.'
def RemoveDirectory(path):
"""
Remove a directory if it exists.
Does not mask failures, although it does retry a few times on Windows.
"""
Retry(_RemoveDirectory, path)
# This is a sanity check so Command can print out better error information.
def FileCanBeFound(name, paths):
# CWD
if os.path.exists(name):
return True
# Paths with directories are not resolved using the PATH variable.
if os.path.dirname(name):
return False
# In path
for path in paths.split(os.pathsep):
full = os.path.join(path, name)
if os.path.exists(full):
return True
return False
def RemoveGypBuildDirectories():
# Remove all directories on all platforms. Overkill, but it allows for
# straight-line code.
# Windows
RemoveDirectory('build/Debug')
RemoveDirectory('build/Release')
RemoveDirectory('build/Debug-Win32')
RemoveDirectory('build/Release-Win32')
RemoveDirectory('build/Debug-x64')
RemoveDirectory('build/Release-x64')
# Linux and Mac
RemoveDirectory('../xcodebuild')
RemoveDirectory('../out')
RemoveDirectory('src/third_party/nacl_sdk/arm-newlib')
def RemoveSconsBuildDirectories():
RemoveDirectory('scons-out')
RemoveDirectory('breakpad-out')
# Execute a command using Python's subprocess module.
def Command(context, cmd, cwd=None):
print 'Running command: %s' % ' '.join(cmd)
# Python's subprocess has a quirk. A subprocess can execute with an
# arbitrary, user-defined environment. The first argument of the command,
# however, is located using the PATH variable of the Python script that is
# launching the subprocess. Modifying the PATH in the environment passed to
# the subprocess does not affect Python's search for the first argument of
# the command (the executable file). This is a little counterintuitive,
# so we're forcing the search to use the same PATH variable as is seen by
# the subprocess.
env = context.MakeCommandEnv()
script_path = os.environ['PATH']
os.environ['PATH'] = env['PATH']
try:
if FileCanBeFound(cmd[0], env['PATH']) or context['dry_run']:
# Make sure that print statements before the subprocess call have been
# flushed, otherwise the output of the subprocess call may appear before
# the print statements.
sys.stdout.flush()
if context['dry_run']:
retcode = 0
else:
retcode = subprocess.call(cmd, cwd=cwd, env=env)
else:
# Provide a nicer failure message.
# If subprocess cannot find the executable, it will throw a cryptic
# exception.
print 'Executable %r cannot be found.' % cmd[0]
retcode = 1
finally:
os.environ['PATH'] = script_path
print 'Command return code: %d' % retcode
if retcode != 0:
raise StepFailed()
return retcode
# A specialized version of CommandStep.
def SCons(context, mode=None, platform=None, parallel=False, browser_test=False,
args=(), cwd=None):
python = sys.executable
if mode is None: mode = context['default_scons_mode']
if platform is None: platform = context['default_scons_platform']
if parallel:
jobs = context['max_jobs']
else:
jobs = 1
cmd = []
if browser_test and context.Linux():
# Although we could use the "browser_headless=1" Scons option, it runs
# xvfb-run once per Chromium invocation. This is good for isolating
# the tests, but xvfb-run has a stupid fixed-period sleep, which would
# slow down the tests unnecessarily.
cmd.extend(['xvfb-run', '--auto-servernum'])
cmd.extend([
python, 'scons.py',
'--verbose',
'-k',
'-j%d' % jobs,
'--mode='+','.join(mode),
'platform='+platform,
])
if context['clang']: cmd.append('--clang')
if context['asan']: cmd.append('--asan')
if context['use_glibc']: cmd.append('--nacl_glibc')
if context['pnacl']: cmd.append('bitcode=1')
if context['use_breakpad_tools']:
cmd.append('breakpad_tools_dir=breakpad-out')
# Append user-specified arguments.
cmd.extend(args)
Command(context, cmd, cwd)
class StepFailed(Exception):
"""
Thrown when the step has failed.
"""
class StopBuild(Exception):
"""
Thrown when the entire build should stop. This does not indicate a failure
in and of itself.
"""
class Step(object):
"""
This class is used in conjunction with a Python "with" statement to ensure
that the preamble and postamble of each build step gets printed and failures
get logged. This class also ensures that exceptions thrown inside a "with"
statement don't take down the entire build.
"""
def __init__(self, name, status, halt_on_fail=True):
self.status = status
if 'step_suffix' in status.context:
suffix = status.context['step_suffix']
else:
suffix = ''
self.name = name + suffix
self.halt_on_fail = halt_on_fail
self.step_failed = False
# Called on entry to a 'with' block.
def __enter__(self):
print
print '@@@BUILD_STEP %s@@@' % self.name
self.status.ReportBegin(self.name)
# The method is called on exit from a 'with' block - even for non-local
# control flow, i.e. exceptions, breaks, continues, returns, etc.
# If an exception is thrown inside a block wrapped with a 'with' statement,
# the __exit__ handler can suppress the exception by returning True. This is
# used to isolate each step in the build - if an exception occurs in a given
# step, the step is treated as a failure. This allows the postamble for each
# step to be printed and also allows the build to continue if the failure of
# a given step doesn't halt the build.
def __exit__(self, type, exception, trace):
if exception is None:
# If exception is None, no exception occurred.
step_failed = False
elif isinstance(exception, StepFailed):
step_failed = True
print
print 'Halting build step because of failure.'
print
else:
step_failed = True
print
print 'The build step threw an exception...'
print
traceback.print_exception(type, exception, trace, file=sys.stdout)
print
if step_failed:
self.status.ReportFail(self.name)
print '@@@STEP_FAILURE@@@'
if self.halt_on_fail:
print
print 'Entire build halted because %s failed.' % self.name
raise StopBuild()
else:
self.status.ReportPass(self.name)
# Suppress any exception that occurred.
return True
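# Illustrative usage sketch (assumption, not from the original script): build
# scripts wrap each command in a Step so that a failure is reported on the
# waterfall without killing the rest of the build unless halt_on_fail is set.
#
# status = BuildStatus(context)
# with Step('scons_compile', status, halt_on_fail=False):
# SCons(context, parallel=True)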
# Adds an arbitrary link inside the build stage on the waterfall.
def StepLink(text, link):
print '@@@STEP_LINK@%s@%s@@@' % (text, link)
# Adds arbitrary text inside the build stage on the waterfall.
def StepText(text):
print '@@@STEP_TEXT@%s@@@' % (text)
class BuildStatus(object):
"""
Keeps track of the overall status of the build.
"""
def __init__(self, context):
self.context = context
self.ever_failed = False
self.steps = []
def ReportBegin(self, name):
pass
def ReportPass(self, name):
self.steps.append((name, 'passed'))
def ReportFail(self, name):
self.steps.append((name, 'failed'))
self.ever_failed = True
# Handy info when this script is run outside of the buildbot.
def DisplayBuildStatus(self):
print
for step, status in self.steps:
print '%-40s[%s]' % (step, status)
print
if self.ever_failed:
print 'Build failed.'
else:
print 'Build succeeded.'
def ReturnValue(self):
return int(self.ever_failed)
class BuildContext(object):
"""
Encapsulates the information needed for running a build command. This
includes environment variables and default arguments for SCons invocations.
"""
# Only allow these attributes on objects of this type.
__slots__ = ['status', 'global_env', 'config']
def __init__(self):
# The contents of global_env override os.environ for any commands run via
# self.Command(...)
self.global_env = {}
# PATH is a special case. See: Command.
self.global_env['PATH'] = os.environ.get('PATH', '')
self.config = {}
self['dry_run'] = False
# Emulate dictionary subscripting.
def __getitem__(self, key):
return self.config[key]
# Emulate dictionary subscripting.
def __setitem__(self, key, value):
self.config[key] = value
# Emulate dictionary membership test
def __contains__(self, key):
return key in self.config
def Windows(self):
return self.config['platform'] == 'win'
def Linux(self):
return self.config['platform'] == 'linux'
def Mac(self):
return self.config['platform'] == 'mac'
def GetEnv(self, name):
return self.global_env[name]
def SetEnv(self, name, value):
self.global_env[name] = str(value)
def MakeCommandEnv(self):
# The external environment is not sanitized.
e = dict(os.environ)
# Arbitrary variables can be overridden.
e.update(self.global_env)
return e
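# Illustrative usage sketch (assumption; MyBuildScript and the values below are
# hypothetical): a driver script builds a context, fills in its configuration,
# and hands a BuildStatus to RunBuild below.
#
# context = BuildContext()
# SetDefaultContextAttributes(context)
# context['dry_run'] = True
# context.SetEnv('GYP_GENERATORS', 'ninja')
# RunBuild(MyBuildScript, BuildStatus(context))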
def RunBuild(script, status):
try:
script(status, status.context)
except StopBuild:
pass
# Workaround for an annotator bug.
# TODO([email protected]) remove when the bug is fixed.
if status.ever_failed:
with Step('summary', status):
print 'There were failed stages.'
# Display a summary of the build. Useful when running outside the buildbot.
status.DisplayBuildStatus()
sys.exit(status.ReturnValue())
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from flypy import jit
import numpy as np
class TestArrayAttributes(unittest.TestCase):
def test_array_create(self):
@jit
def identity(a):
return a
a = np.arange(10)
result = identity(a)
self.assertTrue(np.all(a == result))
def test_array_length(self):
@jit
def length(a):
return len(a)
self.assertEqual(length(np.arange(10)), 10)
self.assertEqual(length(np.empty((12, 8))), 12)
class TestArrayIndexing(unittest.TestCase):
def test_1d_array_index(self):
@jit
def index(a):
return a[6]
a = np.arange(10)
self.assertEqual(a[6], index(a))
def test_2d_array_index(self):
@jit
def index(a):
return a[6, 9]
a = np.arange(8 * 12).reshape(8, 12)
self.assertEqual(a[6, 9], index(a))
def test_nd_array_index(self):
@jit
def index(a, t):
return a[t]
def test(t, dtype=np.float64):
shape = tuple(np.array(t) + 5)
a = np.empty(shape, dtype=dtype)
a[t] = 6.4
self.assertEqual(6.4, index(a, t))
test((2,))
test((2, 6))
test((2, 6, 9))
test((2, 6, 9, 4))
test((2, 6, 9, 4, 3))
def test_1d_array_setitem(self):
@jit
def index(a):
a[6] = 14
a = np.arange(10)
index(a)
self.assertEqual(a[6], 14)
def test_1d_array_setitem_x(self):
@jit
def index(a, i):
a[i] = 14
for i in [0, 1, 8, 9]:
a = np.arange(10)
index(a, i)
self.assertEqual(a[i], 14, "1D array setitem(%d)" % i)
def test_2d_array_setitem(self):
@jit
def index(a):
a[6, 9] = 14
a = np.arange(8 * 12).reshape(8, 12)
index(a)
self.assertEqual(a[6, 9], 14)
def test_2d_array_setitem_x(self):
@jit
def index(a, i, j):
a[i, j] = 14
x = [0, 1, 6, 7]
y = [0, 1, 10, 11]
for i in x:
for j in y:
a = np.arange(8 * 12).reshape(8, 12)
index(a, i, j)
self.assertEqual(a[i, j], 14, "2D array setitem(%d, %d)" %
(i, j))
def test_2d_array_setitem_0index(self):
@jit
def index(a):
a[0, 0] = 14
a = np.arange(8 * 12).reshape(8, 12)
index(a)
self.assertEqual(a[0, 0], 14)
def test_nd_array_setitem(self):
@jit
def index(a, t):
a[t] = 14
def test(t, dtype=np.float64):
shape = tuple(np.array(t) + 5)
a = np.empty(shape, dtype=dtype)
index(a, t)
self.assertEqual(a[t], 14)
test((2,))
test((2, 6))
test((2, 6, 9))
test((2, 6, 9, 4))
test((2, 6, 9, 4, 3))
def test_partial_getitem(self):
@jit
def index(a):
return a[6]
a = np.arange(8 * 12).reshape(8, 12)
result = index(a)
self.assertEqual(len(result), 12)
self.assertTrue(np.all(result == a[6]))
def test_partial_setitem(self):
@jit
def index(a):
a[6] = 4
a = np.arange(8 * 12).reshape(8, 12)
index(a)
self.assertTrue(np.all(a[6] == 4))
class TestArraySlicing(unittest.TestCase):
def test_1d_array_slice(self):
@jit
def index(a):
return a[:]
a = np.arange(10)
self.assertTrue(np.all(a == index(a)))
def test_1d_array_slice_bounds(self):
@jit
def index(a, start, stop, step):
return a[start:stop:step]
def test(start=0, stop=10, step=1):
a = np.arange(10)
result = index(a, start, stop, step)
expected = a[start:stop:step]
self.assertTrue(np.all(result == expected), (result, expected))
# Ascending
test(1)
test(3)
test(2, 8, 3)
test(2, 9, 3)
# Descending (wrap-around)
test(-2)
test(-2, -3)
test(-2, -3, -1)
# Wrap around and adjust
test(-12, 3, 1)
test(12, 4, -1)
test(12, -3, -1)
test(8, -12, -1)
def test_2d_array_slice(self):
@jit
def index(a):
return a[:, 5]
a = np.arange(8 * 12).reshape(8, 12)
result = index(a)
self.assertTrue(np.all(a[:, 5] == result))
if __name__ == '__main__':
unittest.main(verbosity=3)
|
|
from __future__ import division, unicode_literals
import collections
import contextlib
import json
import numbers
try:
import requests
except ImportError:
requests = None
from jsonschema import _utils, _validators
from jsonschema.compat import (
PY3, Sequence, urljoin, urlsplit, urldefrag, unquote, urlopen,
str_types, int_types, iteritems,
)
from jsonschema.exceptions import RefResolutionError, SchemaError, UnknownType
_unset = _utils.Unset()
validators = {}
meta_schemas = _utils.URIDict()
def validates(version):
"""
Register the decorated validator for a ``version`` of the specification.
Registered validators and their meta schemas will be considered when
parsing ``$schema`` properties' URIs.
:argument str version: an identifier to use as the version's name
:returns: a class decorator to decorate the validator with the version
"""
def _validates(cls):
validators[version] = cls
if "id" in cls.META_SCHEMA:
meta_schemas[cls.META_SCHEMA["id"]] = cls
return cls
return _validates
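# Illustrative sketch (assumption, not part of the original module): registering
# a validator class for a hypothetical draft would look roughly like this; a
# schema whose ``$schema`` points at that id then resolves to MyValidator.
#
# @validates("my-draft")
# class MyValidator(object):
# META_SCHEMA = {"id": "http://example.com/my-draft/schema#"}
# VALIDATORS = {}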
def create(meta_schema, validators=(), version=None, default_types=None): # noqa
if default_types is None:
default_types = {
"array" : list, "boolean" : bool, "integer" : int_types,
"null" : type(None), "number" : numbers.Number, "object" : dict,
"string" : str_types,
}
class Validator(object):
VALIDATORS = dict(validators)
META_SCHEMA = dict(meta_schema)
DEFAULT_TYPES = dict(default_types)
def __init__(
self, schema, types=(), resolver=None, format_checker=None,
):
self._types = dict(self.DEFAULT_TYPES)
self._types.update(types)
if resolver is None:
resolver = RefResolver.from_schema(schema)
self.resolver = resolver
self.format_checker = format_checker
self.schema = schema
@classmethod
def check_schema(cls, schema):
for error in cls(cls.META_SCHEMA).iter_errors(schema):
raise SchemaError.create_from(error)
def iter_errors(self, instance, _schema=None):
if _schema is None:
_schema = self.schema
with self.resolver.in_scope(_schema.get("id", "")):
ref = _schema.get("$ref")
if ref is not None:
validators = [("$ref", ref)]
else:
validators = iteritems(_schema)
for k, v in validators:
validator = self.VALIDATORS.get(k)
if validator is None:
continue
errors = validator(self, v, instance, _schema) or ()
for error in errors:
# set details if not already set by the called fn
error._set(
validator=k,
validator_value=v,
instance=instance,
schema=_schema,
)
if k != "$ref":
error.schema_path.appendleft(k)
yield error
def descend(self, instance, schema, path=None, schema_path=None):
for error in self.iter_errors(instance, schema):
if path is not None:
error.path.appendleft(path)
if schema_path is not None:
error.schema_path.appendleft(schema_path)
yield error
def validate(self, *args, **kwargs):
for error in self.iter_errors(*args, **kwargs):
raise error
def is_type(self, instance, type):
if type not in self._types:
raise UnknownType(type)
pytypes = self._types[type]
# bool inherits from int, so ensure bools aren't reported as ints
if isinstance(instance, bool):
pytypes = _utils.flatten(pytypes)
is_number = any(
issubclass(pytype, numbers.Number) for pytype in pytypes
)
if is_number and bool not in pytypes:
return False
return isinstance(instance, pytypes)
def is_valid(self, instance, _schema=None):
error = next(self.iter_errors(instance, _schema), None)
return error is None
if version is not None:
Validator = validates(version)(Validator)
name = "{0}Validator".format(version.title().replace(" ", ""))
if not PY3 and isinstance(name, unicode):
name = name.encode("utf-8")
Validator.__name__ = name
return Validator
def extend(validator, validators, version=None):
all_validators = dict(validator.VALIDATORS)
all_validators.update(validators)
return create(
meta_schema=validator.META_SCHEMA,
validators=all_validators,
version=version,
default_types=validator.DEFAULT_TYPES,
)
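# Illustrative usage sketch (assumption; maxDecimals is hypothetical): extend the
# Draft4Validator created below with an extra keyword. Validator callables take
# (validator, value, instance, schema) and yield errors, mirroring _validators.
#
# def maxDecimals(validator, value, instance, schema):
# ...
# MyValidator = extend(Draft4Validator, {"maxDecimals": maxDecimals})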
Draft3Validator = create(
meta_schema=_utils.load_schema("draft3"),
validators={
"$ref" : _validators.ref,
"additionalItems" : _validators.additionalItems,
"additionalProperties" : _validators.additionalProperties,
"dependencies" : _validators.dependencies,
"disallow" : _validators.disallow_draft3,
"divisibleBy" : _validators.multipleOf,
"enum" : _validators.enum,
"extends" : _validators.extends_draft3,
"format" : _validators.format,
"items" : _validators.items,
"maxItems" : _validators.maxItems,
"maxLength" : _validators.maxLength,
"maximum" : _validators.maximum,
"minItems" : _validators.minItems,
"minLength" : _validators.minLength,
"minimum" : _validators.minimum,
"multipleOf" : _validators.multipleOf,
"pattern" : _validators.pattern,
"patternProperties" : _validators.patternProperties,
"properties" : _validators.properties_draft3,
"type" : _validators.type_draft3,
"uniqueItems" : _validators.uniqueItems,
},
version="draft3",
)
Draft4Validator = create(
meta_schema=_utils.load_schema("draft4"),
validators={
"$ref" : _validators.ref,
"additionalItems" : _validators.additionalItems,
"additionalProperties" : _validators.additionalProperties,
"allOf" : _validators.allOf_draft4,
"anyOf" : _validators.anyOf_draft4,
"dependencies" : _validators.dependencies,
"enum" : _validators.enum,
"format" : _validators.format,
"items" : _validators.items,
"maxItems" : _validators.maxItems,
"maxLength" : _validators.maxLength,
"maxProperties" : _validators.maxProperties_draft4,
"maximum" : _validators.maximum,
"minItems" : _validators.minItems,
"minLength" : _validators.minLength,
"minProperties" : _validators.minProperties_draft4,
"minimum" : _validators.minimum,
"multipleOf" : _validators.multipleOf,
"not" : _validators.not_draft4,
"oneOf" : _validators.oneOf_draft4,
"pattern" : _validators.pattern,
"patternProperties" : _validators.patternProperties,
"properties" : _validators.properties_draft4,
"required" : _validators.required_draft4,
"type" : _validators.type_draft4,
"uniqueItems" : _validators.uniqueItems,
},
version="draft4",
)
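# Illustrative usage sketch (standard jsonschema usage, included for clarity):
#
# schema = {"type": "object", "properties": {"n": {"type": "integer"}}}
# Draft4Validator.check_schema(schema)
# Draft4Validator(schema).validate({"n": 1}) # passes silently
# Draft4Validator(schema).is_valid({"n": "x"}) # returns False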
class RefResolver(object):
"""
Resolve JSON References.
:argument str base_uri: URI of the referring document
:argument referrer: the actual referring document
:argument dict store: a mapping from URIs to documents to cache
:argument bool cache_remote: whether remote refs should be cached after
first resolution
:argument dict handlers: a mapping from URI schemes to functions that
should be used to retrieve them
"""
def __init__(
self, base_uri, referrer, store=(), cache_remote=True, handlers=(),
):
self.base_uri = base_uri
self.resolution_scope = base_uri
# This attribute is not used, it is for backwards compatibility
self.referrer = referrer
self.cache_remote = cache_remote
self.handlers = dict(handlers)
self.store = _utils.URIDict(
(id, validator.META_SCHEMA)
for id, validator in iteritems(meta_schemas)
)
self.store.update(store)
self.store[base_uri] = referrer
@classmethod
def from_schema(cls, schema, *args, **kwargs):
"""
Construct a resolver from a JSON schema object.
:argument schema schema: the referring schema
:rtype: :class:`RefResolver`
"""
return cls(schema.get("id", ""), schema, *args, **kwargs)
@contextlib.contextmanager
def in_scope(self, scope):
old_scope = self.resolution_scope
self.resolution_scope = urljoin(old_scope, scope)
try:
yield
finally:
self.resolution_scope = old_scope
@contextlib.contextmanager
def resolving(self, ref):
"""
Context manager which resolves a JSON ``ref`` and enters the
resolution scope of this ref.
:argument str ref: reference to resolve
"""
full_uri = urljoin(self.resolution_scope, ref)
uri, fragment = urldefrag(full_uri)
if not uri:
uri = self.base_uri
if uri in self.store:
document = self.store[uri]
else:
try:
document = self.resolve_remote(uri)
except Exception as exc:
raise RefResolutionError(exc)
old_base_uri, self.base_uri = self.base_uri, uri
try:
with self.in_scope(uri):
yield self.resolve_fragment(document, fragment)
finally:
self.base_uri = old_base_uri
def resolve_fragment(self, document, fragment):
"""
Resolve a ``fragment`` within the referenced ``document``.
        :argument document: the referent document
:argument str fragment: a URI fragment to resolve within it
"""
fragment = fragment.lstrip("/")
parts = unquote(fragment).split("/") if fragment else []
for part in parts:
part = part.replace("~1", "/").replace("~0", "~")
if isinstance(document, Sequence):
# Array indexes should be turned into integers
try:
part = int(part)
except ValueError:
pass
try:
document = document[part]
except (TypeError, LookupError):
raise RefResolutionError(
"Unresolvable JSON pointer: %r" % fragment
)
return document
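    # Illustrative example (not part of the original module): with
    #     document = {"a/b": [{"c": 1}]}
    # ``resolve_fragment(document, "a~1b/0/c")`` returns ``1`` -- "~1" unescapes
    # to "/", "~0" to "~", and purely numeric parts index into sequences.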
def resolve_remote(self, uri):
"""
Resolve a remote ``uri``.
Does not check the store first, but stores the retrieved document in
the store if :attr:`RefResolver.cache_remote` is True.
.. note::
If the requests_ library is present, ``jsonschema`` will use it to
request the remote ``uri``, so that the correct encoding is
detected and used.
If it isn't, or if the scheme of the ``uri`` is not ``http`` or
``https``, UTF-8 is assumed.
:argument str uri: the URI to resolve
:returns: the retrieved document
.. _requests: http://pypi.python.org/pypi/requests/
"""
scheme = urlsplit(uri).scheme
if scheme in self.handlers:
result = self.handlers[scheme](uri)
elif (
scheme in ["http", "https"] and
requests and
getattr(requests.Response, "json", None) is not None
):
# Requests has support for detecting the correct encoding of
# json over http
if callable(requests.Response.json):
result = requests.get(uri).json()
else:
result = requests.get(uri).json
else:
# Otherwise, pass off to urllib and assume utf-8
result = json.loads(urlopen(uri).read().decode("utf-8"))
if self.cache_remote:
self.store[uri] = result
return result
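# Illustrative sketch (not part of the original module): the ``handlers``
# mapping lets callers override retrieval per URI scheme; each callable
# receives the full URI and returns the parsed document. ``load_from_disk``
# below is a hypothetical helper, not part of this library:
#
#     resolver = RefResolver.from_schema(schema, handlers={"file": load_from_disk})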
class ErrorTree(object):
"""
ErrorTrees make it easier to check which validations failed.
"""
_instance = _unset
def __init__(self, errors=()):
self.errors = {}
self._contents = collections.defaultdict(self.__class__)
for error in errors:
container = self
for element in error.path:
container = container[element]
container.errors[error.validator] = error
self._instance = error.instance
def __contains__(self, index):
"""
Check whether ``instance[index]`` has any errors.
"""
return index in self._contents
def __getitem__(self, index):
"""
Retrieve the child tree one level down at the given ``index``.
If the index is not in the instance that this tree corresponds to and
is not known by this tree, whatever error would be raised by
``instance.__getitem__`` will be propagated (usually this is some
        subclass of :class:`LookupError`).
"""
if self._instance is not _unset and index not in self:
self._instance[index]
return self._contents[index]
def __setitem__(self, index, value):
self._contents[index] = value
def __iter__(self):
"""
Iterate (non-recursively) over the indices in the instance with errors.
"""
return iter(self._contents)
def __len__(self):
"""
Same as :attr:`total_errors`.
"""
return self.total_errors
def __repr__(self):
return "<%s (%s total errors)>" % (self.__class__.__name__, len(self))
@property
def total_errors(self):
"""
The total number of errors in the entire tree, including children.
"""
child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
return len(self.errors) + child_errors
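# Illustrative sketch (not part of the original module): errors are bucketed by
# each error's ``path``, so for
#     schema = {"items": {"type": "integer"}}
#     instance = [1, "spam"]
# the resulting tree has ``1 in tree`` true, ``"type" in tree[1].errors`` true,
# and ``tree.total_errors == 1`` (assuming the errors come from a validator's
# ``iter_errors``, as in upstream jsonschema).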
def validator_for(schema, default=_unset):
if default is _unset:
default = Draft4Validator
return meta_schemas.get(schema.get("$schema", ""), default)
def validate(instance, schema, cls=None, *args, **kwargs):
"""
Validate an instance under the given schema.
>>> validate([2, 3, 4], {"maxItems" : 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
:func:`validate` will first verify that the provided schema is itself
valid, since not doing so can lead to less obvious error messages and fail
in less obvious or consistent ways. If you know you have a valid schema
already or don't care, you might prefer using the
:meth:`~IValidator.validate` method directly on a specific validator
(e.g. :meth:`Draft4Validator.validate`).
:argument instance: the instance to validate
:argument schema: the schema to validate with
:argument cls: an :class:`IValidator` class that will be used to validate
the instance.
If the ``cls`` argument is not provided, two things will happen in
accordance with the specification. First, if the schema has a
:validator:`$schema` property containing a known meta-schema [#]_ then the
proper validator will be used. The specification recommends that all
schemas contain :validator:`$schema` properties for this reason. If no
:validator:`$schema` property is found, the default validator class is
:class:`Draft4Validator`.
Any other provided positional and keyword arguments will be passed on when
instantiating the ``cls``.
:raises:
:exc:`ValidationError` if the instance is invalid
:exc:`SchemaError` if the schema itself is invalid
.. rubric:: Footnotes
.. [#] known by a validator registered with :func:`validates`
"""
if cls is None:
cls = validator_for(schema)
cls.check_schema(schema)
cls(schema, *args, **kwargs).validate(instance)
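# Illustrative usage sketch appended for clarity (not part of the original
# module). It only runs when this file is executed directly and assumes the
# validator classes created above expose ``iter_errors``, as they do in
# upstream jsonschema.
if __name__ == "__main__":
    _schema = {"items": {"type": "integer"}, "maxItems": 3}
    _instance = [1, "spam", 3]
    # validator_for() falls back to Draft4Validator when "$schema" is absent.
    _cls = validator_for(_schema)
    _tree = ErrorTree(_cls(_schema).iter_errors(_instance))
    print("%d error(s); index 1 failed: %s" % (_tree.total_errors, 1 in _tree))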
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.linux import ip_lib
from neutron.common import exceptions
from neutron.tests import base
NETNS_SAMPLE = [
'12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc']
LINK_SAMPLE = [
'1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\'
'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00',
'2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP '
'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'
'\ alias openvswitch',
'3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
'\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff',
'4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop '
'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff']
ADDR_SAMPLE = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
ADDR_SAMPLE2 = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
GATEWAY_SAMPLE1 = ("""
default via 10.35.19.254 metric 100
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE2 = ("""
default via 10.35.19.254 metric 100
""")
GATEWAY_SAMPLE3 = ("""
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE4 = ("""
default via 10.35.19.254
""")
DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2")
SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n"
"10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2")
SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n"
"10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1")
class TestSubProcessBase(base.BaseTestCase):
def setUp(self):
super(TestSubProcessBase, self).setUp()
self.execute_p = mock.patch('neutron.agent.linux.utils.execute')
self.execute = self.execute_p.start()
self.addCleanup(self.execute_p.stop)
def test_execute_wrapper(self):
ip_lib.SubProcessBase._execute('o', 'link', ('list',), 'sudo')
self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'],
root_helper='sudo')
def test_execute_wrapper_int_options(self):
ip_lib.SubProcessBase._execute([4], 'link', ('list',))
self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'],
root_helper=None)
def test_execute_wrapper_no_options(self):
ip_lib.SubProcessBase._execute([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
root_helper=None)
def test_run_no_namespace(self):
base = ip_lib.SubProcessBase('sudo')
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
root_helper=None)
def test_run_namespace(self):
base = ip_lib.SubProcessBase('sudo', 'ns')
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
root_helper='sudo')
def test_as_root_namespace(self):
base = ip_lib.SubProcessBase('sudo', 'ns')
base._as_root([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
root_helper='sudo')
def test_as_root_no_root_helper(self):
base = ip_lib.SubProcessBase()
self.assertRaises(exceptions.SudoRequired,
base._as_root,
[], 'link', ('list',))
class TestIpWrapper(base.BaseTestCase):
def setUp(self):
super(TestIpWrapper, self).setUp()
self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute')
self.execute = self.execute_p.start()
self.addCleanup(self.execute_p.stop)
def test_get_devices(self):
self.execute.return_value = '\n'.join(LINK_SAMPLE)
retval = ip_lib.IPWrapper('sudo').get_devices()
self.assertEqual(retval,
[ip_lib.IPDevice('lo'),
ip_lib.IPDevice('eth0'),
ip_lib.IPDevice('br-int'),
ip_lib.IPDevice('gw-ddc717df-49')])
self.execute.assert_called_once_with('o', 'link', ('list',),
'sudo', None)
def test_get_devices_malformed_line(self):
self.execute.return_value = '\n'.join(LINK_SAMPLE + ['gibberish'])
retval = ip_lib.IPWrapper('sudo').get_devices()
self.assertEqual(retval,
[ip_lib.IPDevice('lo'),
ip_lib.IPDevice('eth0'),
ip_lib.IPDevice('br-int'),
ip_lib.IPDevice('gw-ddc717df-49')])
self.execute.assert_called_once_with('o', 'link', ('list',),
'sudo', None)
def test_get_namespaces(self):
self.execute.return_value = '\n'.join(NETNS_SAMPLE)
retval = ip_lib.IPWrapper.get_namespaces('sudo')
self.assertEqual(retval,
['12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc'])
self.execute.assert_called_once_with('', 'netns', ('list',),
root_helper='sudo')
def test_add_tuntap(self):
ip_lib.IPWrapper('sudo').add_tuntap('tap0')
self.execute.assert_called_once_with('', 'tuntap',
('add', 'tap0', 'mode', 'tap'),
'sudo', None)
def test_add_veth(self):
ip_lib.IPWrapper('sudo').add_veth('tap0', 'tap1')
self.execute.assert_called_once_with('', 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1'),
'sudo', None)
def test_add_veth_with_namespaces(self):
ns2 = 'ns2'
with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en:
ip_lib.IPWrapper('sudo').add_veth('tap0', 'tap1', namespace2=ns2)
en.assert_has_calls([mock.call(ns2)])
self.execute.assert_called_once_with('', 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1',
'netns', ns2),
'sudo', None)
def test_get_device(self):
dev = ip_lib.IPWrapper('sudo', 'ns').device('eth0')
self.assertEqual(dev.root_helper, 'sudo')
self.assertEqual(dev.namespace, 'ns')
self.assertEqual(dev.name, 'eth0')
def test_ensure_namespace(self):
with mock.patch.object(ip_lib, 'IPDevice') as ip_dev:
ip = ip_lib.IPWrapper('sudo')
with mock.patch.object(ip.netns, 'exists') as ns_exists:
ns_exists.return_value = False
ip.ensure_namespace('ns')
self.execute.assert_has_calls(
[mock.call([], 'netns', ('add', 'ns'), 'sudo', None)])
ip_dev.assert_has_calls([mock.call('lo', 'sudo', 'ns'),
mock.call().link.set_up()])
def test_ensure_namespace_existing(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd:
ip_ns_cmd.exists.return_value = True
ns = ip_lib.IPWrapper('sudo').ensure_namespace('ns')
self.assertFalse(self.execute.called)
self.assertEqual(ns.namespace, 'ns')
def test_namespace_is_empty_no_devices(self):
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = []
self.assertTrue(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_namespace_is_empty(self):
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = [mock.Mock()]
self.assertFalse(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_garbage_collect_namespace_does_not_exist(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = False
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
self.assertFalse(ip.garbage_collect_namespace())
ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')])
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.return_value.mock_calls)
self.assertEqual(mock_is_empty.mock_calls, [])
def test_garbage_collect_namespace_existing_empty_ns(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = True
self.assertTrue(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call().exists('ns'),
mock.call().delete('ns')]
ip_ns_cmd_cls.assert_has_calls(expected)
def test_garbage_collect_namespace_existing_not_empty(self):
lo_device = mock.Mock()
lo_device.name = 'lo'
tap_device = mock.Mock()
tap_device.name = 'tap1'
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = False
self.assertFalse(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call(ip),
mock.call().exists('ns')]
self.assertEqual(ip_ns_cmd_cls.mock_calls, expected)
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.mock_calls)
def test_add_device_to_namespace(self):
dev = mock.Mock()
ip_lib.IPWrapper('sudo', 'ns').add_device_to_namespace(dev)
dev.assert_has_calls([mock.call.link.set_netns('ns')])
def test_add_device_to_namespace_is_none(self):
dev = mock.Mock()
ip_lib.IPWrapper('sudo').add_device_to_namespace(dev)
self.assertEqual(dev.mock_calls, [])
class TestIPDevice(base.BaseTestCase):
def test_eq_same_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap0')
self.assertEqual(dev1, dev2)
def test_eq_diff_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap1')
self.assertNotEqual(dev1, dev2)
def test_eq_same_namespace(self):
dev1 = ip_lib.IPDevice('tap0', 'ns1')
dev2 = ip_lib.IPDevice('tap0', 'ns1')
self.assertEqual(dev1, dev2)
def test_eq_diff_namespace(self):
dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1')
dev2 = ip_lib.IPDevice('tap0', 'sudo', 'ns2')
self.assertNotEqual(dev1, dev2)
def test_eq_other_is_none(self):
dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1')
self.assertNotEqual(dev1, None)
def test_str(self):
self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0')
class TestIPCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPCommandBase, self).setUp()
self.ip = mock.Mock()
self.ip.root_helper = 'sudo'
self.ip.namespace = 'namespace'
self.ip_cmd = ip_lib.IpCommandBase(self.ip)
self.ip_cmd.COMMAND = 'foo'
def test_run(self):
self.ip_cmd._run('link', 'show')
self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))])
def test_run_with_options(self):
self.ip_cmd._run('link', options='o')
self.ip.assert_has_calls([mock.call._run('o', 'foo', ('link', ))])
def test_as_root(self):
self.ip_cmd._as_root('link')
self.ip.assert_has_calls(
[mock.call._as_root([], 'foo', ('link', ), False)])
def test_as_root_with_options(self):
self.ip_cmd._as_root('link', options='o')
self.ip.assert_has_calls(
[mock.call._as_root('o', 'foo', ('link', ), False)])
class TestIPDeviceCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPDeviceCommandBase, self).setUp()
self.ip_dev = mock.Mock()
self.ip_dev.name = 'eth0'
self.ip_dev.root_helper = 'sudo'
self.ip_dev._execute = mock.Mock(return_value='executed')
self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev)
self.ip_cmd.COMMAND = 'foo'
def test_name_property(self):
self.assertEqual(self.ip_cmd.name, 'eth0')
class TestIPCmdBase(base.BaseTestCase):
def setUp(self):
super(TestIPCmdBase, self).setUp()
self.parent = mock.Mock()
self.parent.name = 'eth0'
self.parent.root_helper = 'sudo'
def _assert_call(self, options, args):
self.parent.assert_has_calls([
mock.call._run(options, self.command, args)])
def _assert_sudo(self, options, args, force_root_namespace=False):
self.parent.assert_has_calls(
[mock.call._as_root(options, self.command, args,
force_root_namespace)])
class TestIpLinkCommand(TestIPCmdBase):
def setUp(self):
super(TestIpLinkCommand, self).setUp()
self.parent._run.return_value = LINK_SAMPLE[1]
self.command = 'link'
self.link_cmd = ip_lib.IpLinkCommand(self.parent)
def test_set_address(self):
self.link_cmd.set_address('aa:bb:cc:dd:ee:ff')
self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff'))
def test_set_mtu(self):
self.link_cmd.set_mtu(1500)
self._assert_sudo([], ('set', 'eth0', 'mtu', 1500))
def test_set_up(self):
self.link_cmd.set_up()
self._assert_sudo([], ('set', 'eth0', 'up'))
def test_set_down(self):
self.link_cmd.set_down()
self._assert_sudo([], ('set', 'eth0', 'down'))
def test_set_netns(self):
self.link_cmd.set_netns('foo')
self._assert_sudo([], ('set', 'eth0', 'netns', 'foo'))
self.assertEqual(self.parent.namespace, 'foo')
def test_set_name(self):
self.link_cmd.set_name('tap1')
self._assert_sudo([], ('set', 'eth0', 'name', 'tap1'))
self.assertEqual(self.parent.name, 'tap1')
def test_set_alias(self):
self.link_cmd.set_alias('openvswitch')
self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch'))
def test_delete(self):
self.link_cmd.delete()
self._assert_sudo([], ('delete', 'eth0'))
def test_address_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd')
def test_mtu_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.mtu, 1500)
def test_qdisc_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qdisc, 'mq')
def test_qlen_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qlen, 1000)
def test_alias_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.alias, 'openvswitch')
def test_state_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.state, 'UP')
def test_settings_property(self):
expected = {'mtu': 1500,
'qlen': 1000,
'state': 'UP',
'qdisc': 'mq',
'brd': 'ff:ff:ff:ff:ff:ff',
'link/ether': 'cc:dd:ee:ff:ab:cd',
'alias': 'openvswitch'}
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.attributes, expected)
self._assert_call('o', ('show', 'eth0'))
class TestIpAddrCommand(TestIPCmdBase):
def setUp(self):
super(TestIpAddrCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'addr'
self.addr_cmd = ip_lib.IpAddrCommand(self.parent)
def test_add_address(self):
self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255')
self._assert_sudo([4],
('add', '192.168.45.100/24', 'brd', '192.168.45.255',
'scope', 'global', 'dev', 'tap0'))
def test_add_address_scoped(self):
self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255',
scope='link')
self._assert_sudo([4],
('add', '192.168.45.100/24', 'brd', '192.168.45.255',
'scope', 'link', 'dev', 'tap0'))
def test_del_address(self):
self.addr_cmd.delete(4, '192.168.45.100/24')
self._assert_sudo([4],
('del', '192.168.45.100/24', 'dev', 'tap0'))
def test_flush(self):
self.addr_cmd.flush()
self._assert_sudo([], ('flush', 'tap0'))
def test_list(self):
expected = [
dict(ip_version=4, scope='global',
dynamic=False, cidr='172.16.77.240/24',
broadcast='172.16.77.255'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64',
broadcast='::'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64',
broadcast='::'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64',
broadcast='::'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64',
broadcast='::'),
dict(ip_version=6, scope='link',
dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64',
broadcast='::')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
self.parent._run = mock.Mock(return_value=test_case)
self.assertEqual(self.addr_cmd.list(), expected)
self._assert_call([], ('show', 'tap0'))
def test_list_filtered(self):
expected = [
dict(ip_version=4, scope='global',
dynamic=False, cidr='172.16.77.240/24',
broadcast='172.16.77.255')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
output = '\n'.join(test_case.split('\n')[0:4])
self.parent._run.return_value = output
self.assertEqual(self.addr_cmd.list('global',
filters=['permanent']), expected)
self._assert_call([], ('show', 'tap0', 'permanent', 'scope',
'global'))
class TestIpRouteCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRouteCommand, self).setUp()
self.parent.name = 'eth0'
self.command = 'route'
self.route_cmd = ip_lib.IpRouteCommand(self.parent)
def test_add_gateway(self):
gateway = '192.168.45.100'
metric = 100
self.route_cmd.add_gateway(gateway, metric)
self._assert_sudo([],
('replace', 'default', 'via', gateway,
'metric', metric,
'dev', self.parent.name))
def test_del_gateway(self):
gateway = '192.168.45.100'
self.route_cmd.delete_gateway(gateway)
self._assert_sudo([],
('del', 'default', 'via', gateway,
'dev', self.parent.name))
def test_get_gateway(self):
test_cases = [{'sample': GATEWAY_SAMPLE1,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE2,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE3,
'expected': None},
{'sample': GATEWAY_SAMPLE4,
'expected': {'gateway': '10.35.19.254'}}]
for test_case in test_cases:
self.parent._run = mock.Mock(return_value=test_case['sample'])
self.assertEqual(self.route_cmd.get_gateway(),
test_case['expected'])
def test_pullup_route(self):
# interface is not the first in the list - requires
# deleting and creating existing entries
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10')
self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2'))
self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel',
'src', '10.0.0.1', 'dev', 'qr-23380d11-d2'))
def test_pullup_route_first(self):
# interface is first in the list - no changes
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10')
# Check two calls - device get and subnet get
self.assertEqual(len(self.parent._run.mock_calls), 2)
class TestIpNetnsCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNetnsCommand, self).setUp()
self.command = 'netns'
self.netns_cmd = ip_lib.IpNetnsCommand(self.parent)
def test_add_namespace(self):
ns = self.netns_cmd.add('ns')
self._assert_sudo([], ('add', 'ns'), force_root_namespace=True)
self.assertEqual(ns.namespace, 'ns')
def test_delete_namespace(self):
with mock.patch('neutron.agent.linux.utils.execute'):
self.netns_cmd.delete('ns')
self._assert_sudo([], ('delete', 'ns'), force_root_namespace=True)
def test_namespace_exists(self):
retval = '\n'.join(NETNS_SAMPLE)
self.parent._as_root.return_value = retval
self.assertTrue(
self.netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'))
self._assert_sudo('o', ('list',), force_root_namespace=True)
    def test_namespace_does_not_exist(self):
retval = '\n'.join(NETNS_SAMPLE)
self.parent._as_root.return_value = retval
self.assertFalse(
self.netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb'))
self._assert_sudo('o', ('list',), force_root_namespace=True)
def test_execute(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.linux.utils.execute') as execute:
self.netns_cmd.execute(['ip', 'link', 'list'])
execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip',
'link', 'list'],
root_helper='sudo',
check_exit_code=True)
def test_execute_env_var_prepend(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.linux.utils.execute') as execute:
env = dict(FOO=1, BAR=2)
self.netns_cmd.execute(['ip', 'link', 'list'], env)
execute.assert_called_once_with(
['FOO=1', 'BAR=2', 'ip', 'netns', 'exec', 'ns', 'ip', 'link',
'list'],
root_helper='sudo', check_exit_code=True)
class TestDeviceExists(base.BaseTestCase):
def test_device_exists(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = LINK_SAMPLE[1]
self.assertTrue(ip_lib.device_exists('eth0'))
_execute.assert_called_once_with('o', 'link', ('show', 'eth0'))
def test_device_does_not_exist(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = ''
_execute.side_effect = RuntimeError
self.assertFalse(ip_lib.device_exists('eth0'))
|
|
# -*- coding: utf-8 -*-
import json
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import last_modified as cache_last_modified
from django.views.decorators.cache import never_cache as force_cache_validation
from django.core.cache import get_cache
from django.shortcuts import redirect
from mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,
MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,
MapEntityDelete, MapEntityFormat,
HttpJSONResponse)
from geotrek.authent.decorators import same_structure_required
from geotrek.common.utils import classproperty
from .models import Path, Trail, Topology
from .forms import PathForm, TrailForm
from .filters import PathFilterSet, TrailFilterSet
from . import graph as graph_lib
logger = logging.getLogger(__name__)
@login_required
def last_list(request):
last = request.session.get('last_list') # set in MapEntityList
if not last:
return redirect('core:path_list')
return redirect(last)
home = last_list
class CreateFromTopologyMixin(object):
def on_topology(self):
pk = self.request.GET.get('topology')
if pk:
try:
return Topology.objects.existing().get(pk=pk)
except Topology.DoesNotExist:
logger.warning("Intervention on unknown topology %s" % pk)
return None
def get_initial(self):
initial = super(CreateFromTopologyMixin, self).get_initial()
# Create intervention with an existing topology as initial data
topology = self.on_topology()
if topology:
initial['topology'] = topology.serialize(with_pk=False)
return initial
class PathLayer(MapEntityLayer):
model = Path
properties = ['name']
class PathList(MapEntityList):
queryset = Path.objects.prefetch_related('networks').select_related('stake')
filterform = PathFilterSet
@classproperty
def columns(cls):
columns = ['id', 'name', 'networks', 'stake']
if settings.TRAIL_MODEL_ENABLED:
columns.append('trails')
return columns
def get_queryset(self):
"""
        Denormalize the ``trails`` column for the list view.
"""
qs = super(PathList, self).get_queryset()
denormalized = {}
if settings.TRAIL_MODEL_ENABLED:
paths_id = qs.values_list('id', flat=True)
paths_trails = Trail.objects.filter(aggregations__path__id__in=paths_id)
by_id = dict([(trail.id, trail) for trail in paths_trails])
trails_paths_ids = paths_trails.values_list('id', 'aggregations__path__id')
for trail_id, path_id in trails_paths_ids:
denormalized.setdefault(path_id, []).append(by_id[trail_id])
for path in qs:
path_trails = denormalized.get(path.id, [])
setattr(path, '_trails', path_trails)
yield path
class PathJsonList(MapEntityJsonList, PathList):
pass
class PathFormatList(MapEntityFormat, PathList):
pass
class PathDetail(MapEntityDetail):
model = Path
def context_data(self, *args, **kwargs):
context = super(PathDetail, self).context_data(*args, **kwargs)
context['can_edit'] = self.get_object().same_structure(self.request.user)
return context
class PathDocument(MapEntityDocument):
model = Path
def get_context_data(self, *args, **kwargs):
self.get_object().prepare_elevation_chart(self.request.build_absolute_uri('/'))
return super(PathDocument, self).get_context_data(*args, **kwargs)
class PathCreate(MapEntityCreate):
model = Path
form_class = PathForm
class PathUpdate(MapEntityUpdate):
model = Path
form_class = PathForm
@same_structure_required('core:path_detail')
def dispatch(self, *args, **kwargs):
return super(PathUpdate, self).dispatch(*args, **kwargs)
class PathDelete(MapEntityDelete):
model = Path
@same_structure_required('core:path_detail')
def dispatch(self, *args, **kwargs):
return super(PathDelete, self).dispatch(*args, **kwargs)
@login_required
@cache_last_modified(lambda x: Path.latest_updated())
@force_cache_validation
def get_graph_json(request):
cache = get_cache('fat')
key = 'path_graph_json'
result = cache.get(key)
latest = Path.latest_updated()
if result and latest:
cache_latest, json_graph = result
# Not empty and still valid
if cache_latest and cache_latest >= latest:
return HttpJSONResponse(json_graph)
# cache does not exist or is not up to date
# rebuild the graph and cache the json
graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.all())
json_graph = json.dumps(graph)
cache.set(key, (latest, json_graph))
return HttpJSONResponse(json_graph)
class TrailLayer(MapEntityLayer):
queryset = Trail.objects.existing()
properties = ['name']
class TrailList(MapEntityList):
queryset = Trail.objects.existing()
filterform = TrailFilterSet
columns = ['id', 'name', 'departure', 'arrival']
class TrailDetail(MapEntityDetail):
queryset = Trail.objects.existing()
def context_data(self, *args, **kwargs):
context = super(TrailDetail, self).context_data(*args, **kwargs)
context['can_edit'] = self.get_object().same_structure(self.request.user)
return context
class TrailDocument(MapEntityDocument):
queryset = Trail.objects.existing()
class TrailCreate(CreateFromTopologyMixin, MapEntityCreate):
model = Trail
form_class = TrailForm
class TrailUpdate(MapEntityUpdate):
queryset = Trail.objects.existing()
form_class = TrailForm
@same_structure_required('core:trail_detail')
def dispatch(self, *args, **kwargs):
return super(TrailUpdate, self).dispatch(*args, **kwargs)
class TrailDelete(MapEntityDelete):
queryset = Trail.objects.existing()
@same_structure_required('core:trail_detail')
def dispatch(self, *args, **kwargs):
return super(TrailDelete, self).dispatch(*args, **kwargs)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from contextlib import contextmanager
import cgi
import json
import os
import re
import sqlite3 as sql
import sys
print("Content-Type: text/plain\n\n")
DBFILE = os.path.expanduser('~/bikeracks.db')
KEY_ID = 'rack_id'
KEY_VALUE = 'url'
TABLE = 'photos'
INSPECT_DB_SCHEMA = r'''
PRAGMA TABLE_INFO({table});
'''.format(table=TABLE)
CREATE_DB_SCHEMA = r'''
CREATE TABLE IF NOT EXISTS {table} ({id} int, {value} text);
'''.format(table=TABLE, id=KEY_ID, value=KEY_VALUE)
DROP_DB_SCHEMA = r'''DROP TABLE IF EXISTS {table};'''.format(table=TABLE)
STORE_QUERY = r'''INSERT INTO {table} VALUES (:id, :value);'''.format(
table=TABLE)
RETRIEVE_QUERY = r'''SELECT {value} FROM {table} WHERE {id}=:id;'''.format(
value=KEY_VALUE, table=TABLE, id=KEY_ID)
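# NOTE: assumed addition -- remove() below references REMOVE_QUERY, which the
# original source never defines; a plausible definition consistent with the
# other queries would be:
REMOVE_QUERY = r'''DELETE FROM {table} WHERE {id}=:id AND {value}=:value;'''.format(
    table=TABLE, id=KEY_ID, value=KEY_VALUE)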
VALUE_VALIDATOR = re.compile(
    r'^https?://i\.imgur\.com/\w+\.(jpg|png|gif)$')
GET = 'REQUEST_METHOD_GET'
POST = 'REQUEST_METHOD_POST'
DELETE = 'REQUEST_METHOD_DELETE'
PUT = 'REQUEST_METHOD_PUT'
INVALID = 'REQUEST_METHOD_INVALID'
REQUEST_METHODS = {
'GET': GET,
'POST': POST,
'DELETE': DELETE,
'PUT': PUT,
}
GET_URI_VALIDATOR = re.compile(r"^/bikeracks/rack/\d+(?:-\d+)?$")
POST_URI_VALIDATOR = GET_URI_VALIDATOR
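# Illustrative matches (not part of the original source): the URI pattern
# accepts "/bikeracks/rack/42" and "/bikeracks/rack/42-7" but rejects
# "/bikeracks/rack/" and "/bikeracks/rack/abc".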
DBCONN = None
@contextmanager
def db_cursor():
global DBCONN
conn = DBCONN or sql.connect(DBFILE)
c = conn.cursor()
yield c
conn.commit()
def delete():
with db_cursor() as c:
c.execute(DROP_DB_SCHEMA)
def initialize_db():
with db_cursor() as c:
try:
c.execute(INSPECT_DB_SCHEMA)
cols = tuple(x[1] for x in c.fetchall())
assert cols == (KEY_ID, KEY_VALUE)
        except (AssertionError, sql.OperationalError):
delete()
c.execute(CREATE_DB_SCHEMA)
def validate_params(values):
values = values.copy()
if 'id' in values:
try:
values['id'] = int(values['id'])
except ValueError:
return False, 'Bad ID: {0}'.format(values['id'])
if values['id'] < 0:
return False, 'Negative ID: {0}'.format(values['id'])
if 'value' in values:
if values['value'] and not VALUE_VALIDATOR.match(values['value']):
return False, 'Bad Value: ' + str(values['value'])
else:
values['value'] = cgi.escape(values['value'])
return True, values
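# Illustrative examples (not part of the original source):
#     validate_params({'id': '7', 'value': 'http://i.imgur.com/abc.jpg'})
#         -> (True, {'id': 7, 'value': 'http://i.imgur.com/abc.jpg'})
#     validate_params({'id': 'x'})
#         -> (False, 'Bad ID: x')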
def _params_query(cursor, params, query):
good, res = validate_params(params)
if good:
params = res
cursor.execute(query, params)
return "OK!"
else:
return res
def store(id, value):
with db_cursor() as c:
return _params_query(c, {'id': id, 'value': value}, STORE_QUERY)
def remove(id, value):
with db_cursor() as c:
return _params_query(c, {'id': id, 'value': value}, REMOVE_QUERY)
def retrieve(id):
with db_cursor() as c:
_params_query(c, {'id': id}, RETRIEVE_QUERY)
links = [link for links in c.fetchall() for link in links]
return links
def validate_route(form, get_validator, post_validator):
    BAD_PARAM = 'Required parameter {0!r} missing or mismatched.\n\n{1!r}'
BAD_REQUEST = 'URL invalid for request of type {0!r}'
method = INVALID
params = {'reason': 'unable to route request'}
uri = os.environ['REQUEST_URI']
request_type = REQUEST_METHODS.get(os.environ['REQUEST_METHOD'], INVALID)
if request_type == GET:
if get_validator.match(uri):
key = get_from_form(form, KEY_ID)
if key is not None:
method = GET
params = {KEY_ID: key}
else:
method = INVALID
params = {'reason': BAD_PARAM.format(KEY_ID, form)}
else:
method = INVALID
params = {'reason': BAD_REQUEST.format(request_type)}
return method, params
elif request_type == POST:
if post_validator.match(uri):
key = get_from_form(form, KEY_ID)
if key is not None:
value = get_from_form(form, KEY_VALUE)
if value is not None:
method = POST
params = {KEY_ID: key, KEY_VALUE: value}
return method, params
else:
method = INVALID
                    params = {'reason': BAD_PARAM.format(KEY_VALUE, form)}
return method, params
else:
method = INVALID
params = {'reason': BAD_PARAM.format(KEY_ID, form)}
return method, params
else:
method = INVALID
params = {'reason': BAD_REQUEST.format(request_type)}
return method, params
return method, params
def get_from_form(form, key):
if key in form:
if isinstance(form[key], list):
val = getattr(form[key][0], 'value', form[key][0])
if all(v.value == val for v in form[key]):
return val
else:
val = getattr(form[key], 'value', form[key])
return val
return None
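# Note (added for clarity, not part of the original source): for a repeated CGI
# field, get_from_form() returns the shared value only when every copy agrees;
# a single field returns its value and a missing key returns None.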
def dump_environ():
for k, v in os.environ.items():
print("{0}: {1}".format(k, v))
def main():
try:
form = cgi.FieldStorage()
method, params = validate_route(
form, GET_URI_VALIDATOR, POST_URI_VALIDATOR)
initialize_db()
resp = {'error': 'Undefined response'}
if method == GET:
resp = {'data': retrieve(params[KEY_ID])}
elif method == POST:
resp = {'data': store(params[KEY_ID], params[KEY_VALUE])}
elif method == INVALID:
resp = {'error': params['reason']}
else:
resp = {'error': "This should never happen. Go hide under the bed."}
print(json.dumps(resp))
except Exception as e:
import traceback
a, b, c = sys.exc_info()
traceback.print_exception(a, b, c, 100, sys.stdout)
if __name__ == '__main__':
main()
|
|
# vim: set ts=2 expandtab:
'''
Module: control_characters.py
Desc: ARIB (Japanese Closed Caption) Control character support
Author: John O'Neil
Email: [email protected]
DATE: Sunday, March 9th 2014
'''
import read
from code_set import code_set_handler_from_final_byte
from code_set import in_code_set_table
from arib_exceptions import DecodingError
DEBUG = False
class NUL(object):
'''Null
  Control code which can be added or deleted without affecting the
  information content.
'''
CODE = 0x00
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'NUL'
@staticmethod
def handler(f):
return NUL(f)
class SP(object):
'''Space
'''
CODE = 0x20
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u' '
@staticmethod
def handler(f):
return SP(f)
class DEL(object):
'''Delete
'''
# See control character table 7-14 on page 89 arib std b-24
CODE = 0x7f
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'DEL'
@staticmethod
def handler(f):
return DEL(f)
class BEL(object):
'''Bell
Control code used when calling attention (alarm or signal)
'''
  CODE = 0x07
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'BEL'
@staticmethod
def handler(f):
return BEL(f)
class APB(object):
'''Active position backward
Active position goes backward along character path in the length of
character path of character field. When the reference point of the character
field exceeds the edge of display area by this movement, move in the
opposite side of the display area along the character path of the active
position, for active position up.
'''
CODE = 0x08
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'APB'
@staticmethod
def handler(f):
return APB(f)
class APF(object):
'''Active position forward
Active position goes forward along character path in the length of
character path of character field. When the reference point of the character
field exceeds the edge of display area by this movement, move in the
opposite side of the display area along the character path of the active
position, for active position down.
'''
CODE = 0x09
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'APF'
@staticmethod
def handler(f):
return APF(f)
class APD(object):
'''Active position down
Moves to next line along line direction in the length of line direction of
the character field. When the reference point of the character field exceeds
the edge of display area by this movement, move to the first line of the
display area along the line direction.
'''
CODE = 0x0a
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'APD'
@staticmethod
def handler(f):
return APD(f)
class APU(object):
'''Active position up
Moves to the previous line along line direction in the length of line
direction of the character field. When the reference point of the character
field exceeds the edge of display area by this movement, move to the last
line of the display area along the line direction.
'''
CODE = 0x0b
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'APU'
@staticmethod
def handler(f):
return APU(f)
class CS(object):
'''Clear screen
Display area of the display screen is erased.
'''
CODE = 0x0c
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<clear screen>'
@staticmethod
def handler(f):
return CS(f)
class APR(object):
'''Active position return
Active position down is made, moving to the first position of the same
line.
'''
CODE = 0x0d
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'APR'
@staticmethod
def handler(f):
return APR(f)
class LS1(object):
'''Locking shift 1
Code to invoke character code set.
Sets GL code area to current G1 code set
'''
CODE = 0x0e
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'LS1'
@staticmethod
def handler(f):
return LS1(f)
class LS0(object):
'''Locking shift 0
Code to invoke character code set.
Sets GL code area to the current G0 code set
'''
CODE = 0x0f
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'LS0'
@staticmethod
def handler(f):
return LS0(f)
class PAPF(object):
'''Parameterized active position forward
Active position forward is made in specified times by parameter P1 (1
byte).
Parameter P1 shall be within the range of 04/0 to 07/15 and time shall be
specified within the range of 0 to 63 in binary value of 6-bit from b6 to b1.
(b8 and b7 are not used.)
'''
CODE = 0x16
def __init__(self, f):
    # read the single byte parameter for now but ignore its effect on text placement
# TODO: implement proper screen text placement
read.ucb(f)
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 2
def __unicode__(self):
return u'<PAPF>'
@staticmethod
def handler(f):
return PAPF(f)
class CAN(object):
'''Cancel
From the current active position to the end of the line is covered with
background colour in the width of line direction in the current character
field. Active position is not moved.
'''
CODE = 0x18
def __init__(self, f):
pass
@staticmethod
def handler(f):
pass
class SS2(object):
'''Single shift 2
Code to invoke character code set.
Sets the GL code area to the G2 code set for one character
'''
CODE = 0x19
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'SS2'
@staticmethod
def handler(f):
return SS2(f)
class LS2(object):
'''Class only generated by ESC sequence below.
Represents Locking shift in GL area to current G2 codeset
'''
CODE = 0x6e
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 2
def __call__(self, decoder):
    '''Cause an INVOCATION change on the decoder
'''
# print 'setting GL to G2 with contents {g}'.format(g=str(type(decoder._G2.get())))
decoder._GL = decoder._G2
def __unicode__(self):
return u'LS2'
@staticmethod
def handler(f=None):
return LS2(f)
class LS3(object):
'''Class only generated by ESC sequence below.
Represents Locking shift in GL area to current G3 codeset
'''
CODE = 0x6f
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 2
def __call__(self, decoder):
    '''Cause an INVOCATION change on the decoder
'''
decoder._GL = decoder._G3
def __unicode__(self):
return u'LS3'
@staticmethod
def handler(f=None):
return LS3(f)
class LS1R(object):
'''Class only generated by ESC sequence below.
Represents Locking shift in GR area to current G1 codeset
'''
CODE = 0x7e
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 2
def __call__(self, decoder):
    '''Cause an INVOCATION change on the decoder
'''
decoder._GR = decoder._G1
def __unicode__(self):
return u'LS1R'
@staticmethod
def handler(f=None):
return LS1R(f)
class LS2R(object):
'''Class only generated by ESC sequence below.
Represents Locking shift in GR area to current G2 codeset
'''
CODE = 0x7d
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 2
def __call__(self, decoder):
    '''Cause an INVOCATION change on the decoder
'''
decoder._GR = decoder._G2
def __unicode__(self):
return u'LS2R'
@staticmethod
def handler(f=None):
return LS2R(f)
class LS3R(object):
'''Class only generated by ESC sequence below.
Represents Locking shift in GR area to current G3 codeset
'''
CODE = 0x7c
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 2
def __call__(self, decoder):
    '''Cause an INVOCATION change on the decoder
'''
decoder._GR = decoder._G3
def __unicode__(self):
return u'LS3R'
@staticmethod
def handler(f=None):
return LS3R(f)
INVOCATION_TABLE = {
LS2.CODE : LS2.handler,
LS3.CODE : LS3.handler,
LS1R.CODE : LS1R.handler,
LS2R.CODE : LS2R.handler,
LS3R.CODE : LS3R.handler,
}
class G0(object):
CODE = 0x28
def __init__(self):
pass
@staticmethod
def factory():
return G0()
def load(self, esc, f):
b = read.ucb(f)
if b == DRCS.CODE:
if DEBUG:
print 'G0 DRCS {:#x}'.format(b)
esc._args.append(b)
DRCS.handler(esc, f)
elif in_code_set_table(b):
if DEBUG:
print 'G0 CODESET {:#x}'.format(b)
esc._args.append(b)
else:
raise DecodingError()
def designate(self, decoder, final_byte):
    '''Cause a DESIGNATION change on the decoder
'''
decoder._G0.set(code_set_handler_from_final_byte(final_byte))
class G1(object):
CODE = 0x29
def __init__(self):
pass
@staticmethod
def factory():
return G1()
def load(self, esc, f):
b = read.ucb(f)
if b == DRCS.CODE:
if DEBUG:
print 'G1 DRCS {:#x}'.format(b)
esc._args.append(b)
DRCS.handler(esc, f)
elif in_code_set_table(b):
if DEBUG:
print 'G1 CODESET {:#x}'.format(b)
esc._args.append(b)
else:
raise DecodingError()
def designate(self, decoder, final_byte):
    '''Cause a DESIGNATION change on the decoder
'''
decoder._G1.set(code_set_handler_from_final_byte(final_byte))
class G2(object):
CODE = 0x2a
def __init__(self):
pass
@staticmethod
def factory():
return G2()
def load(self, esc, f):
b = read.ucb(f)
if b == DRCS.CODE:
if DEBUG:
print 'G2 DRCS {:#x}'.format(b)
esc._args.append(b)
DRCS.handler(esc, f)
elif in_code_set_table(b):
if DEBUG:
print 'G2 CODESET {:#x}'.format(b)
esc._args.append(b)
else:
raise DecodingError()
def designate(self, decoder, final_byte):
    '''Cause a DESIGNATION change on the decoder
'''
decoder._G2.set(code_set_handler_from_final_byte(final_byte))
class G3(object):
CODE = 0x2b
def __init__(self):
pass
@staticmethod
def factory():
return G3()
def load(self, esc, f):
b = read.ucb(f)
if b == DRCS.CODE:
if DEBUG:
print 'G3 DRCS {:#x}'.format(b)
esc._args.append(b)
DRCS.handler(esc, f)
elif in_code_set_table(b):
if DEBUG:
print 'G3 CODESET {:#x}'.format(b)
esc._args.append(b)
else:
raise DecodingError()
def designate(self, decoder, final_byte):
    '''Cause a DESIGNATION change on the decoder
'''
decoder._G3.set(code_set_handler_from_final_byte(final_byte))
DESIGNATION_TABLE = {
G0.CODE : G0.factory,
G1.CODE : G1.factory,
G2.CODE : G2.factory,
G3.CODE : G3.factory,
}
class TwoByte(object):
CODE = 0x24
@staticmethod
def handler(esc, f):
b = read.ucb(f)
if in_code_set_table(b):
esc._args.append(b)
#return code_set_from_final_byte(b, f)
elif b in DESIGNATION_TABLE:
esc._args.append(b)
d = DESIGNATION_TABLE[b]()
d.load(esc, f)
else:
raise DecodingError()
class DRCS(object):
CODE = 0x20
@staticmethod
def handler(esc, f):
b = read.ucb(f)
if DEBUG:
print 'DRCS {:#x}'.format(b)
if in_code_set_table(b):
esc._args.append(b)
else:
#return code_set_from_final_byte(b, f)
raise DecodingError()
class ESC(object):
'''Escape
Code for code extension.
'''
CODE = 0x1b
  #Mapping of ESC-led byte patterns to code "designations"
#refer to ARIB STD B-24 table 7-12 (pg. 56)
GRAPHIC_SETS_TABLE = [
[G0.CODE,],
[G1.CODE,],
[G2.CODE,],
[G3.CODE,],
[TwoByte.CODE, G0.CODE,],
[TwoByte.CODE, G1.CODE,],
[TwoByte.CODE, G2.CODE,],
[TwoByte.CODE, G3.CODE,],
[G0.CODE, DRCS.CODE,],
[G1.CODE, DRCS.CODE,],
[G2.CODE, DRCS.CODE,],
[G3.CODE, DRCS.CODE,],
[TwoByte.CODE, G0.CODE, DRCS.CODE,],
[TwoByte.CODE, G1.CODE, DRCS.CODE,],
[TwoByte.CODE, G2.CODE, DRCS.CODE,],
[TwoByte.CODE, G3.CODE, DRCS.CODE,],
]
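  # Illustrative note (not part of the original source): the index of a
  # matching row, taken modulo 4, identifies the designation G0..G3 resolved by
  # find_designation() below; e.g. [TwoByte.CODE, G2.CODE, DRCS.CODE] is row 14
  # and 14 % 4 == 2, i.e. a DRCS designation into G2.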
def __init__(self, f):
    '''The interpretation and bytes read
    after reading 'ESC' can be complex. Here
    we just attempt to read all the
    required args, and leave interpretation for later
'''
b = read.ucb(f)
if DEBUG:
print 'esc first byte is ' + '{:#x}'.format(b)
self._args = []
self._args.append(b)
if b in INVOCATION_TABLE:
if DEBUG:
print 'ESC INVOCATION {:#x}'.format(b)
INVOCATION_TABLE[b](f)
#self._args.append(next)
elif b in DESIGNATION_TABLE:
if DEBUG:
print 'ESC DESIGNATION {:#x}'.format(b)
d = DESIGNATION_TABLE[b]()
d.load(self, f)
#self._args.append(next)
elif b == TwoByte.CODE:
if DEBUG:
print 'ESC TWO BYTE {:#x}'.format(b)
TwoByte.handler(self, f)
#self._args.append(next)
else:
raise DecodingError()
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return len(self._args) + 1
def __unicode__(self):
return u'ESC {args}'.format(args=u' '.join(u'{:#x}'.format(x) for x in self._args))
def is_invocation(self):
'''Return whether this ESC control sequence
describes an invocation or not
'''
return self._args[0] in INVOCATION_TABLE
def invoke(self, decoder):
'''Carry out an INVOCATION on a decoder object
'''
if not self.is_invocation():
raise DecodingError('Attempting to get invocation from ESC sequence that has none.')
invocation = INVOCATION_TABLE[self._args[0]]()
if DEBUG:
print 'invoking {:#x}'.format(self._args[0])
invocation(decoder)
def is_designation(self):
#print 'ESC ' + str(self)
if len(self._args) < 2:
raise DecodingError()
designation = self._args[:-1]
return designation in ESC.GRAPHIC_SETS_TABLE
def designate(self, decoder):
    '''Carry out a DESIGNATION on a decoder object
'''
if not self.is_designation():
raise DecodingError('Attempting to get designation from ESC sequence that has none.')
final_byte = self._args[-1]
byte_pattern = self._args[:-1]
if DEBUG:
print 'designating via final_byte {:#x}'.format(final_byte)
d = ESC.find_designation(byte_pattern)
designation = DESIGNATION_TABLE[d]()
designation.designate(decoder, final_byte)
def to_designation(self):
'''Look at current ESC arguments and return their meaning
    as a change in the designation-to-code-set mapping
'''
if DEBUG:
print 'ESC ' + str(self)
if len(self._args) < 2:
raise DecodingError()
#strip the last byte of _args to examine, since it varies by final byte
#(i.e. the final byte indicates the code set we'll change to)
final_byte = self._args[-1]
#TODO: check final_byte to make sure it's code_set or throw
designation = self._args[:-1]
if DEBUG:
print 'final byte: {b}'.format(b=final_byte)
print 'designation: {d}'.format(d=str(designation))
code_set = code_set_handler_from_final_byte(final_byte)
d = 0
if designation in ESC.GRAPHIC_SETS_TABLE:
if DEBUG:
print 'designation in table'
#for now i'm assuming i only need the designation g0-g3
#and the final byte (to get the new code set)
d = ESC.find_designation(designation)
else:
if DEBUG:
print 'not in table'
raise DecodingError()
return (d, code_set)
@staticmethod
def find_designation(bytes):
for i, pattern in enumerate(ESC.GRAPHIC_SETS_TABLE):
if DEBUG:
print '{b} : {i} {p}'.format(b=str(bytes), i=str(i), p=str(pattern))
if bytes == pattern:
if DEBUG:
print 'found designation match at {p} at index {i} and desig {d}'.format(p=str(pattern), i=str(i), d=str(i%4))
        # use a sorted key list so the G0..G3 order does not depend on dict ordering
        return sorted(DESIGNATION_TABLE.keys())[i % 4]
#raise decoding error?
@staticmethod
def handler(f):
    '''Most of these command handlers just return an instance of the
    associated class. But ESC is more complex.
    Depending upon the character sequence, it can return several different
    class instances, each representing a different sequence, e.g.:
<ESC><0x6e> --> LS2
<ESC><0x7c> --> LS2R
<ESC><0x24><0x2b><final byte> --> set 2 byte G3 code set in G area
(GL or GR?)according to final byte
<ESC><0X24><0x2b><0x20><final byte> --> set 2 byte DRCS into G3 code
area according to final byte
'''
return ESC(f)
'''
b = read.ucb(f)
if b in INVOCATION_TABLE:
print 'ESC INVOCATION {:#x}'.format(b)
return INVOCATION_TABLE[b](f)
if b in DESIGNATION_TABLE:
print 'ESC DESIGNATION {:#x}'.format(b)
return DESIGNATION_TABLE[b](f)
if b == TwoByte.CODE:
print 'ESC TWO BYTE {:#x}'.format(b)
return TwoByte.handler(f)
raise DecodingError()
'''
class APS(object):
'''Active position set
Specified times of active position down is made by P1 (1 byte) of the first
parameter in line direction length of character field from the first position
of the first line of the display area. Then specified times of active position
forward is made by the second parameter P2 (1 byte) in the character path
length of character field. Each parameter shall be within the range of 04/0
to 07/15 and specify time within the range of 0 to 63 in binary value of 6-
bit from b6 to b1. (b8 and b7 are not used.)
'''
CODE = 0x1C
def __init__(self, f):
self._args = []
self._args.append(read.ucb(f)&0x3f)#p1
self._args.append(read.ucb(f)&0x3f)#p2
if DEBUG:
      print(u'APS: --> {:d},{:d}'.format(self._args[0], self._args[1]).encode('utf-8'))
@property
def col(self):
return self._args[1]
@property
def row(self):
return self._args[0]
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return len(self._args) + 1
def __unicode__(self):
    return u'\n<Screen Position to {:d},{:d}>'.format(self._args[0], self._args[1])
@staticmethod
def handler(f):
return APS(f)
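# Illustrative example (not part of the original source): an APS followed by
# the parameter bytes 0x45 0x52 decodes to row 5 (0x45 & 0x3f) and column 18
# (0x52 & 0x3f), i.e. "move the active position to line 5, column 18".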
class SS3(object):
'''Single shift 3
Code to invoke character code set.
Sets the GL code area to the G3 code set for one character
'''
CODE = 0x1d
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'SS3'
@staticmethod
def handler(f):
return SS3(f)
class RS(object):
'''Record separator
It is information division code and declares identification and introduction
of data header.
'''
CODE = 0x1e
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'RS'
@staticmethod
def handler(f):
pass
class US(object):
'''Unit separator
It is information division code and declares identification and introduction
of data unit.
'''
CODE = 0x1f
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'US'
@staticmethod
def handler(f):
pass
#Color support
class BKF(object):
  '''Foreground colour: black, CMLA: 0 (BLACK FOREGROUND)
  This indicates that the foreground colour is set to black and the colour map
  lower address (CMLA), specifying the colouring value of the portrayal plane,
  is set to 0. The same scheme applies to the colour codes that follow.
'''
CODE = 0x80
def __init__(self, f):
pass
def __len__(self):
    '''Defining len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<black>'
@staticmethod
def handler(f):
return BKF(f)
class COL(object):
'''Color Controls
Colour control COL P1 (1 byte)
Sets foreground colour, background colour, half foreground colour, half
background colour and CMLA by the parameter.
    For colours between foreground and background in a gradation font, the
    colour nearer to the foreground colour is the half foreground colour and
    the colour nearer to the background colour is the half background colour.
'''
CODE = 0x90
def __init__(self, f):
self._args = []
p1 = read.ucb(f)
self._args.append(p1)
if p1 == 0x20:
self._args.append(read.ucb(f))
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return len(self._args) + 1
def __unicode__(self):
return u'COL {args}'.format(args=u' '.join(u'{:#x}'.format(x) for x in self._args))
@staticmethod
def handler(f):
return COL(f)
class RDF(object):
'''Foreground colour: red
'''
CODE = 0x81
def __init__(self, f):
pass
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<red>'
@staticmethod
def handler(f):
return RDF(f)
class FLC(object):
'''Flashing control
Specifies the beginning and the end of flashing and the differences of the
normal phase and the reverse phase by the parameter P1 (1 byte).
'''
CODE = 0x91
def __init__(self, f):
self._args = []
self._args.append(read.ucb(f))
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return len(self._args) + 1
def __unicode__(self):
return u'FLC {args}'.format(args=u' '.join(u'{:#x}'.format(x) for x in self._args))
@staticmethod
def handler(f):
return FLC(f)
class GRF(object):
'''Foreground colour: green
'''
CODE = 0x82
def __init__(self, f):
pass
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<green>'
@staticmethod
def handler(f):
return GRF(f)
class CDC(object):
'''Conceal display controls
Specifies the beginning and end of concealing and the type of concealing by
the parameter.
'''
CODE = 0x92
def __init__(self, f):
pass
@staticmethod
def handler(f):
pass
class YLF(object):
'''Foreground colour: yellow
'''
CODE = 0x83
def __init__(self, f):
pass
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<yellow>'
@staticmethod
def handler(f):
return YLF(f)
class POL(object):
'''Pattern Polarity Controls
'''
CODE = 0x93
def __init__(self, f):
pass
@staticmethod
def handler(f):
pass
class BLF(object):
'''Foreground colour: blue
'''
CODE = 0x84
def __init__(self, f):
pass
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<blue>'
@staticmethod
def handler(f):
return BLF(f)
class WMM(object):
'''Writing mode modification
    Specifies a change of the writing mode to the display memory by
    parameter P1 (1 byte).
    For the middle colours of a gradation font, portions set to the half
    foreground and half background colours are both treated as foreground
    colour.
'''
CODE = 0x94
def __init__(self, f):
pass
@staticmethod
def handler(f):
pass
class MGF(object):
'''Foreground colour: magenta
'''
CODE = 0x85
def __init__(self, f):
pass
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<magenta>'
@staticmethod
def handler(f):
return MGF(f)
class MACRO(object):
'''Macro command
Macro definition start, macro definition mode and macro definition end is set
by parameter P1 (1 byte).
'''
CODE = 0x95
def __init__(self, f):
pass
@staticmethod
def handler(f):
pass
class CNF(object):
'''Foreground colour: cyan
'''
CODE = 0x86
def __init__(self, f):
pass
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<cyan>'
@staticmethod
def handler(f):
return CNF(f)
class WHF(object):
'''White foreground color (text color)
'''
CODE = 0x87
def __init__(self, f):
pass
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<white>'
@staticmethod
def handler(f):
return WHF(f)
class HLC(object):
'''Highlighting character block
Starting and ending of enclosure are set by parameter P1 (1 byte).
'''
CODE = 0x97
def __init__(self, f):
self._code = read.ucb(f)
self._start = False
if self._code & 0x1:
self._start = True
def __len__(self):
return 2
def __unicode__(self):
if self._start:
return u'<Highlight start>'
else:
return u'<Highlight end>'
@staticmethod
def handler(f):
return HLC(f)
class SSZ(object):
''' Small size
Specifies the character size is small.
'''
CODE = 0x88
def __init__(self, f):
if DEBUG:
print(u'SSZ: --> 0x88'.encode('utf-8'))
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<Small Text>'
@staticmethod
def handler(f):
return SSZ(f)
class RPC(object):
'''Repeat character
The repeat code RPC with one parameter P1 (1 byte) causes a displayable
character or mosaic that immediately follows the code, to be displayed a
number of times specified by the parameter P1.
'''
CODE = 0x98
def __init__(self, f):
pass
@staticmethod
def handler(f):
pass
class MSZ(object):
'''Middle size
Specifies the character size is middle.
'''
CODE = 0x89
def __init__(self, f):
if DEBUG:
print(u'MSZ: --> 0x89'.encode('utf-8'))
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<Medium Text>'
@staticmethod
def handler(f):
return MSZ(f)
class SPL(object):
'''Stop Lining
Underlining and mosaic division process is terminated.
'''
    CODE = 0x1d  # NOTE: same value as SS3.CODE; SPL is not registered in COMMAND_TABLE below
def __init__(self, f):
pass
@staticmethod
def handler(f):
pass
class NSZ(object):
'''Normal size
Specifies the character size is normal.
'''
CODE = 0x8a
def __init__(self, f):
if DEBUG:
print(u'NSZ: --> 0x8a'.encode('utf-8'))
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return 1
def __unicode__(self):
return u'<Normal Text>'
@staticmethod
def handler(f):
return NSZ(f)
class STL(object):
'''Start lining
    The composition of mosaics A and B displayed after this code is not made.
    When a mosaic is included while composing non-spacing and composition
    commands, a dividing process (the mosaic element is split into small
    elements, 1/2 in the across direction and 1/3 in length, with space left
    around them) should be made after composition. In other cases, an
    underline is drawn.
'''
CODE = 0x9a
def __init__(self, f):
pass
@staticmethod
def handler(f):
pass
class SZX(object):
'''Character size controls
The character size is set in parameter P1 (1 byte).
'''
CODE = 0x8b
def __init__(self, f):
if DEBUG:
print(u'SZX: --> 0x8b'.encode('utf-8'))
@staticmethod
def handler(f):
pass
class CSI(object):
'''Control Sequence Initiator
Code for code system extension indicated in table 7-14.
'''
CODE = 0x9b
def __init__(self, f):
'''read from stream until we get "space" and then our CSI
specific control character.
'''
self._args = []
c = read.ucb(f)
        while c != 0x20:
self._args.append(c)
c = read.ucb(f)
self._args.append(c)
#lastly read the command code
c = read.ucb(f)
self._args.append(c)
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return len(self._args) + 1
def __unicode__(self):
try:
            return u'<CS:"{seq}">'.format(seq=u''.join(u'{:c}'.format(x) for x in self._args))
except UnicodeDecodeError:
return u'<CS:"{seq}">'.format(seq=u''.join(u':{h}'.format(h=hex(x)) for x in self._args))
@staticmethod
def handler(f):
return CSI(f)
class TIME(object):
'''Time
The time control designation is made by parameter P1 (1 byte) and P2 (1 byte)
'''
CODE = 0x9d
def __init__(self, f):
self._args = []
self._args.append(read.ucb(f))
self._args.append(read.ucb(f))
def __len__(self):
        '''Defining the len() operator to help
in calculating bytes read
'''
return len(self._args) + 1
def __unicode__(self):
return u'TIME {args}'.format(args=u' '.join(u'{:#x}'.format(x) for x in self._args))
@staticmethod
def handler(f):
return TIME(f)
COMMAND_TABLE = {
NUL.CODE : NUL.handler,
SP.CODE : SP.handler,
DEL.CODE : DEL.handler,
BEL.CODE : BEL.handler,
APB.CODE : APB.handler,
APF.CODE : APF.handler,
APD.CODE : APD.handler,
APU.CODE : APU.handler,
CS.CODE : CS.handler,
APR.CODE : APR.handler,
LS1.CODE : LS1.handler,
LS0.CODE : LS0.handler,
PAPF.CODE : PAPF.handler,
#CAN.CODE : CAN.handler,
SS2.CODE : SS2.handler,
ESC.CODE : ESC.handler,
APS.CODE : APS.handler,
SS3.CODE : SS3.handler,
#RS.CODE : RS.handler,
#US.CODE : US.handler,
BKF.CODE : BKF.handler,
COL.CODE : COL.handler,
RDF.CODE : RDF.handler,
FLC.CODE : FLC.handler,
GRF.CODE : GRF.handler,
#CDC.CODE : CDC.handler,
YLF.CODE : YLF.handler,
#POL.CODE : POL.handler,
BLF.CODE : BLF.handler,
#WMM.CODE : WMM.handler,
MGF.CODE : MGF.handler,
#MACRO.CODE : MACRO.handler,
CNF.CODE : CNF.handler,
WHF.CODE : WHF.handler,
HLC.CODE : HLC.handler,
SSZ.CODE : SSZ.handler,
#RPC.CODE : RPC.handler,
MSZ.CODE : MSZ.handler,
#SPL.CODE : SPL.handler,
NSZ.CODE : NSZ.handler,
#STL.CODE : STL.handler,
#SZX.CODE : SZX.handler,
CSI.CODE : CSI.handler,
TIME.CODE : TIME.handler,
}
def is_control_character(char):
'''return True if this is an ARIB control character
'''
return char in COMMAND_TABLE
def handle_control_character(b, f):
'''
handle a given control character read from stream f
'''
return COMMAND_TABLE[b](f)
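# --- Usage sketch (added for illustration, not part of the original module) ---
# A minimal example of how the dispatch table above might be driven. It
# assumes `read.ucb` (used throughout this module) accepts any object with a
# binary read() method, so an io.BytesIO stream stands in for a file. The
# byte values are arbitrary: 0x88 is SSZ (no parameters) and 0x97 0x01 is an
# HLC "highlight start".
def _demo_decode():
    import io
    f = io.BytesIO(b'\x88\x97\x01')
    for _ in range(2):
        b = read.ucb(f)
        if is_control_character(b):
            cmd = handle_control_character(b, f)
            print unicode(cmd).encode('utf-8')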
|
|
import warnings
from nose.tools import assert_true, assert_raises
import numpy as np
import numpy.testing as npt
from dipy.data import get_data
from dipy.core.gradients import (gradient_table, GradientTable,
gradient_table_from_bvals_bvecs,
reorient_bvecs, generate_bvecs)
from dipy.io.gradients import read_bvals_bvecs
def test_btable_prepare():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(7)
bvals[0] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bt.bvecs, bvecs)
bt.info
fimg, fbvals, fbvecs = get_data('small_64D')
bvals = np.load(fbvals)
bvecs = np.load(fbvecs)
bvecs = np.where(np.isnan(bvecs), 0, bvecs)
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bt.bvecs, bvecs)
bt2 = gradient_table(bvals, bvecs.T)
npt.assert_array_equal(bt2.bvecs, bvecs)
btab = np.concatenate((bvals[:, None], bvecs), axis=1)
bt3 = gradient_table(btab)
npt.assert_array_equal(bt3.bvecs, bvecs)
npt.assert_array_equal(bt3.bvals, bvals)
bt4 = gradient_table(btab.T)
npt.assert_array_equal(bt4.bvecs, bvecs)
npt.assert_array_equal(bt4.bvals, bvals)
# Test for proper inputs (expects either bvals/bvecs or 4 by n):
assert_raises(ValueError, gradient_table, bvecs)
def test_GradientTable():
gradients = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 1],
[3, 4, 0],
[5, 0, 12]], 'float')
expected_bvals = np.array([0, 1, 1, 5, 13])
expected_b0s_mask = expected_bvals == 0
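    # Adding the boolean b0 mask to the b-values avoids dividing by zero for
    # the b=0 row, so its unit vector stays [0, 0, 0].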
expected_bvecs = gradients / (expected_bvals + expected_b0s_mask)[:, None]
gt = GradientTable(gradients, b0_threshold=0)
npt.assert_array_almost_equal(gt.bvals, expected_bvals)
npt.assert_array_equal(gt.b0s_mask, expected_b0s_mask)
npt.assert_array_almost_equal(gt.bvecs, expected_bvecs)
npt.assert_array_almost_equal(gt.gradients, gradients)
gt = GradientTable(gradients, b0_threshold=1)
npt.assert_array_equal(gt.b0s_mask, [1, 1, 1, 0, 0])
npt.assert_array_equal(gt.bvals, expected_bvals)
npt.assert_array_equal(gt.bvecs, expected_bvecs)
npt.assert_raises(ValueError, GradientTable, np.ones((6, 2)))
npt.assert_raises(ValueError, GradientTable, np.ones((6,)))
def test_gradient_table_from_bvals_bvecs():
sq2 = np.sqrt(2) / 2
bvals = [0, 1, 2, 3, 4, 5, 6, 0]
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2],
[0, 0, 0]])
gt = gradient_table_from_bvals_bvecs(bvals, bvecs, b0_threshold=0)
npt.assert_array_equal(gt.bvecs, bvecs)
npt.assert_array_equal(gt.bvals, bvals)
npt.assert_array_equal(gt.gradients, np.reshape(bvals, (-1, 1)) * bvecs)
npt.assert_array_equal(gt.b0s_mask, [1, 0, 0, 0, 0, 0, 0, 1])
# Test nans are replaced by 0
new_bvecs = bvecs.copy()
new_bvecs[[0, -1]] = np.nan
gt = gradient_table_from_bvals_bvecs(bvals, new_bvecs, b0_threshold=0)
npt.assert_array_equal(gt.bvecs, bvecs)
# Bvalue > 0 for non-unit vector
bad_bvals = [2, 1, 2, 3, 4, 5, 6, 0]
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
    # Number of gradients inconsistent between bvals and bvecs
bad_bvals = np.ones(7)
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# bvals not 1d
bad_bvals = np.ones((1, 8))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# bvec not 2d
bad_bvecs = np.ones((1, 8, 3))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
# bvec not (N, 3)
bad_bvecs = np.ones((8, 2))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
# bvecs not unit vectors
bad_bvecs = bvecs * 2
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
    # Test that **kwargs get passed along
gt = gradient_table_from_bvals_bvecs(bvals, bvecs, b0_threshold=0,
big_delta=5, small_delta=2)
npt.assert_equal(gt.big_delta, 5)
npt.assert_equal(gt.small_delta, 2)
def test_b0s():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(8)
bvals[0] = 0
bvals[7] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2],
[0, 0, 0]])
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(np.where(bt.b0s_mask > 0)[0], np.array([0, 7]))
npt.assert_array_equal(np.where(bt.b0s_mask == 0)[0], np.arange(1, 7))
def test_gtable_from_files():
fimg, fbvals, fbvecs = get_data('small_101D')
gt = gradient_table(fbvals, fbvecs)
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
npt.assert_array_equal(gt.bvals, bvals)
npt.assert_array_equal(gt.bvecs, bvecs)
def test_deltas():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(7)
bvals[0] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
bt = gradient_table(bvals, bvecs, big_delta=5, small_delta=2)
npt.assert_equal(bt.big_delta, 5)
npt.assert_equal(bt.small_delta, 2)
def test_qvalues():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(7)
bvals[0] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
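    # With big_delta=8 and small_delta=6 the effective diffusion time is
    # tau = big_delta - small_delta / 3 = 6, which is why bvals are divided
    # by 6 below (b = (2 * pi * q)**2 * tau).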
qvals = np.sqrt(bvals / 6) / (2 * np.pi)
bt = gradient_table(bvals, bvecs, big_delta=8, small_delta=6)
npt.assert_almost_equal(bt.qvals, qvals)
def test_reorient_bvecs():
sq2 = np.sqrt(2) / 2
bvals = np.concatenate([[0], np.ones(6) * 1000])
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
gt = gradient_table_from_bvals_bvecs(bvals, bvecs, b0_threshold=0)
# The simple case: all affines are identity
affs = np.zeros((6, 4, 4))
for i in range(4):
affs[:, i, i] = 1
# We should get back the same b-vectors
new_gt = reorient_bvecs(gt, affs)
npt.assert_equal(gt.bvecs, new_gt.bvecs)
# Now apply some rotations
rotation_affines = []
rotated_bvecs = bvecs[:]
for i in np.where(~gt.b0s_mask)[0]:
rot_ang = np.random.rand()
cos_rot = np.cos(rot_ang)
sin_rot = np.sin(rot_ang)
rotation_affines.append(np.array([[1, 0, 0, 0],
[0, cos_rot, -sin_rot, 0],
[0, sin_rot, cos_rot, 0],
[0, 0, 0, 1]]))
rotated_bvecs[i] = np.dot(rotation_affines[-1][:3, :3],
bvecs[i])
# Copy over the rotation affines
full_affines = rotation_affines[:]
# And add some scaling and translation to each one to make this harder
for i in range(len(full_affines)):
full_affines[i] = np.dot(full_affines[i],
np.array([[2.5, 0, 0, -10],
[0, 2.2, 0, 20],
[0, 0, 1, 0],
[0, 0, 0, 1]]))
gt_rot = gradient_table_from_bvals_bvecs(bvals,
rotated_bvecs, b0_threshold=0)
new_gt = reorient_bvecs(gt_rot, full_affines)
# At the end of all this, we should be able to recover the original
# vectors
npt.assert_almost_equal(gt.bvecs, new_gt.bvecs)
# We should be able to pass just the 3-by-3 rotation components to the same
# effect
new_gt = reorient_bvecs(gt_rot, np.array(rotation_affines)[:, :3, :3])
npt.assert_almost_equal(gt.bvecs, new_gt.bvecs)
# Verify that giving the wrong number of affines raises an error:
full_affines.append(np.zeros((4, 4)))
assert_raises(ValueError, reorient_bvecs, gt_rot, full_affines)
def test_nan_bvecs():
"""
    Test that the presence of NaNs in b-vectors doesn't raise warnings.
In previous versions, the presence of NaN in b-vectors was taken to
indicate a 0 b-value, but also raised a warning when testing for the length
of these vectors. This checks that it doesn't happen.
"""
fdata, fbvals, fbvecs = get_data()
with warnings.catch_warnings(record=True) as w:
gtab = gradient_table(fbvals, fbvecs)
npt.assert_(len(w) == 0)
def test_generate_bvecs():
"""Tests whether we have properly generated bvecs.
"""
# Test if the generated bvectors are unit vectors
bvecs = generate_bvecs(100)
norm = [np.linalg.norm(v) for v in bvecs]
npt.assert_almost_equal(norm, np.ones(100))
# Test if two generated vectors are almost orthogonal
bvecs_2 = generate_bvecs(2)
cos_theta = np.dot(bvecs_2[0], bvecs_2[1])
npt.assert_almost_equal(cos_theta, 0.)
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
|
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
from pybvc.controller.controller import Controller
from pybvc.controller.inventory import Inventory
from pybvc.controller.topology import Topology, Node
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
def of_demo_27():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 27 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
print "\n"
print ("<<< Controller '%s:%s'" % (ctrlIpAddr, ctrlPortNum))
time.sleep(rundelay)
print ("\n")
print ("<<< Get OpenFlow Network Topology information")
time.sleep(rundelay)
topology_ids = []
topologies = []
inventory = None
result = ctrl.build_inventory_object()
status = result.get_status()
if(status.eq(STATUS.OK)):
inventory = result.get_data()
assert(isinstance(inventory, Inventory))
else:
print ("\n")
print ("!!!Error, failed to obtain inventory info, reason: %s"
% status.brief().lower())
exit(0)
result = ctrl.get_topology_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
topology_ids = result.get_data()
assert(isinstance(topology_ids, list))
else:
print ("\n")
print ("!!!Error, failed to obtain topology info, reason: %s"
% status.brief().lower())
exit(0)
print "\n"
print ("<<< OpenFlow network topologies")
for topo_id in topology_ids:
print " '%s'" % topo_id
for topo_id in topology_ids:
result = ctrl.build_topology_object(topo_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
topo = result.get_data()
topologies.append(topo)
assert(isinstance(topo, Topology))
else:
print ("\n")
print ("!!!Error, failed to parse '%s' topology info, reason: %s"
% (topo_id, status.brief().lower()))
exit(0)
for topo in topologies:
time.sleep(rundelay)
print "\n"
print ("<<< Information for '%s' network topology:") % topo.get_id()
print "\n".strip()
flows_cnt = 0
sids = topo.get_switch_ids()
for sid in sids:
flows_cnt += inventory.get_openflow_node_flows_cnt(sid)
print (" Number of flows : %s"
% flows_cnt)
print (" Number of switches : %s"
% topo.get_switches_cnt())
print (" Number of inter-switch links : %s"
% topo.get_inter_switch_links_cnt())
print (" Number of hosts : %s"
% topo.get_hosts_cnt())
time.sleep(rundelay)
print "\n"
print ("<<< OpenFlow switches in '%s' topology") % topo.get_id()
s1 = 'IP Address'
s2 = 'OpenFlow Id'
sym = '-'
print "\n".strip()
print " {0:<15} {1:<30}".format(s1, s2)
print " {0:<15} {1:<30}".format(sym*15, sym*30)
switch_ids = topo.get_switch_ids()
for switch_id in switch_ids:
inv_node = inventory.get_openflow_node(switch_id)
addr = inv_node.get_ip_address()
node_id = inv_node.get_id()
print " {0:<15} {1:<30}".format(addr, node_id)
switches = topo.get_switches()
for switch in switches:
assert(isinstance(switch, Node))
print "\n".strip()
time.sleep(rundelay)
print ("<<< Neighborhood information for '%s' switch ports"
% switch.get_id())
pnums = switch.get_port_numbers()
for pnum in pnums:
if pnum == 'LOCAL':
continue
print "\n".strip()
print " Port '%s'" % pnum
peer_list = topo.get_peer_list_for_node_port_(switch, pnum)
if len(peer_list):
for item in peer_list:
assert(isinstance(item, Node))
if(item.is_switch()):
print (" Device Type : %s"
% "switch")
print (" OpenFlow Id : %s"
% item.get_openflow_id())
elif (item.is_host()):
print (" Device Type : %s"
% "host")
mac_addr = item.get_mac_address()
print (" MAC Address : %s"
% mac_addr)
ip_addr = item.get_ip_address_for_mac(mac_addr)
print (" IP Address : %s"
% ip_addr)
else:
print " None"
time.sleep(rundelay)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_27()
|
|
# -*- coding: utf-8 -*-
"""
Environment discovery and helper functions for Google App Engine.
Some methods from the following modules have been made available for convenience:
* google.appengine.api.app_identity
https://cloud.google.com/appengine/docs/python/appidentity/
* google.appengine.api.modules
https://cloud.google.com/appengine/docs/python/modules/
* google.appengine.api.namespace_manager
https://cloud.google.com/appengine/docs/python/multitenancy/
Example:
import gaek.environ
# Only send emails in production.
if gaek.environ.is_production():
mail.send(*args, **kwargs)
"""
__author__ = 'Eric Higgins'
__copyright__ = 'Copyright 2015-2016, Eric Higgins'
__email__ = '[email protected]'
import os
import warnings
from google.appengine.api import app_identity
from google.appengine.api import modules
try:
from google.appengine.api import namespace_manager
except ImportError:
namespace_manager = None
__all__ = (
# App Identity functions.
'get_application_id',
'get_default_version_hostname',
'get_service_account_name',
# Module functions.
'get_current_instance_id',
'get_current_module_name',
'get_current_version_name',
'get_default_version',
'get_hostname',
'get_modules',
'get_versions',
# Namespace functions.
'get_namespace',
'google_apps_namespace',
# Helper functions.
'get_dot_target_name',
'get_dot_target_name_safe',
'get_environ_dict',
'is_host_google',
'is_development',
'is_staging',
'is_staging_safe',
'is_production',
'is_production_safe',
'is_default_version',
'is_default_version_safe',
'get_current_module_name_safe',
'get_current_version_name_safe'
)
_UNDEFINED = '_UNDEFINED_'
# App Identity functions.
get_application_id = app_identity.get_application_id
get_default_version_hostname = app_identity.get_default_version_hostname
get_service_account_name = app_identity.get_service_account_name
# Module functions.
get_current_instance_id = modules.get_current_instance_id
get_current_module_name = modules.get_current_module_name
get_current_version_name = modules.get_current_version_name
get_default_version = modules.get_default_version
get_hostname = modules.get_hostname
get_modules = modules.get_modules
get_versions = modules.get_versions
def deprecated_fn():
    warnings.warn('deprecated', DeprecationWarning)
# Namespace functions.
if namespace_manager is None:
    get_namespace = deprecated_fn
    google_apps_namespace = deprecated_fn
else:
    get_namespace = namespace_manager.get_namespace
    google_apps_namespace = namespace_manager.google_apps_namespace
# Helper functions.
def get_current_version_name_safe():
"""Returns the current version of the app, or None if there is no current version found."""
try:
return modules.get_current_version_name()
except KeyError:
return None
def get_current_module_name_safe():
"""Returns the current module of the app, or None if there is no current module found.."""
try:
return modules.get_current_module_name()
except KeyError:
return None
def is_host_google():
"""True if the app is being hosted from Google App Engine servers."""
return os.environ.get('SERVER_SOFTWARE', '').startswith('Google') or get_hostname().endswith('.appspot.com')
def is_default_version(version=None):
"""True if the current or specified app version is the default."""
version = version or get_current_version_name()
return version == get_default_version()
def is_default_version_safe(version=None):
"""
True if the current or specified app version is the default.
Returns False when there is no version found.
"""
version = version or get_current_version_name_safe()
return version == get_default_version()
def is_development():
"""True if the dev_appserver is running (localhost or local development server)."""
return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')
def is_staging(version=None):
"""True if the app is hosted by Google (appspot.com) but the version is not the default."""
return is_host_google() and not is_default_version(version)
def is_staging_safe(version=None):
"""True if the app is hosted by Google (appspot.com) but the version is not the default."""
    is_default_version = is_default_version_safe(version)
if is_default_version is None:
return False
return is_host_google() and not is_default_version
def is_production(version=None):
"""True if the app is being hosted by Google and the default version."""
return is_host_google() and is_default_version(version)
def is_production_safe(version=None):
"""True if the app is being hosted by Google and the default version."""
is_default_version = is_default_version_safe(version)
if is_default_version is None:
return False
return is_host_google() and is_default_version
def get_dot_target_name(version=None, module=None):
"""Returns the current version/module in -dot- notation which is used by `target:` parameters."""
version = version or get_current_version_name()
module = module or get_current_module_name()
return '-dot-'.join((version, module))
def get_dot_target_name_safe(version=None, module=None):
"""
Returns the current version/module in -dot- notation which is used by `target:` parameters.
If there is no current version or module then None is returned.
"""
version = version or get_current_version_name_safe()
module = module or get_current_module_name_safe()
if version and module:
return '-dot-'.join((version, module))
return None
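# Example (illustrative): with current version 'v1' and module 'default',
# get_dot_target_name() returns 'v1-dot-default'; get_dot_target_name_safe()
# returns None instead of raising when no version or module can be resolved
# (e.g. outside the App Engine runtime).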
def _get_os_environ_dict(keys):
"""Return a dictionary of key/values from os.environ."""
return {k: os.environ.get(k, _UNDEFINED) for k in keys}
def _get_app_identity_dict(keys):
"""Return a dictionary of key/values from the app_identity module functions."""
return {k: getattr(app_identity, k)() for k in keys}
def _get_modules_dict(keys):
"""Return a dictionary of key/values from the modules module functions."""
return {k: getattr(modules, k)() for k in keys}
def _get_namespace_manager_dict_v2(keys):
"""Returns an empty dictionary since the namespace_manager API has been deprecated."""
return {}
def _get_namespace_manager_dict_v1(keys):
"""Return a dictionary of key/values from the namespace_manager module functions."""
return {k: getattr(namespace_manager, k)() for k in keys}
# Swap the function that's called if the namespace_manager is disabled.
if namespace_manager is None:
_get_namespace_manager_dict = _get_namespace_manager_dict_v2
else:
_get_namespace_manager_dict = _get_namespace_manager_dict_v1
def get_environ_dict():
"""Return a dictionary of all environment keys/values."""
return {
'os.environ': _get_os_environ_dict((
'AUTH_DOMAIN',
'CURRENT_CONFIGURATION_VERSION',
'CURRENT_MODULE_ID',
'CURRENT_VERSION_ID',
'DEFAULT_VERSION_HOSTNAME',
'FEDERATED_IDENTITY',
'FEDERATED_PROVIDER',
'GAE_LOCAL_VM_RUNTIME',
'HTTP_HOST',
'HTTP_PROXY',
'HTTP_X_APPENGINE_HTTPS',
'HTTP_X_APPENGINE_QUEUENAME',
'HTTP_X_ORIGINAL_HOST',
'HTTP_X_ORIGINAL_SCHEME',
'SERVER_NAME',
'SERVER_PORT',
'SERVER_SOFTWARE',
'USER_IS_ADMIN',
)),
'app_identity': _get_app_identity_dict((
'get_service_account_name',
'get_application_id',
'get_default_version_hostname',
)),
'modules': _get_modules_dict((
'get_current_module_name',
'get_current_version_name',
'get_current_instance_id',
'get_modules',
'get_versions',
'get_default_version',
'get_hostname',
)),
'namespace_manager': _get_namespace_manager_dict((
'get_namespace',
'google_apps_namespace',
)),
}
|
|
from __future__ import absolute_import
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponseRedirect, HttpResponse
from django.http import JsonResponse
from django.core.urlresolvers import reverse
from django.contrib import auth,messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth.views import login
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
from django.forms.models import model_to_dict
from django.conf import settings
from django.db.models import Q
from django.views.generic import ListView, DetailView, TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Recipe, Ingredient, MethodStep, ExternalRecipe
from recipebox.wines.models import WineNote
from .forms import RecipeForm, IngredientFormSet, MethodStepFormSet,\
UserForm, UserProfileForm, ImportForm, \
ExternalRecipeForm, ImportFileForm
import os
import json
import operator
import urlparse
import urllib,urllib2
from bs4 import BeautifulSoup
import requests
### user login and logout
@login_required(login_url='/accounts/login/')
def logout(request):
auth.logout(request)
messages.add_message(request, messages.INFO, \
'Successfully logged out. Login again to create recipes')
return HttpResponseRedirect(reverse('login'))
def custom_login(request):
response = login(request,template_name='/accounts/login/')
if request.user.is_authenticated():
messages.info(request, "Welcome ...")
return response
def register(request):
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
profile.save()
registered = True
return HttpResponseRedirect(reverse('recipes'))
else:
print user_form.errors, profile_form.errors
else:
user_form = UserForm()
profile_form = UserProfileForm()
return render(request,
'recipes/registration.html',
{'user_form': user_form, 'profile_form': profile_form,\
'registered': registered})
####################################################################
### recipes
@login_required(login_url='/accounts/login/')
def recipe_list(request, template_name='recipes/recipes.html'):
recipes = Recipe.objects.all()
print settings.INSTALLED_APPS
context = {'recipe_list': recipes}
return render(request, template_name, context)
@login_required(login_url='/accounts/login/')
def recipe_show(request, recipe_id, template_name='recipes/show_recipe.html'):
recipe = get_object_or_404(Recipe, pk=recipe_id)
ingredients = []
for i in recipe.ingredient_set.all():
ingredients.append(i.ingredient_name)
steps = []
for s in recipe.methodstep_set.all():
steps.append(s.step)
return render(request, template_name, {'recipe':recipe,\
'ingredients': ingredients,\
'steps': steps })
@login_required(login_url='/accounts/login/')
def recipe_create(request, template_name='recipes/recipe_form.html'):
recipe = Recipe()
if request.POST:
recipe_form = RecipeForm(request.POST or None, request.FILES, instance=recipe)
if recipe_form.is_valid():
ingredient_formset = IngredientFormSet(request.POST, instance=recipe)
method_formset = MethodStepFormSet(request.POST, instance=recipe)
if ingredient_formset.is_valid() and method_formset.is_valid():
recipe.chef = request.user.get_username()
recipe.source = request.user.get_username()
if request.FILES:
print recipe_form.cleaned_data['recipe_picture']
recipe.recipe_picture = recipe_form.cleaned_data['recipe_picture']
recipe.create_thumbnail()
if request.user.is_authenticated():
recipe.user = request.user
recipe.save()
ingredient_formset.save()
method_formset.save()
return redirect('recipes')
else:
recipe_form = RecipeForm()
ingredient_formset = IngredientFormSet(instance=Recipe())
method_formset = MethodStepFormSet(instance=Recipe())
return render(request, template_name, {'recipe_form':recipe_form, \
'ingredient_formset': ingredient_formset,\
'method_formset': method_formset})
@login_required(login_url='/accounts/login/')
def recipe_update(request, recipe_id, template_name='recipes/recipe_form.html'):
recipe = get_object_or_404(Recipe, pk=recipe_id)
recipe_form = RecipeForm(request.POST or None, instance=recipe)
ingredient_formset = IngredientFormSet(request.POST or None, instance=recipe)
method_formset = MethodStepFormSet(request.POST or None, instance=recipe)
if recipe_form.is_valid():
if ingredient_formset.is_valid() and method_formset.is_valid():
recipe.chef = request.user.get_username()
recipe.source = request.user.get_username()
print request.FILES
if request.FILES:
recipe.recipe_picture = request.FILES['recipe_picture']
recipe.create_thumbnail()
recipe.save()
ingredient_formset.save()
method_formset.save()
return redirect('recipes')
return render(request, template_name, {'recipe_id': recipe_id, 'recipe_form':recipe_form, \
'ingredient_formset': ingredient_formset,\
'method_formset': method_formset})
@login_required(login_url='/accounts/login/')
def recipe_delete(request, recipe_id, template_name='recipes/recipe_confirm_delete.html'):
recipe = get_object_or_404(Recipe, pk=recipe_id)
if request.method=='POST':
recipe.delete()
return redirect('recipes')
return render(request, template_name, {'recipe':recipe})
@login_required(login_url='/accounts/login/')
def recipe_delete_ajax(request):
if request.is_ajax():
recipe_id = request.POST['id']
recipe = get_object_or_404(Recipe, pk=recipe_id)
recipe.delete()
return redirect('recipes')
##################################################
### import recipe from url
@login_required(login_url='/accounts/login/')
def import_from_url(request, template_name='recipes/recipe_import.html'):
if request.POST:
import_form = ImportForm(request.POST)
if import_form.is_valid():
url = import_form.cleaned_data['url']
source = import_form.cleaned_data['source']
recipe = process_url(url,source)
if request.user.is_authenticated():
recipe.user = request.user
recipe.save()
return redirect('recipes')
else:
import_form = ImportForm()
return render(request, template_name, {'import_form':import_form})
@login_required(login_url='/accounts/login/')
def define_external(request, template_name='recipes/external_recipe.html'):
if request.POST:
external_form = ExternalRecipeForm(request.POST)
if external_form.is_valid():
external_form.save()
return redirect('dashboard')
else:
external_form = ExternalRecipeForm()
return render(request, template_name, {'external_form':external_form})
@login_required(login_url='/accounts/login/')
def show_external(request, external_id, template_name='recipes/external_detail.html'):
external = get_object_or_404(ExternalRecipe, pk=external_id)
external_form = ExternalRecipeForm(request.POST or None, instance=external)
if external_form.is_valid():
external_form.save()
return redirect('dashboard')
return render(request, template_name, {'external_form': external_form })
def process_url(url,site):
"""
utility function for scraping the url source and
creating a new recipe instance
"""
source = site.source.lower()
soup = BeautifulSoup(urllib2.urlopen(url).read())
if "bbc" in source:
ingredients = get_ingredients_from_bbc(soup)
elif "taste" in source:
ingredients = get_ingredients_from_taste(soup)
steps = get_method(soup, site)
description = get_description(soup, site)
chef = get_chef(soup, site)
title = get_title(soup, site)
recipe = create_recipe(source,title,chef,description,ingredients,steps)
return recipe
###########################################################################
### import from text file
@login_required(login_url='/accounts/login/')
def import_from_file(request):
if request.method == 'POST':
form = ImportFileForm(request.POST, request.FILES)
if form.is_valid():
recipe = process_file(request.FILES['file'])
if request.user.is_authenticated():
recipe.user = request.user
recipe.save()
return redirect('recipes')
else:
form = ImportFileForm()
return HttpResponse(form)
def index(string):
return len(string)+1
def extract(idx, text):
return text[idx:].strip('\r')
def recipe_from_dict(r):
recipe = Recipe()
recipe.title = r["title"]
recipe.source = r["source"]
recipe.chef = r["chef"]
recipe.description = r["description"]
recipe.save()
for ingredient in r["ingredients"]:
recipe.ingredient_set.create(ingredient_name=ingredient)
for step in r["method"]:
recipe.methodstep_set.create(step=step)
return recipe
def process_file(f):
content = f.read()
recipe_list = content.split('\n')
keys = ["title","source","chef","description","ingredient","method"]
values = [index(k) for k in keys]
identifiers = dict(zip(keys, values))
ingredients = []
method = []
recipe = {}
for line in recipe_list:
for k,v in identifiers.iteritems():
if k in line:
if "ingredient" in k:
ingredients.append(extract(v,line))
elif "method" in k:
method.append(extract(v,line))
else:
recipe[k] = extract(v,line)
recipe["ingredients"] = ingredients
recipe["method"] = method
return recipe_from_dict(recipe)
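# Illustrative input for process_file() (format inferred from index()/extract()
# above: each line starts with a keyword followed by a single separator
# character such as ':'):
#   title:Pancakes
#   source:Grandma
#   chef:Grandma
#   description:Weekend breakfast
#   ingredient:2 eggs
#   ingredient:200g flour
#   method:Whisk the eggs
#   method:Fold in the flour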
###########################################################################
### ajax recipe filtering
def recipe_search(request):
result = Recipe.objects.all()
query = request.GET.get('q')
if query:
query_list = query.split()
result = result.filter(
reduce(operator.and_,
(Q(title__icontains=q) for q in query_list)) |
reduce(operator.and_,
(Q(description__icontains=q) for q in query_list))
)
ids = ['recipe_'+str(r.id) for r in result]
else:
ids = []
data = {'id_list': ids}
return JsonResponse(data)
###########################################################################
### food2fork api searching
@login_required(login_url='/accounts/login/')
def recipe_inspiration(request):
    # Search the food2fork API for recipes matching the query string.
    if request.is_ajax():
q = request.GET.get("q")
print q
if q:
url = "http://food2fork.com/api/search"
recipe_url = "http://food2fork.com/api/get"
parameters = {
'key': "673a9139cc12071e81eacf740cfa0409",
'q': q
}
response = requests.get(url, params=parameters)
#get the first recipe from the response
recipe_list = json.loads(response.content)['recipes']
context = {'recipe_list': recipe_list}
template_name = 'recipes/inspiration_results.html'
else:
context = {'recipe_list': None}
template_name = 'recipes/inspiration.html'
else:
context = {'recipe_list': None}
template_name = 'recipes/inspiration.html'
return render(request, template_name, context)
@login_required(login_url='/accounts/login/')
def get_from_food2fork(request,template_name='recipes/recipe_detail.html'):
    # Fetch a single recipe's details from the food2fork API.
    if request.is_ajax():
recipe_id = request.GET.get('recipe_id')
print "recipe_id",recipe_id
recipe_url = "http://food2fork.com/api/get"
recipe_parameters = {
'key': "673a9139cc12071e81eacf740cfa0409",
'rId': recipe_id
}
recipe = requests.get(recipe_url,
params=recipe_parameters)
context = {'recipe': json.loads(recipe.content)}
return JsonResponse(json.loads(recipe.content))
else:
context = {'recipe': None}
return render(request, template_name, context)
@login_required(login_url='/accounts/login/')
def import_from_food2fork(request):
    if request.is_ajax():
recipe_id = request.POST.get('recipe_id')
recipe_url = "http://food2fork.com/api/get"
recipe_parameters = {
'key': "673a9139cc12071e81eacf740cfa0409",
'rId': recipe_id
}
f2f_recipe = json.loads(requests.get(recipe_url,
params=recipe_parameters).content)
print f2f_recipe
source = f2f_recipe['recipe']['publisher']
title = f2f_recipe['recipe']['title']
chef = f2f_recipe['recipe']['publisher']
description = "No description found"
ingredients = f2f_recipe['recipe']['ingredients']
steps = f2f_recipe['recipe']['publisher']
# recipe = create_recipe(source, title, chef, description,\
# ingredients, steps)
###########################################################################
### utility functions which work on external recipe model instance
def create_recipe(source,title,chef,description,ingredients,steps):
recipe = Recipe()
recipe.title = title
recipe.source = source
recipe.chef = chef
recipe.description = description
recipe.save()
for ingredient in ingredients:
recipe.ingredient_set.create(ingredient_name=ingredient)
for step in steps:
recipe.methodstep_set.create(step=step)
return recipe
### site specific - need a way of generalising
def get_ingredients_from_taste(soup):
section = soup.find('ul','ingredient-table').find_all('li')
inner_list = []
for li in section:
inner_list.append(li.find('label').contents[0])
return inner_list
def get_ingredients_from_bbc(soup): #this is the same as ingredients_from_taste except for label - no cb's
try:
sections = soup.find_all('ul','recipe-ingredients__list')
inner_list = []
for s,section in enumerate(sections):
section_li = section.find_all('li')
for li in section_li:
li_contents = li.contents
li_text = []
for li_c in li_contents:
a_text = ""
if li_c.find('a') is None:
a_text = li_c.contents[0]
else:
li_text.append(li_c)
li_text.append(a_text)
inner_list.append("".join(li_text))
    except Exception:
inner_list = ["could not find ingredients on page"]
return inner_list
### generalised functions
def get_chef(soup, site):
try:
chef = soup.find('a',{'class':site.chef_class}).contents[0]
    except Exception:
chef = "Unknown Chef"
return chef
def get_description(soup, site):
try:
description = get_text_from_div(soup,site.description_class,'p')
    except Exception:
description = "could not find description on page"
return description
def get_title(soup, site):
try:
title = get_text_from_div(soup,site.title_class,'h1')
    except Exception:
title = "could not find title on page"
return title
def get_method(soup, site):
try:
method = get_li_group(soup, site.method_class)
    except Exception:
method = ["could not find method on page"]
return method
### utilities
def get_text_from_div(soup, div_class, el_type):
return soup.find('div',div_class).find(el_type).contents[0]
def get_li_group(soup, div_class):
section = soup.find('div',div_class).find('ol').find_all('li')
inner_list = []
for li in section:
inner_list.append(li.find('p').contents[0])
return inner_list
###############################################################
############################################
## recipes - class based views
############################################
class RecipeList(LoginRequiredMixin,ListView):
model = Recipe
context_object_name = 'recipe_list'
template_name = 'recipes/recipes.html'
class RecipeDetail(LoginRequiredMixin,DetailView):
model = Recipe
context_object_name = 'recipe'
template_name = 'recipes/show_recipe.html'
###############################################
## search
###############################################
# class JSONResponseMixin(object):
# """
# A mixin that can be used to render a JSON response.
# """
# def render_to_json_response(self, context, **response_kwargs):
# """
# Returns a JSON response, transforming 'context' to make the payload.
# """
# return JsonResponse(
# self.get_data(context),
# **response_kwargs
# )
# def get_data(self, context):
# """
# Returns an object that will be serialized as JSON by json.dumps().
# """
# # Note: This is *EXTREMELY* naive; in reality, you'll need
# # to do much more complex handling to ensure that arbitrary
# # objects -- such as Django model instances or querysets
# # -- can be serialized as JSON.
# return context
# class RecipeSearchListView(JSONResponseMixin,TemplateView):
# model = Recipe
# def get_queryset(self):
# result = super(RecipeSearchListView, self).get_queryset()
# query = self.request.GET.get('q')
# if query:
# query_list = query.split()
# result = result.filter(
# reduce(operator.and_,
# (Q(title__icontains=q) for q in query_list)) |
# reduce(operator.and_,
# (Q(description__icontains=q) for q in query_list))
# )
# ids = ['recipe_'+str(r.id) for r in result]
# else:
# ids = []
# self.data = {'id_list': ids}
# def get_context_data(self, **kwargs):
# context = super(RecipeSearchListView, self).get_context_data(**kwargs)
# context['id_list'] = self.data['id_list']
# return context
# def render_to_response(self, context, **response_kwargs):
# return self.render_to_json_response(context, **response_kwargs)
|
|
from __future__ import division, absolute_import, print_function
import confuse
import sys
import unittest
from . import _root
PY3 = sys.version_info[0] == 3
class SingleSourceTest(unittest.TestCase):
def test_dict_access(self):
config = _root({'foo': 'bar'})
value = config['foo'].get()
self.assertEqual(value, 'bar')
def test_list_access(self):
config = _root({'foo': ['bar', 'baz']})
value = config['foo'][1].get()
self.assertEqual(value, 'baz')
def test_missing_key(self):
config = _root({'foo': 'bar'})
with self.assertRaises(confuse.NotFoundError):
config['baz'].get()
def test_missing_index(self):
config = _root({'l': ['foo', 'bar']})
with self.assertRaises(confuse.NotFoundError):
config['l'][5].get()
def test_dict_iter(self):
config = _root({'foo': 'bar', 'baz': 'qux'})
keys = [key for key in config]
self.assertEqual(set(keys), set(['foo', 'baz']))
def test_list_iter(self):
config = _root({'l': ['foo', 'bar']})
items = [subview.get() for subview in config['l']]
self.assertEqual(items, ['foo', 'bar'])
def test_int_iter(self):
config = _root({'n': 2})
with self.assertRaises(confuse.ConfigTypeError):
[item for item in config['n']]
def test_dict_keys(self):
config = _root({'foo': 'bar', 'baz': 'qux'})
keys = config.keys()
self.assertEqual(set(keys), set(['foo', 'baz']))
def test_dict_values(self):
config = _root({'foo': 'bar', 'baz': 'qux'})
values = [value.get() for value in config.values()]
self.assertEqual(set(values), set(['bar', 'qux']))
def test_dict_items(self):
config = _root({'foo': 'bar', 'baz': 'qux'})
items = [(key, value.get()) for (key, value) in config.items()]
self.assertEqual(set(items), set([('foo', 'bar'), ('baz', 'qux')]))
def test_list_keys_error(self):
config = _root({'l': ['foo', 'bar']})
with self.assertRaises(confuse.ConfigTypeError):
config['l'].keys()
def test_list_sequence(self):
config = _root({'l': ['foo', 'bar']})
items = [item.get() for item in config['l'].sequence()]
self.assertEqual(items, ['foo', 'bar'])
def test_dict_sequence_error(self):
config = _root({'foo': 'bar', 'baz': 'qux'})
with self.assertRaises(confuse.ConfigTypeError):
list(config.sequence())
def test_dict_contents(self):
config = _root({'foo': 'bar', 'baz': 'qux'})
contents = config.all_contents()
self.assertEqual(set(contents), set(['foo', 'baz']))
def test_list_contents(self):
config = _root({'l': ['foo', 'bar']})
contents = config['l'].all_contents()
self.assertEqual(list(contents), ['foo', 'bar'])
def test_int_contents(self):
config = _root({'n': 2})
with self.assertRaises(confuse.ConfigTypeError):
list(config['n'].all_contents())
class ConversionTest(unittest.TestCase):
def test_str_conversion_from_str(self):
config = _root({'foo': 'bar'})
value = str(config['foo'])
self.assertEqual(value, 'bar')
def test_str_conversion_from_int(self):
config = _root({'foo': 2})
value = str(config['foo'])
self.assertEqual(value, '2')
@unittest.skipIf(confuse.PY3, "unicode only present in Python 2")
def test_unicode_conversion_from_int(self):
config = _root({'foo': 2})
value = unicode(config['foo']) # noqa ignore=F821
self.assertEqual(value, unicode('2')) # noqa ignore=F821
def test_bool_conversion_from_bool(self):
config = _root({'foo': True})
value = bool(config['foo'])
self.assertEqual(value, True)
def test_bool_conversion_from_int(self):
config = _root({'foo': 0})
value = bool(config['foo'])
self.assertEqual(value, False)
class NameTest(unittest.TestCase):
def test_root_name(self):
config = _root()
self.assertEqual(config.name, 'root')
def test_string_access_name(self):
config = _root()
name = config['foo'].name
self.assertEqual(name, "foo")
def test_int_access_name(self):
config = _root()
name = config[5].name
self.assertEqual(name, "#5")
def test_nested_access_name(self):
config = _root()
name = config[5]['foo']['bar'][20].name
self.assertEqual(name, "#5.foo.bar#20")
class MultipleSourceTest(unittest.TestCase):
def test_dict_access_shadowed(self):
config = _root({'foo': 'bar'}, {'foo': 'baz'})
value = config['foo'].get()
self.assertEqual(value, 'bar')
def test_dict_access_fall_through(self):
config = _root({'qux': 'bar'}, {'foo': 'baz'})
value = config['foo'].get()
self.assertEqual(value, 'baz')
def test_dict_access_missing(self):
config = _root({'qux': 'bar'}, {'foo': 'baz'})
with self.assertRaises(confuse.NotFoundError):
config['fred'].get()
def test_list_access_shadowed(self):
config = _root({'l': ['a', 'b']}, {'l': ['c', 'd', 'e']})
value = config['l'][1].get()
self.assertEqual(value, 'b')
def test_list_access_fall_through(self):
config = _root({'l': ['a', 'b']}, {'l': ['c', 'd', 'e']})
value = config['l'][2].get()
self.assertEqual(value, 'e')
def test_list_access_missing(self):
config = _root({'l': ['a', 'b']}, {'l': ['c', 'd', 'e']})
with self.assertRaises(confuse.NotFoundError):
config['l'][3].get()
def test_access_dict_replaced(self):
config = _root({'foo': {'bar': 'baz'}}, {'foo': {'qux': 'fred'}})
value = config['foo'].get()
self.assertEqual(value, {'bar': 'baz'})
def test_dict_keys_merged(self):
config = _root({'foo': {'bar': 'baz'}}, {'foo': {'qux': 'fred'}})
keys = config['foo'].keys()
self.assertEqual(set(keys), set(['bar', 'qux']))
def test_dict_keys_replaced(self):
config = _root({'foo': {'bar': 'baz'}}, {'foo': {'bar': 'fred'}})
keys = config['foo'].keys()
self.assertEqual(list(keys), ['bar'])
def test_dict_values_merged(self):
config = _root({'foo': {'bar': 'baz'}}, {'foo': {'qux': 'fred'}})
values = [value.get() for value in config['foo'].values()]
self.assertEqual(set(values), set(['baz', 'fred']))
def test_dict_values_replaced(self):
config = _root({'foo': {'bar': 'baz'}}, {'foo': {'bar': 'fred'}})
values = [value.get() for value in config['foo'].values()]
self.assertEqual(list(values), ['baz'])
def test_dict_items_merged(self):
config = _root({'foo': {'bar': 'baz'}}, {'foo': {'qux': 'fred'}})
items = [(key, value.get()) for (key, value) in config['foo'].items()]
self.assertEqual(set(items), set([('bar', 'baz'), ('qux', 'fred')]))
def test_dict_items_replaced(self):
config = _root({'foo': {'bar': 'baz'}}, {'foo': {'bar': 'fred'}})
items = [(key, value.get()) for (key, value) in config['foo'].items()]
self.assertEqual(list(items), [('bar', 'baz')])
def test_list_sequence_shadowed(self):
config = _root({'l': ['a', 'b']}, {'l': ['c', 'd', 'e']})
items = [item.get() for item in config['l'].sequence()]
self.assertEqual(items, ['a', 'b'])
def test_list_sequence_shadowed_by_dict(self):
config = _root({'foo': {'bar': 'baz'}}, {'foo': ['qux', 'fred']})
with self.assertRaises(confuse.ConfigTypeError):
list(config['foo'].sequence())
def test_dict_contents_concatenated(self):
config = _root({'foo': {'bar': 'baz'}}, {'foo': {'qux': 'fred'}})
contents = config['foo'].all_contents()
self.assertEqual(set(contents), set(['bar', 'qux']))
def test_dict_contents_concatenated_not_replaced(self):
config = _root({'foo': {'bar': 'baz'}}, {'foo': {'bar': 'fred'}})
contents = config['foo'].all_contents()
self.assertEqual(list(contents), ['bar', 'bar'])
def test_list_contents_concatenated(self):
config = _root({'foo': ['bar', 'baz']}, {'foo': ['qux', 'fred']})
contents = config['foo'].all_contents()
self.assertEqual(list(contents), ['bar', 'baz', 'qux', 'fred'])
def test_int_contents_error(self):
config = _root({'foo': ['bar', 'baz']}, {'foo': 5})
with self.assertRaises(confuse.ConfigTypeError):
list(config['foo'].all_contents())
def test_list_and_dict_contents_concatenated(self):
config = _root({'foo': ['bar', 'baz']}, {'foo': {'qux': 'fred'}})
contents = config['foo'].all_contents()
self.assertEqual(list(contents), ['bar', 'baz', 'qux'])
def test_add_source(self):
config = _root({'foo': 'bar'})
config.add({'baz': 'qux'})
self.assertEqual(config['foo'].get(), 'bar')
self.assertEqual(config['baz'].get(), 'qux')
class SetTest(unittest.TestCase):
def test_set_missing_top_level_key(self):
config = _root({})
config['foo'] = 'bar'
self.assertEqual(config['foo'].get(), 'bar')
def test_override_top_level_key(self):
config = _root({'foo': 'bar'})
config['foo'] = 'baz'
self.assertEqual(config['foo'].get(), 'baz')
def test_set_second_level_key(self):
config = _root({})
config['foo']['bar'] = 'baz'
self.assertEqual(config['foo']['bar'].get(), 'baz')
def test_override_second_level_key(self):
config = _root({'foo': {'bar': 'qux'}})
config['foo']['bar'] = 'baz'
self.assertEqual(config['foo']['bar'].get(), 'baz')
def test_override_list_index(self):
config = _root({'foo': ['a', 'b', 'c']})
config['foo'][1] = 'bar'
self.assertEqual(config['foo'][1].get(), 'bar')
|
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check if all public modules are included in our docs."""
from __future__ import print_function
import argparse
import os
import sys
import warnings
from sphinx.ext.intersphinx import fetch_inventory
from script_utils import PROJECT_ROOT
DOCS_DIR = os.path.join(PROJECT_ROOT, 'docs')
IGNORED_PREFIXES = ('test_', '_')
IGNORED_MODULES = frozenset([
'google.cloud',
'google.cloud.bigquery',
'google.cloud.bigtable',
'google.cloud.dns',
'google.cloud.error_reporting',
'google.cloud.language',
'google.cloud.logging',
'google.cloud.logging.handlers',
'google.cloud.logging.handlers.transports',
'google.cloud.monitoring',
'google.cloud.pubsub',
'google.cloud.resource_manager',
'google.cloud.speech',
'google.cloud.storage',
'google.cloud.streaming',
'google.cloud.streaming.buffered_stream',
'google.cloud.streaming.exceptions',
'google.cloud.streaming.http_wrapper',
'google.cloud.streaming.stream_slice',
'google.cloud.streaming.transfer',
'google.cloud.streaming.util',
'google.cloud.translate',
'google.cloud.vision',
'google.cloud.vision.fixtures',
])
PACKAGES = (
'bigquery',
'bigtable',
'core',
'datastore',
'dns',
'error_reporting',
'language',
'logging',
'monitoring',
'pubsub',
'resource_manager',
'runtimeconfig',
'speech',
'storage',
'translate',
'vision',
)
class SphinxApp(object):
"""Mock app to interact with Sphinx helpers."""
warn = warnings.warn
srcdir = DOCS_DIR
def is_valid_module(filename):
"""Determines if a filename is a valid Python module.
    Assumes it is just the end of a path (i.e. it does not contain
    ``os.path.sep``).
:type filename: str
:param filename: The name of a file.
:rtype: bool
:returns: Flag indicating if the filename is valid.
"""
if not filename.endswith('.py'):
return False
if filename == '__init__.py':
return True
for prefix in IGNORED_PREFIXES:
if filename.startswith(prefix):
return False
return True
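# Illustrative only: a small, hedged sketch of the filename rules encoded in
# ``is_valid_module``; this helper is never called by the script itself.
def _example_is_valid_module():
    """Sketch: which filenames count as public modules."""
    assert is_valid_module('__init__.py')         # packages are always kept
    assert is_valid_module('client.py')           # regular public module
    assert not is_valid_module('_helpers.py')     # leading underscore -> private
    assert not is_valid_module('test_client.py')  # test modules are skipped
    assert not is_valid_module('README.rst')      # not a Python file at all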
def get_public_modules(path, base_package=None):
"""Get list of all public modules relative to a path.
:type path: str
:param path: The path containing the python modules.
:type base_package: str
:param base_package: (Optional) A package to prepend in
front of the path.
:rtype: list
:returns: List of all modules found.
"""
result = []
for subdir, _, files in os.walk(path):
# Skip folders that start with _.
if any([part.startswith('_')
for part in subdir.split(os.path.sep)]):
continue
_, rel_dir = subdir.split(path)
rel_dir = rel_dir.lstrip(os.path.sep)
for filename in files:
if is_valid_module(filename):
mod_name, _ = os.path.splitext(filename)
rel_path = os.path.join(rel_dir, mod_name)
if base_package is not None:
rel_path = os.path.join(base_package, rel_path)
# Turn into a Python module rather than a file path.
rel_path = rel_path.replace(os.path.sep, '.')
if mod_name == '__init__':
result.append(rel_path[:-len('.__init__')])
else:
result.append(rel_path)
return result
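# Illustrative only (hypothetical layout): given a tree such as
#   <path>/client.py, <path>/bigquery/__init__.py, <path>/bigquery/table.py
# and ``base_package='google.cloud'``, ``get_public_modules`` would return
# (up to ``os.walk`` ordering) ['google.cloud.client', 'google.cloud.bigquery',
# 'google.cloud.bigquery.table'].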
def verify_modules(build_root='_build'):
"""Verify modules included.
:type build_root: str
    :param build_root: The root directory into which the docs are built.
Defaults to ``_build``.
"""
object_inventory_relpath = os.path.join(build_root, 'html', 'objects.inv')
mock_uri = ''
inventory = fetch_inventory(SphinxApp, mock_uri,
object_inventory_relpath)
sphinx_mods = set(inventory['py:module'].keys())
public_mods = set()
for package in PACKAGES:
library_dir = os.path.join(PROJECT_ROOT, package, 'google', 'cloud')
package_mods = get_public_modules(library_dir,
base_package='google.cloud')
public_mods.update(package_mods)
if not sphinx_mods <= public_mods:
unexpected_mods = sphinx_mods - public_mods
message = ['Unexpected error. There were modules referenced by '
'Sphinx that are not among the public modules.']
message.extend(['- %s' % (mod,) for mod in unexpected_mods])
print('\n'.join(message), file=sys.stderr)
sys.exit(1)
undocumented_mods = public_mods - sphinx_mods
# Remove ignored modules.
undocumented_mods -= IGNORED_MODULES
if undocumented_mods:
message_parts = ['Found undocumented public modules:']
message_parts.extend(['- ' + mod_name
for mod_name in sorted(undocumented_mods)])
print('\n'.join(message_parts), file=sys.stderr)
sys.exit(1)
def get_parser():
"""Get simple ``argparse`` parser to determine package.
:rtype: :class:`argparse.ArgumentParser`
:returns: The parser for this script.
"""
description = ('Run check that all google-cloud '
'modules are included in docs.')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--build-root', dest='build_root',
help='The root directory where docs are located.')
return parser
def main():
"""Main script to verify modules included."""
parser = get_parser()
args = parser.parse_args()
verify_modules(build_root=args.build_root)
if __name__ == '__main__':
main()
|
|
import asyncio
import aiohs2
import pandas as pd
import subprocess
import urllib
import re
import logging
from functools import wraps
coroutine = asyncio.coroutine
logger = logging.getLogger(__name__)
hive_type_map = {
'BOOLEAN': pd.np.dtype(bool),
'BINARY': pd.np.dtype(bytes),
'TINYINT': pd.np.dtype(int),
'SMALLINT': pd.np.dtype(int),
'INT': pd.np.dtype(int),
'BIGINT': pd.np.dtype(int),
'FLOAT': pd.np.dtype(float),
'DOUBLE': pd.np.dtype(float),
'DECIMAL': pd.np.dtype(float),
'TIMESTAMP': pd.np.dtype('datetime64[ms]'),
'DATE': pd.np.dtype('datetime64[ms]'),
'STRING': pd.np.dtype(str),
'VARCHAR': pd.np.dtype(str),
'CHAR': pd.np.dtype(str),
'ARRAY': pd.np.dtype(list),
'MAP': pd.np.dtype(dict),
'STRUCT': pd.np.dtype(object),
'UNIONTYPE': pd.np.dtype(object),
}
hive_nils = ['(null)', 'null', 'none', '']
def hive_isnull(val):
return val.str.lower().isin(hive_nils)
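# Illustrative only: a hedged sketch of how the null markers and the type map
# above behave; this helper is never called by the module itself.
def _example_hive_nulls():
    """Sketch: textual nulls are flagged case-insensitively."""
    vals = pd.Series(['NULL', '(null)', '', 'foo'])
    assert hive_isnull(vals).tolist() == [True, True, True, False]
    assert hive_type_map['BIGINT'] == pd.np.dtype(int)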
class Framer:
def __init__(self, columns, dtypes, fill_values=None):
self.columns = columns
self.dtypes = dtypes
self.offset = 0
self.fill_values = fill_values or {}
self.warns = set()
@staticmethod
def get_dtype(typ):
try:
return hive_type_map[typ.rsplit('<', 1)[0].rsplit('_', 1)[0]]
except KeyError:
logger.warning('Unknown type %r for hive request', typ)
return pd.np.dtype(object)
@classmethod
@coroutine
def by_cursor(cls, cur, hql, **kws):
yield from cur.execute(hql)
schema = (yield from cur.getSchema())
if schema is None:
columns = dtypes = None
else:
columns = pd.Index([nfo['columnName'] for nfo in schema])
dtypes = [cls.get_dtype(nfo['type']) for nfo in schema]
return cls(columns, dtypes, **kws)
@coroutine
def __call__(self, coro):
raw = yield from coro
return self.mk_df(raw)
def mk_df(self, raw, na_vals=None):
if self.columns is None:
if raw is None:
return None
else:
if raw is not None and '__schema__' not in self.warns:
logger.warning('no schema, but got data from hive')
self.warns.add('__schema__')
return pd.DataFrame(raw, dtype=object)
df = pd.DataFrame(raw or None, # self.empty,
columns=self.columns, dtype=object)
if na_vals is not None:
df[df.isin(na_vals)] = None
df.index += self.offset
self.offset += len(df)
for col, val in self.fill_values.items():
if col in df:
df[col] = df[col].fillna(val)
# if self.empty is None:
# local.empty = df[:0].copy()
for col, typ in zip(self.columns, self.dtypes):
try:
if typ == pd.np.dtype(str):
# don't convert str again, as it will turn None into 'None'
continue
if typ == pd.np.dtype('datetime64[ms]'):
try:
df[col] = df[col].astype(int)
except ValueError:
pass
df[col] = df[col].astype(typ)
except (TypeError, ValueError) as e:
first = not bool(self.warns)
if col not in self.warns:
logger.warning('Cannot convert %r to %r (%s)', col, typ, e)
self.warns.add(col)
if first:
logger.warning('consider passing fill_values={%r: ...} '
'as argument to your request', col)
return df
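# Illustrative only: a minimal, hedged sketch of how ``Framer`` turns raw rows
# into a typed DataFrame; the column names and values are made up and this
# helper is never called by the module itself.
def _example_framer():
    """Sketch: frame two fake rows with a BIGINT and a DOUBLE column."""
    columns = pd.Index(['id', 'price'])
    dtypes = [Framer.get_dtype('BIGINT'), Framer.get_dtype('DOUBLE')]
    framer = Framer(columns, dtypes)
    df = framer.mk_df([[1, '2.5'], [2, '3.0']])
    assert list(df.columns) == ['id', 'price']
    assert df['price'].dtype == pd.np.dtype(float)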
class RawHDFSChunker:
def __init__(self, hive, table, partitions, fill_values=None,
sep=None, nl='\n', strip='\r\t'):
self.hive = hive
self.table = table
self.partitions = partitions[:]
self.fill_values = fill_values
self.partition = None
self.framer = None
self.proc = None
self.tail = b''
self.sep = sep
self.nl = nl
self.strip = strip
self.sel = slice(None)
self.fill = []
@coroutine
def next_part(self):
yield from self.close()
self.partition = self.partitions.pop(0)
self.framer, self.proc = yield from self.hive._raw_hdfs(
self.table, self.partition, fill_values=self.fill_values)
self.tail = b''
@coroutine
def chunker(self):
chunk = None
while self.partition or self.partitions:
if not self.partition:
yield from self.next_part()
chunk = yield from self.proc.stdout.read(24000000)
if not chunk:
self.partition = None
if self.tail:
chunk = self.tail
self.tail = b''
break
else:
continue
split = (self.tail + chunk).rsplit(b'\n', 1)
if len(split) == 1:
self.tail = chunk
else:
chunk, self.tail = split
break
if chunk:
chunk = chunk.decode()
if True: # FIXME when to initialize # self.sep is None:
self.l = len(self.framer.columns)
if self.l == 1:
self.sep = sep = '\x01'
else:
                    # guess the separator
for sep in ['\x01', '\t', ';', ',', ' | ', ' ']:
if sep in chunk:
self.sep = sep
break
else:
                        raise ValueError('No separator found')
if sep == '\t':
self.strip = '\r'
elif sep == ' | ':
self.strip = ' \r\t'
lines = (pd.Series(chunk.split(self.nl))
.str.strip(self.strip).str.split(self.sep))
l = int(lines.str.len().median())
diff = l - self.l
a = 0
b = 0
while diff > 0:
if hive_isnull(lines.str[a]).all():
a += 1
diff -= 1
elif hive_isnull(lines.str[-(b+1)]).all():
b += 1
diff -= 1
else:
b += diff
diff = 0
if diff < 0:
self.fill = [None] * (-diff)
diff = 0
self.sel = slice(a or None, l - b if b else None)
raw = (cs + self.fill
for cs in (l.strip(self.strip).split(self.sep)[self.sel]
for l in chunk.split(self.nl))
if cs)
            return self.framer.mk_df(raw, na_vals=['', '\\N', 'n/a',
                                                   'NaN', 'nan',
                                                   '(null)', 'null'])
else:
return None
def iter(self):
try:
while True:
                fut = asyncio.ensure_future(self.chunker())
yield fut
if fut.result() is None:
break
finally:
yield self.close()
@coroutine
def close(self):
if self.proc and self.proc.returncode is None:
try:
self.proc.send_signal(subprocess.signal.SIGINT)
except ProcessLookupError:
pass
yield from self.proc.wait()
class AioHive:
def __init__(self, host=None, port=10000, config=None, hadoop='hadoop'):
"""
coroutine based hive client
Parameters
==========
host : str
host of the hiveserver2 to connect to
config : str
hive-site.xml to extract hive.metastore.uris
port : int, default 10000
port of the hiveserver2
hadoop : str, optional
hadoop executable for raw hdfs access
"""
if (host is None and config is None) or (config and host):
            raise TypeError('Exactly one of the host or config arguments must be supplied')
if config:
import xml.etree.ElementTree as ET
cfg = ET.parse(config)
for res in cfg.iter('property'):
if res.findtext('name') == 'hive.metastore.uris':
uri = res.findtext('value')
host = uri.split('://')[-1].split(':')[0]
break
else:
raise ValueError(
"could not find 'hive.metastore.uris' in config")
self.cli = aiohs2.Client(host=host, port=port)
self.config = config
self.hadoop = hadoop
@coroutine
def execute(self, *rqs):
""" execute request without looking at returns """
cur = yield from self.cli.cursor()
try:
for rq in rqs:
yield from cur.execute(rq)
finally:
yield from cur.close()
@coroutine
def fetch(self, hql, chunk_size=10000, fill_values=None):
""" execute request and fetch answer as DataFrame """
cur = yield from self.cli.cursor()
try:
framer = yield from Framer.by_cursor(cur, hql,
fill_values=fill_values)
return (yield from framer(cur.fetch(maxRows=chunk_size)))
finally:
yield from cur.close()
def iter(self, hql, chunk_size=10000, fill_values=None):
""" execute request and iterate over chunks of resulting DataFrame """
cur = yield from self.cli.cursor()
framer = yield from Framer.by_cursor(cur, hql,
fill_values=fill_values)
chunks = cur.iter(maxRows=chunk_size)
def iter_chunks():
try:
for chunk in chunks:
# here we yield the coroutine that will fetch the data
# and put in in a frame
yield framer(chunk)
finally:
yield framer(cur.close())
return iter_chunks()
@coroutine
def close(self):
yield from self.cli.close()
@coroutine
def _raw_hdfs(self, table, partition=True, fill_values=None):
if partition is True:
rq = 'describe formatted {table}'
else:
rq = 'describe formatted {table} partition ({partition})'
info = (yield from self.fetch(
rq.format(table=table, partition=partition))).fillna('').applymap(str.strip)
i0, i1, *_ = pd.np.flatnonzero(info.col_name == '')
schema = info[i0+1:i1]
location = info.query('col_name == "Location:"').data_type
if location.empty:
            raise KeyError('table {} seems not to be materialized'
.format(table))
proc = yield from asyncio.create_subprocess_exec(
self.hadoop, 'fs', '-ls', '-R', *location.values,
stdout=subprocess.PIPE)
out = (yield from proc.stdout.read()).decode().split('\n')
location = [f
for f in (re.split('\s+', l, 7)[-1]
for l in out if l.startswith('-'))
if f.rsplit('/', 1)[-1][0] not in '._']
if not location:
raise KeyError('table {} seems not to be filled'
.format(table))
columns = schema.col_name
dtypes = (schema.data_type
.str.split('(').str[0]
.str.split('<').str[0].str.upper()
.apply(hive_type_map.__getitem__))
framer = Framer(columns, dtypes, fill_values=fill_values)
proc = yield from asyncio.create_subprocess_exec(
self.hadoop, 'fs', '-text', *location,
stdout=subprocess.PIPE)
return framer, proc
@coroutine
def raw(self, table, fill_values=None, **partitions):
if '.' in table:
db, table = table.rsplit('.', 1)
yield from self.execute('use {db}'.format(db=db))
try:
parts = yield from self.fetch('show partitions {}'.format(table))
if parts.empty:
parts = None
        except aiohs2.error.Pyhs2Exception as e:
            # keep a reference: the name bound by ``except ... as`` is cleared
            # when the handler exits, so it can still be re-raised further below
            partition_error = e
            parts = None
        if parts is None:
            if partitions:
                raise partition_error
select = [True]
else:
parts = (parts
.applymap(urllib.parse.unquote)
.partition.str.split('/', expand=True)
.unstack().str.split('=', expand=True)
.reset_index().set_index(['level_1', 0])[1]
.unstack())
sel = pd.Series(not bool(partitions), index=parts.index)
for name, val in partitions.items():
if name not in parts.columns:
                    raise KeyError('no partition info {} in {}'.format(name, table))
if isinstance(val, str):
val = [val]
for v in val:
sel |= parts[name].str.contains(v)
select = list((parts[sel].columns.values[None, :]
+ "='" + parts[sel] + "'")
.apply(', '.join, axis=1))
rhc = RawHDFSChunker(self, table, select,
fill_values=fill_values)
return rhc.iter()
class SyncedHive:
def __init__(self, *args, hive=None, **kws):
"""
synced wrapper around the asyncio hive class
Parameters
==========
host : str
host of the hiveserver2 to connect to
config : str
hive-site.xml to extract hive.metastore.uris
port : int, default 10000
port of the hiveserver2
hive : AioHive, optional
existing async hive client
"""
self.hive = hive or AioHive(*args, **kws)
self.loop = asyncio.get_event_loop()
def run(self, coro):
return self.loop.run_until_complete(coro)
def synced(name):
func = getattr(AioHive, name)
@wraps(func)
def synced(self, *args, **kws):
return self.run(func(self.hive, *args, **kws))
return synced
execute = synced('execute')
fetch = synced('fetch')
close = synced('close')
def iter(self, *args, **kws):
it = self.run(self.hive.iter(*args, **kws))
try:
for chunk in it:
data = self.run(chunk)
if data is not None and not data.empty:
yield data
except BaseException as e:
# ensure close is run
self.run(it.throw(e))
raise e
def raw(self, *args, **kws):
it = self.run(self.hive.raw(*args, **kws))
try:
for chunk in it:
data = self.run(chunk)
if data is not None and not data.empty:
yield data
except BaseException as e:
# ensure close is run
self.run(it.throw(e))
raise e
Hive = SyncedHive
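# Illustrative only (hypothetical host and query): typical synchronous usage.
#     hive = Hive(host='hiveserver.example.com')
#     df = hive.fetch('SELECT 1')            # whole result as a DataFrame
#     for chunk in hive.iter('SELECT 1'):    # or stream it chunk by chunk
#         print(len(chunk))
#     hive.close()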
|
|
# -*- coding: UTF-8 -*-
__all__ = ['agregation']
import numpy as np
import scipy
from scipy.sparse import csr_matrix, coo_matrix, isspmatrix_csr, isspmatrix_csc
from pyamg.relaxation import gauss_seidel
#from pyamg.util.linalg import residual_norm
# ...
try:
from petsc4py import PETSc
importPETSc=True
except ImportError:
importPETSc=False
# ...
from scipy.sparse.linalg import LinearOperator
norm = np.linalg.norm
# ...
def residual_norm(A, x, b):
"""Compute ||b - A*x||"""
return norm(b - A.dot(x))
# ...
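# Illustrative only: a hedged sketch of ``residual_norm`` on a tiny system;
# this helper is never called by the module itself.
def _example_residual_norm():
    """Sketch: the residual of the exact solution is zero."""
    A = csr_matrix(np.array([[2.0, 0.0], [0.0, 3.0]]))
    x = np.array([1.0, 1.0])
    b = np.array([2.0, 3.0])
    assert residual_norm(A, x, b) == 0.0
# ...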
class level:
"""Stores one level of the multigrid hierarchy
All level objects will have an 'A' attribute referencing the matrix
of that level. All levels, except for the coarsest level, will
also have 'P' and 'R' attributes referencing the prolongation and
restriction operators that act between each level and the next
coarser level.
Attributes
----------
A : csr_matrix
Problem matrix for Ax=b
R : csr_matrix
Restriction matrix between levels (often R = P.T)
P : csr_matrix
Prolongation or Interpolation matrix.
Notes
-----
    This class is essentially a plain data container (a struct).
"""
def __init__(self, withscipy=False, withPETSc=False):
from pigasus.fem.solver import solver
# from pigasus.solver.solver import solver
if withscipy:
from pigasus.fem.matrix import matrix
self.R = None
self.A = None
self.P = None
self._A = matrix()
self.slv = solver(matrix=self._A)
else:
from pigasus.fem.matrix import matrix
from pigasus.fem.constants import SPM_SOLVER_BASIC_CG, SPM_SOLVER_BASIC_GS
self.R = matrix()
self.A = matrix()
self.P = matrix()
#
if withPETSc:
self.slv = None
self.smt = None
# self.smt = solver(matrix=self.A, solver=SPM_SOLVER_BASIC_GS)
else:
slvInfo = {}
slvInfo['solver'] = SPM_SOLVER_BASIC_CG
self.slv = solver(matrix=self.A, solverInfo=slvInfo)
smtInfo = {}
smtInfo['solver'] = SPM_SOLVER_BASIC_GS
self.smt = solver(matrix=self.A, solverInfo=smtInfo)
self.withscipy = withscipy
self.withPETSc = withPETSc
def set(self, A, R, P):
"""
sets the scipy matrices A, R, P into pigasus
"""
if self.withscipy:
            self.R = R
            self.A = A
            self.P = P
self._A.set(A)
else:
self.R.set(R)
self.A.set(A)
self.P.set(P)
self._A = A # TODO: to remove. needed for the moment for the smoother
if self.withPETSc and importPETSc:
self.A_petsc = PETSc.Mat().createAIJ(size=A.shape,csr=(A.indptr,A.indices,A.data))
# ...
# Initialize ksp solver.
self.ksp_slv = PETSc.KSP().create()
self.ksp_slv.setOperators(self.A_petsc)
self.ksp_slv.setFromOptions()
self.ksp_slv.setType(PETSc.KSP.Type.CG)
# # Initialize ksp smoother.
# self.ksp_smt = PETSc.KSP().create()
# self.ksp_smt.setOperators(self.A_petsc)
# self.ksp_smt.setFromOptions()
# self.ksp_smt.setType(PETSc.KSP.Type.RICHARDSON)
def solve(self, b, maxiter=6000, tol=1.e-14):
if self.withPETSc and importPETSc:
_b = PETSc.Vec().createWithArray(b, comm=PETSc.COMM_SELF)
_x = PETSc.Vec().createWithArray(np.zeros_like(b), comm=PETSc.COMM_SELF)
self.ksp_slv.rtol = tol
# self.ksp_slv.setConvergenceHistory()
self.ksp_slv.solve(_b, _x)
return _x.getArray()
else:
return self.slv.solve(b, guess=np.zeros_like(b) \
, maxiter=maxiter, eps=tol)
def smoother(self, x, b, nu, tol=1.e-10):
if self.withscipy:
gauss_seidel(self.A, x, b, iterations=nu)
return x
else:
# if self.withPETSc and importPETSc:
if False:
_b = PETSc.Vec().createWithArray(b, comm=PETSc.COMM_SELF)
_x = PETSc.Vec().createWithArray(np.zeros_like(b), comm=PETSc.COMM_SELF)
self.ksp_smt.rtol = tol
self.ksp_smt.max_it = nu
# self.ksp_smt.setConvergenceHistory()
self.ksp_smt.solve(_b, _x)
return _x.getArray()
else:
gauss_seidel(self._A, x, b, iterations=nu) # TODO: to remove
return x
# return self.smt.solve(b, guess=np.zeros_like(b) \
# , maxiter=nu, eps=tol)
class agregation(object):
def __init__(self, list_geometry, gamma, nu1, nu2 \
, smoother=None, coarse_solver=None \
, withscipy=False, withPETSc=False):
"""Creates a geometric multigrid for the matrix list_A[-1]
Parameters
----------
        list_A : list of csr_matrix or pigasus matrices; list_A[0] is on the finest grid
        list_geometry : list of geometries; [-1] -> the finest geometry and [0] -> the coarsest
        nlevels : the number of subdomain levels
Returns
-------
mg : the geometric multigrid
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from pigasus.gallery import EllipticPDE
nrb_H = self.geometry_H[0]
nrb_h = self.geometry_h[0]
knots_H = nrb_H.knots[0]
knots_h = nrb_h.knots[0]
n = nrb_H.shape[0]
p = nrb_H.degree[0]
list_r = [r for r in knots_h if r not in knots_H]
See Also
--------
TODO
"""
        # TODO : for the moment we only treat 1-patch geometries
self.withscipy = withscipy
self.withPETSc = withPETSc
self.geometries = list_geometry
self.dim = self.geometries[0].dim
self.nlevels = len(self.geometries)-1
self.coarse_solver = coarse_solver
self.smoother = smoother
self.gamma = gamma
self.nu1 = nu1
self.nu2 = nu2
self.nloop = 1
self.levels = []
for i in range(0, len(self.geometries)):
self.levels.append(level(withscipy=withscipy))
self.list_allresiduals = [] # list of residuals for each step
self.list_coarseresiduals = []
#-----------------------------------
#-----------------------------------
def initialize(self, A, list_A=None, list_DirFaces=None, GalerkinCoarseGrid=True):
from scipy.sparse import identity as Id
from pigasus.fem.common_obj import isMatrix
if isMatrix(A):
self.A = A.get()
else:
self.A = A
if list_A is not None:
self.list_A = list_A
list_A = self.list_A[::-1]
n,m = self.A.shape
ilvl = 0
lvl = self.levels[ilvl]
lvl.set(self.A, Id(n), Id(n))
geometries = self.geometries[::-1]
for (geo_h, geo_H) in zip(geometries[:-1], geometries[1:]):
ilvl += 1
# ... interpolator
P = self.constructInterpolationMatrix(geo_H, geo_h, list_DirFaces=list_DirFaces)
# ... restriction
list_H, list_h = self.compute_H(geo_H, geo_h)
r = list_h[0]/list_H[0]
R = P.transpose().tocsr()
R *= r
# print "Interpolator : ", P.shape
# print "Restrictor : ", R.shape
# ... the coarse system
if GalerkinCoarseGrid:
# print "Galerkin coarse grid operator has been initialized"
if self.withscipy:
A_h = self.levels[ilvl-1].A
else:
A_h = self.levels[ilvl-1].A.get()
# print R.shape, A_h.shape, P.shape
A_H = R * A_h * P
# print A_h.shape, A_H.shape
# A_H = A_H.tocsr()
else:
                if self.withscipy:
                    # assumes list_A (reversed above) parallels self.levels,
                    # i.e. it is indexed by the level counter ilvl
                    A_H = list_A[ilvl]
                else:
                    A_H = list_A[ilvl].get()
lvl = self.levels[ilvl]
lvl.set(A_H, R, P)
self.levels = self.levels[::-1]
A = self.levels[-1].A.get()
self.dtype = A.dtype
self.shape = A.shape
#-----------------------------------
#-----------------------------------
def interpolation(self, level, vH):
P = self.levels[level].P
vh = P.dot(vH)
return vh
#-----------------------------------
#-----------------------------------
def restriction(self, level, vh):
R = self.levels[level].R
vH = R.dot(vh)
return vH
#-----------------------------------
#-----------------------------------
def mgcyc(self, k, gamma, ukm, fk, nu1, nu2):
"""
this routine will retrurn uk_{m+1} using ukm
"""
nlevels = self.nlevels + 1
lvl = self.levels[::-1][nlevels-k]
lvl1 = self.levels[::-1][nlevels-k-1]
Rk = lvl.R
Pk = lvl.P
Lk = lvl1.A
Lk1 = lvl.A
# ----------------------------
# presmoothing
# ----------------------------
ukm_s = lvl1.smoother(ukm, fk, nu1)
# ----------------------------
# ----------------------------
# coarse grid correction
# ----------------------------
# Compute the defect
dkm = fk - Lk.dot(ukm_s)
# Restrict the defect
dk1m = Rk.dot(dkm)
# Compute an approximate solution vk1m of the defect equation on Omega_{k-1}
# if k = 1, use a direct or fast iterative solver, by calling
if k == 1:
# TODO : enlever le guess
guess = np.zeros_like(dk1m)
if self.coarse_solver is None:
vk1m = lvl.solve(dk1m)
else:
vk1m = self.coarse_solver(dk1m)
if k > 1:
a = np.zeros_like(dk1m)
vk1m_ = dk1m
for i in range(0, gamma):
dk1m_ = vk1m_
vk1m_, err_ = self.mgcyc(k-1, gamma, a, dk1m_, nu1, nu2)
vk1m = vk1m_
# Interpolate the correction
# print "vk1m : ", vk1m.__class__.__name__, vk1m.shape
# print "Pk : ", Pk.__class__.__name__, Pk.shape
vkm = Pk.dot(vk1m)
# Compute the corrected approximation
ukm += vkm
# ----------------------------
# ----------------------------
# postsmoothing
# ----------------------------
ukp1m = lvl1.smoother(ukm, fk, nu2)
# ----------------------------
err = residual_norm(Lk, ukp1m, fk)
return ukp1m, err
#-----------------------------------
#-----------------------------------
def solve(self, b, x0=None, tol=1e-7, maxiter=20, cycle='V' \
, residuals=None, callback=None \
, accel=None, maxiter_prec=2, tol_prec=1e-7 \
):
"""Main solution call to execute multigrid cycling.
Parameters
----------
b : array
Right hand side.
x0 : array
Initial guess.
tol : float
Stopping criteria: relative residual r[k]/r[0] tolerance.
maxiter : int
Stopping criteria: maximum number of allowable iterations.
cycle : {'V','W','F'}
Type of multigrid cycle to perform in each iteration.
residuals : list
List to contain residual norms at each iteration.
Returns
-------
x : array
Approximate solution to Ax=b
See Also
--------
aspreconditioner
Examples
--------
"""
if x0 is None:
x = np.zeros_like(b)
else:
x = np.array(x0) # copy
cycle = str(cycle).upper()
if accel is not None:
# # Check for AMLI compatability
# if (accel != 'fgmres') and (cycle == 'AMLI'):
# raise ValueError('AMLI cycles require acceleration (accel) to be fgmres, or no acceleration')
# Acceleration is being used
if isinstance(accel, str):
from pyamg import krylov
from scipy.sparse.linalg import isolve
if hasattr(krylov, accel):
accel = getattr(krylov, accel)
else:
accel = getattr(isolve, accel)
A = self.levels[-1].A.get()
M = self.aspreconditioner(cycle=cycle, tol=tol_prec, maxiter=maxiter_prec)
try: # try PyAMG style interface which has a residuals parameter
return accel(A, b, x0=x0, tol=tol, maxiter=maxiter, M=M,
callback=callback, residuals=residuals)[0]
except: # try the scipy.sparse.linalg.isolve style interface,
# which requires a call back function if a residual
# history is desired
cb = callback
if residuals is not None:
residuals[:] = [residual_norm(A, x, b)]
def callback(x):
if scipy.isscalar(x):
residuals.append(x)
else:
residuals.append(residual_norm(A, x, b))
if cb is not None:
cb(x)
return accel(A, b, x0=x0, tol=tol, maxiter=maxiter, M=M,
callback=callback)[0]
else:
# Scale tol by normb
normb = norm(b)
if normb != 0:
tol = tol * normb
# print ">>>>>>>>>>>>>> tol ", tol, normb
if residuals is None:
residuals = []
else:
residuals[:] = []
residuals.append(residual_norm(self.A, x, b))
self.first_pass = True
self.nloop = 0
while len(residuals) <= maxiter and residuals[-1] > tol:
x, err = self.mgcyc(self.nlevels, self.gamma, x, b, self.nu1, self.nu2)
residuals.append(err)
self.first_pass = False
if callback is not None:
callback(x)
self.nloop += 1
return x
def aspreconditioner(self, cycle='V', maxiter=1, tol=1e-12):
"""Create a preconditioner using this multigrid cycle
Parameters
----------
cycle : {'V','W','F','AMLI'}
Type of multigrid cycle to perform in each iteration.
Returns
-------
precond : LinearOperator
Preconditioner suitable for the iterative solvers in defined in
the scipy.sparse.linalg module (e.g. cg, gmres) and any other
solver that uses the LinearOperator interface. Refer to the
LinearOperator documentation in scipy.sparse.linalg
See Also
--------
multilevel_solver.solve, scipy.sparse.linalg.LinearOperator
Examples
--------
>>> from pyamg.aggregation import smoothed_aggregation_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> from scipy import rand
>>> A = poisson((100, 100), format='csr') # matrix
>>> b = rand(A.shape[0]) # random RHS
>>> ml = smoothed_aggregation_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
"""
shape = self.shape
dtype = self.dtype
def matvec(b):
return self.solve(b, maxiter=maxiter, cycle=cycle, tol=tol)
return LinearOperator(shape, matvec, dtype=dtype)
#-----------------------------------
def constructInterpolationMatrix(self, geo_H, geo_h, list_DirFaces=None):
if self.dim ==1:
from .splineRefMat import constructCurveMatrix as constructMatrix
if self.dim ==2:
from .splineRefMat import constructSurfaceMatrix as constructMatrix
if self.dim ==3:
print("initInterpolation: Not yet implemented for 3D")
patch_id = 0
nrb_H = geo_H[patch_id]
nrb_h = geo_h[patch_id]
if list_DirFaces is None:
DirFaces = []
else:
DirFaces = list_DirFaces[patch_id]
if self.dim ==1:
knots_H = nrb_H.knots[0]
knots_h = nrb_h.knots[0]
n = nrb_H.shape[0]
p = nrb_H.degree[0]
list_r = [r for r in knots_h if r not in knots_H]
M = constructMatrix(list_r, p, n, knots_H \
, DirFaces=DirFaces)
return M
if self.dim ==2:
u_H1,u_H2 = nrb_H.knots
n_H1,n_H2 = nrb_H.shape
p_H1,p_H2 = nrb_H.degree
u_h1,u_h2 = nrb_h.knots
list_r1 = [r for r in u_h1 if r not in u_H1]
list_r2 = [r for r in u_h2 if r not in u_H2]
M, [n,m] = constructMatrix( list_r1, list_r2 \
, p_H1, p_H2 \
, n_H1, n_H2 \
, u_H1, u_H2 \
, DirFaces=DirFaces)
return M
def compute_H(self, geo_H, geo_h):
dim = geo_H.dim
list_H = []
list_h = []
# for (nrb_H, nrb_h) in zip(geo_H, geo_h):
nrb_H = geo_H[0]
nrb_h = geo_h[0]
H = 1.
h = 1.
for d in range(0,dim):
p_H = nrb_H.degree[d]
p_h = nrb_h.degree[d]
H *= nrb_H.knots[d][p_H+1]-nrb_H.knots[d][0]
h *= nrb_h.knots[d][p_h+1]-nrb_h.knots[d][0]
list_H.append(H)
list_h.append(h)
return list_H, list_h
def __repr__(self):
"""Prints basic statistics about the multigrid hierarchy.
"""
from pyamg.util.linalg import condest
levels = self.levels[::-1]
output = 'multilevel_solver\n'
        output += 'Condition number of the matrix: %6.3e\n' % condest(self.A)
output += 'Number of Levels: %d\n' % len(levels)
output += 'Operator Complexity: %6.3f\n' % self.operator_complexity()
output += 'Grid Complexity: %6.3f\n' % self.grid_complexity()
# output += 'Coarse Solver: %s\n' % self.coarse_solver.name()
total_nnz = sum([level.A.nnz for level in levels])
output += ' level unknowns nonzeros\n'
for n, level in enumerate(levels):
A = level.A
output += ' %2d %10d %10d [%5.2f%%]\n' %\
(n, A.shape[1], A.nnz,\
(100 * float(A.nnz) / float(total_nnz)))
return output
def operator_complexity(self):
"""Operator complexity of this multigrid hierarchy
Defined as:
Number of nonzeros in the matrix on all levels /
Number of nonzeros in the matrix on the finest level
"""
levels = self.levels[::-1]
return sum([level.A.nnz for level in levels]) /\
float(levels[0].A.nnz)
def grid_complexity(self):
"""Grid complexity of this multigrid hierarchy
Defined as:
Number of unknowns on all levels /
Number of unknowns on the finest level
"""
levels = self.levels[::-1]
return sum([level.A.shape[0] for level in levels]) /\
float(levels[0].A.shape[0])
|
|
#!/usr/bin/env python
# encoding: utf-8
"""Azkaban remote interaction module.
This contains the `Session` class which will be used for all interactions with
a remote Azkaban server.
"""
from .util import AzkabanError, Config, Adapter, MultipartForm, flatten
from getpass import getpass, getuser
from os.path import basename, exists
from requests.exceptions import HTTPError
from six import string_types
from six.moves.configparser import NoOptionError, NoSectionError
from six.moves.urllib.parse import urlparse
from time import sleep
from warnings import warn
import json
import logging as lg
import re
import requests as rq
_logger = lg.getLogger(__name__)
def _azkaban_request(method, url, **kwargs):
"""Make request to azkaban server and catch common errors.
:param method: GET, POST, etc.
:param url: Endpoint url.
:param **kwargs: Arguments forwarded to the request handler.
This function is meant to handle common errors and return a more helpful
message than the default one.
"""
try:
response = rq.request(url=url, method=method, **kwargs)
except rq.ConnectionError as err:
raise AzkabanError('Unable to connect to Azkaban server %r: %s', url, err)
except rq.exceptions.MissingSchema:
raise AzkabanError('Invalid Azkaban server url: %r.', url)
else:
return response
def _extract_json(response):
"""Extract JSON from Azkaban response, gracefully handling errors.
:param response: Request response object.
"""
try:
json = response.json()
except ValueError as err: # this should never happen
_logger.error('No JSON decoded from response:\n%s', response.text)
raise err
else:
if 'error' in json:
raise AzkabanError(json['error'])
elif json.get('status') == 'error':
raise AzkabanError(json['message'])
else:
return json
def _parse_url(url):
"""Parse url, returning tuple of (username, password, address)
:param url: HTTP endpoint (including protocol, port, and optional user /
password).
Supported url formats:
+ protocol://host:port
+ protocol://user@host:port
+ protocol://user:password@host:port
+ user@protocol://host:port (compatibility with older versions)
+ user:password@protocol://host:port (compatibility with older versions)
"""
if not re.match(r'[a-zA-Z]+://', url) and not re.search(r'@[a-zA-Z]+://', url):
# no scheme specified, default to http://
url = 'http://' + url
if re.search(r'@[a-zA-Z]+://', url):
# compatibility mode: `user@protocol://host:port` or
# `user:password@protocol://host:port`
splitted = url.rstrip('/').split('@')
if len(splitted) == 1:
address = splitted[0]
user = None
password = None
elif len(splitted) == 2:
address = splitted[1]
creds = splitted[0].split(':', 1)
if len(creds) == 1:
user = creds[0]
password = None
else:
user, password = creds
else:
raise AzkabanError('Malformed url: %r' % (url, ))
return user, password, address
else:
parsed = urlparse(url)
return (parsed.username, parsed.password,
'%s://%s:%s' % (parsed.scheme, parsed.hostname, parsed.port))
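# Illustrative only: a hedged sketch of the URL forms accepted by
# ``_parse_url`` (host, user and password are hypothetical); this helper is
# never called by the module itself.
def _example_parse_url():
  """Sketch: the (user, password, address) tuple for each supported format."""
  assert _parse_url('host:8081') == (None, None, 'http://host:8081')
  assert _parse_url('http://host:8081') == (None, None, 'http://host:8081')
  assert _parse_url('http://user:pw@host:8081') == ('user', 'pw', 'http://host:8081')
  assert _parse_url('user:pw@http://host:8081') == ('user', 'pw', 'http://host:8081')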
class Session(object):
"""Azkaban session.
:param url: HTTP endpoint (including protocol, port and optional user).
:param alias: Alias name.
:param config: Configuration object used to store session IDs.
:param attempts: Maximum number of attempts to refresh session.
:param verify: Whether or not to verify HTTPS requests.
This class contains mostly low-level methods that translate directly into
Azkaban API calls. The :class:`~azkaban.remote.Execution` class should be
preferred for interacting with workflow executions.
Note that each session's ID is lazily updated. In particular, instantiating
the :class:`Session` doesn't guarantee that its current ID (e.g. loaded from
the configuration file) is valid.
"""
def __init__(
self, url=None, alias=None, config=None, attempts=3, verify=True
):
self.attempts = attempts
self.verify = verify
self.config = config
if not url:
warn(DeprecationWarning(
'Session constructor support for aliases is going away in 1.0. '
'Please use `Session.from_alias` instead.',
))
config = config or Config() # Temporary hack for backwards compatibility.
alias = alias or config.get_option('azkaban', 'default.alias')
try:
url = config.parser.get('alias', alias)
except (NoOptionError, NoSectionError):
raise AzkabanError('Alias %r not found.', alias)
self.user, self.password, self.url = _parse_url(url)
if not self.user:
self.user = getuser()
self.id = None
if self.config:
try:
key = str(self).replace(':', '.')
self.id = self.config.parser.get('session_id', key)
except (NoOptionError, NoSectionError):
pass
self._logger = Adapter(repr(self), _logger)
self._logger.debug('Instantiated.')
def __repr__(self):
return '<%s(url=\'%s@%s\')>' % (
self.__class__.__name__, self.user, self.url
)
def __str__(self):
return '%s@%s' % (self.user, self.url)
def is_valid(self, response=None):
"""Check if the current session ID is valid.
    :param response: If passed, this response will be used to determine the
validity of the session. Otherwise a simple test request will be emitted.
"""
self._logger.debug('Checking if current session is valid.')
if not self.id:
self._logger.debug('No previous ID found.')
return False
if response is None:
# issue a request to check if the ID is valid (note the explicit `None`
# check as 500 responses are falsish).
self._logger.debug('Checking if ID %s is valid.', self.id)
response = _azkaban_request(
'POST',
'%s/manager' % (self.url, ),
data={'session.id': self.id},
verify=self.verify,
)
# the above request will return a 200 empty response if the current
# session ID is valid and a 500 response otherwise
if (
'<!-- /.login -->' in response.text or # usual non API error response
'Login error' in response.text or # special case for API
'"error" : "session"' in response.text # error when running a flow's jobs
):
self._logger.debug('ID %s is invalid:\n%s', self.id, response.text)
return False
else:
self._logger.debug('ID %s is valid.', self.id)
return True
def get_workflow_executions(self, project, flow, start=0, length=10):
"""Fetch executions of a flow.
:param project: Project name.
:param flow: Flow name.
:param start: Start index (inclusive) of the returned list.
:param length: Max length of the returned list.
"""
self._logger.debug('Fetching executions of %s/%s.', project, flow)
res = self._request(
method='GET',
endpoint='manager',
params={
'ajax': 'fetchFlowExecutions',
'project': project,
'flow': flow,
'start': start,
'length': length
},
)
if not res.text:
# Azkaban returns a 200 empty response if the project doesn't exist so
# we throw an explicit error here, rather than letting `_extract_json`
# fail generically.
raise AzkabanError(
'Unable to fetch executions. Check that project %r exists.', project
)
else:
return _extract_json(res)
def get_running_workflows(self, project, flow):
"""Get running executions of a flow.
:param project: Project name.
:param flow: Flow name.
Note that if the project doesn't exist, the Azkaban server will return a
somewhat cryptic error `Project 'null' not found.`, even though the name of
the project isn't `null`.
"""
self._logger.debug('Fetching running executions of %s/%s.', project, flow)
return _extract_json(self._request(
method='GET',
endpoint='executor',
params={
'ajax': 'getRunning',
'project': project,
'flow': flow,
},
))
def get_execution_status(self, exec_id):
"""Get status of an execution.
:param exec_id: Execution ID.
"""
self._logger.debug('Fetching status for execution %s.', exec_id)
return _extract_json(self._request(
method='GET',
endpoint='executor',
params={
'execid': exec_id,
'ajax': 'fetchexecflow',
},
))
def get_execution_logs(self, exec_id, offset=0, limit=50000):
"""Get execution logs.
:param exec_id: Execution ID.
:param offset: Log offset.
:param limit: Size of log to download.
"""
self._logger.debug('Fetching logs for execution %s.', exec_id)
return _extract_json(self._request(
method='GET',
endpoint='executor',
params={
'execid': exec_id,
'ajax': 'fetchExecFlowLogs',
'offset': offset,
'length': limit,
},
))
def get_job_logs(self, exec_id, job, offset=0, limit=50000):
"""Get logs from a job execution.
:param exec_id: Execution ID.
:param job: Job name.
:param offset: Log offset.
:param limit: Size of log to download.
"""
self._logger.debug('Fetching logs for execution %s, job %s.', exec_id, job)
return _extract_json(self._request(
method='GET',
endpoint='executor',
params={
'execid': exec_id,
'jobId': job,
'ajax': 'fetchExecJobLogs',
'offset': offset,
'length': limit,
},
))
def cancel_execution(self, exec_id):
"""Cancel workflow execution.
:param exec_id: Execution ID.
"""
self._logger.debug('Cancelling execution %s.', exec_id)
res = _extract_json(self._request(
method='GET',
endpoint='executor',
params={
'execid': exec_id,
'ajax': 'cancelFlow',
},
))
if 'error' in res:
raise AzkabanError('Execution %s is not running.', exec_id)
else:
self._logger.info('Execution %s cancelled.', exec_id)
return res
def pause_execution(self, exec_id):
"""Pause workflow execution.
:param exec_id: Execution ID.
If an execution has already been paused, this method is a no-op.
"""
self._logger.debug('Pausing execution %s.', exec_id)
res = _extract_json(self._request(
method='GET',
endpoint='executor',
params={
'execid': exec_id,
'ajax': 'pauseFlow',
},
))
if 'error' in res:
raise AzkabanError('Execution %s is not running.', exec_id)
else:
self._logger.info('Execution %s paused.', exec_id)
return res
def resume_execution(self, exec_id):
"""Resume workflow execution.
:param exec_id: Execution ID.
If an execution is already running, this method is a no-op.
"""
self._logger.debug('Resuming execution %s.', exec_id)
res = _extract_json(self._request(
method='GET',
endpoint='executor',
params={
'execid': exec_id,
'ajax': 'resumeFlow',
},
))
if 'error' in res:
raise AzkabanError('Execution %s is not running.', exec_id)
else:
self._logger.info('Execution %s resumed.', exec_id)
return res
def get_projects(self):
"""Get a list of all projects."""
self._logger.debug('Getting all projects')
return _extract_json(self._request(
method='GET',
endpoint='index',
params={'ajax': 'fetchallprojects'},
))
def create_project(self, name, description):
"""Create project.
:param name: Project name.
:param description: Project description.
"""
self._logger.debug('Creating project %s.', name)
return _extract_json(self._request(
method='POST',
endpoint='manager',
data={
'action': 'create',
'name': name,
'description': description,
},
))
def delete_project(self, name):
"""Delete a project on Azkaban.
:param name: Project name.
"""
self._logger.debug('Deleting project %s.', name)
res = self._request(
method='GET',
endpoint='manager',
params={
'project': name,
'delete': 'true',
},
)
msg = "Project '%s' was successfully deleted" % (name, )
    if msg not in res.text:
raise AzkabanError('Delete failed. Check permissions and existence.')
return res
def run_workflow(self, name, flow, jobs=None, disabled_jobs=None,
concurrent=True, properties=None, on_failure='finish', notify_early=False,
emails=None):
"""Launch a workflow.
:param name: Name of the project.
:param flow: Name of the workflow.
:param jobs: List of names of jobs to run (run entire workflow by default).
Mutually exclusive with `disabled_jobs` parameter.
:param disabled_jobs: List of names of jobs not to run. Mutually exclusive
with `jobs` parameter.
:param concurrent: Run workflow concurrently with any previous executions.
Can either be a boolean or a valid concurrency option string. Available
string options: `'skip'` (do not run flow if it is already running),
`'concurrent'` (run the flow in parallel with any current execution),
`'pipeline:1'` (pipeline the flow such that the current execution will
not be overrun: block job A until the previous flow job A has completed),
`'pipeline:2'` (pipeline the flow such that the current execution will
not be overrun: block job A until the previous flow job A's _children_
have completed).
:param properties: Dictionary that will override global properties in this
execution of the workflow. This dictionary will be flattened similarly to
how :class:`~azkaban.job.Job` options are handled.
:param on_failure: Set the execution behavior on job failure. Available
options: `'finish'` (finish currently running jobs, but do not start any
others), `'continue'` (continue executing jobs as long as dependencies
      are met), `'cancel'` (cancel all jobs immediately).
:param notify_early: Send any notification emails when the first job fails
rather than when the entire workflow finishes.
:param emails: List of emails or pair of list of emails to be notified
when the flow fails. Note that this will override any properties set in
      the workflow. If a single list is passed, the emails will be used for
both success and failure events. If a pair of lists is passed, the first
will receive failure emails, the second success emails.
Note that in order to run a workflow on Azkaban, it must already have been
uploaded and the corresponding user must have permissions to run it.
"""
self._logger.debug('Starting project %s workflow %s.', name, flow)
request_data = {
'ajax': 'executeFlow',
'project': name,
'flow': flow
}
request_data.update(self._run_options(
name,
flow,
jobs=jobs,
disabled_jobs=disabled_jobs,
concurrent=concurrent,
properties=properties,
on_failure=on_failure,
notify_early=notify_early,
emails=emails
))
res = _extract_json(self._request(
method='POST',
endpoint='executor',
include_session='params',
data=request_data,
))
self._logger.info('Started project %s workflow %s.', name, flow)
return res
def schedule_workflow(self, name, flow, date, time, period=None, **kwargs):
"""Schedule a workflow.
:param name: Project name.
:param flow: Name of flow in project.
:param date: Date of the first run (possible values:
`'08/07/2014'`, `'12/11/2015'`).
:param time: Time of the schedule (possible values:
`'9,21,PM,PDT'`, `'10,30,AM,PDT'`).
:param period: Frequency to repeat. Consists of a number and a unit
(possible values: `'1s'`, `'2m'`, `'3h'`, `'2M'`). If not specified
the flow will be run only once.
:param \*\*kwargs: See :meth:`run_workflow` for documentation.
"""
self._logger.debug('Scheduling project %s workflow %s.', flow, name)
request_data = {
'ajax': 'scheduleFlow',
'projectName': name,
'projectId': self._get_project_id(name),
'flow': flow,
'scheduleDate': date,
'scheduleTime': time,
'is_recurring': 'on' if period else 'off',
}
if period:
request_data['period'] = period
request_data.update(self._run_options(name, flow, **kwargs))
res = _extract_json(self._request(
method='POST',
endpoint='schedule',
data=request_data,
))
self._logger.info('Scheduled project %s workflow %s.', name, flow)
return res
def unschedule_workflow(self, name, flow):
"""Unschedule a workflow.
:param name: Project name.
:param flow: Name of flow in project.
"""
self._logger.debug('Unscheduling project %s workflow %s.', flow, name)
request_data = {
'action': 'removeSched',
'scheduleId': self.get_schedule(name, flow)['scheduleId'],
}
res = _extract_json(self._request(
method='POST',
endpoint='schedule',
data=request_data,
))
self._logger.info('Unscheduled project %s workflow %s.', name, flow)
return res
def schedule_cron_workflow(self, name, flow, cron, timezone=None, **kwargs):
"""Schedule a cron workflow.
:param name: Project name.
:param flow: Name of flow in project.
:param cron: A CRON expression comprising 6 or 7 fields separated by white
space that represents a set of times in Quartz Cron Format.
:param timezone: Timezone ID. See https://bit.ly/2RzHxfI for the list of
valid IDs. If set to an invalid value, the server's default will be used.
:param \*\*kwargs: See :meth:`run_workflow` for documentation.
"""
self._logger.debug('Scheduling project %s workflow %s.', flow, name)
request_data = {
'ajax': 'scheduleCronFlow',
'projectName': name,
'flow': flow,
'cronExpression': cron,
}
if timezone:
request_data['timezone'] = timezone
request_data.update(self._run_options(name, flow, **kwargs))
res = _extract_json(self._request(
method='POST',
endpoint='schedule',
params=request_data,
))
self._logger.info('Scheduled project %s workflow %s.', name, flow)
return res
def get_schedule(self, name, flow):
"""Get schedule information.
:param name: Project name.
:param flow: Name of flow in project.
"""
self._logger.debug(
'Retrieving schedule for project %s workflow %s.', flow, name
)
res = _extract_json(self._request(
method='GET',
endpoint='schedule',
params={
'ajax': 'fetchSchedule',
'projectId': self._get_project_id(name),
'flowId': flow,
},
))
self._logger.info(
'Retrieved schedule for project %s workflow %s.', name, flow
)
if 'schedule' not in res:
raise AzkabanError(
'Failed to get schedule. Check that the schedule exists.'
)
return res['schedule']
def get_sla(self, schedule_id):
"""Get SLA information.
:param schedule_id: Schedule Id - obtainable from get_schedule
"""
self._logger.debug('Retrieving SLA for schedule ID %s.', schedule_id)
res = _extract_json(self._request(
method='GET',
endpoint='schedule',
params={
'ajax': 'slaInfo',
'scheduleId': schedule_id
},
))
self._logger.info('Retrieved SLA for schedule ID %s.', schedule_id)
if 'settings' not in res:
raise AzkabanError('Failed to get SLA; check that an SLA exists.')
return res
def set_sla(self, schedule_id, email, settings):
"""Set SLA for a schedule.
:param schedule_id: Schedule ID.
:param email: Array of emails to receive notifications.
:param settings: Array of comma delimited strings of SLA settings
consisting of:
+ job name - blank for full workflow
+ rule - SUCCESS or FINISH
+ duration - specified in hh:mm
+ email action - bool
+ kill action - bool
"""
self._logger.debug('Setting SLA for schedule Id %s.', schedule_id)
request_data = {
'ajax': 'setSla',
'scheduleId': schedule_id,
'slaEmails': ','.join(email),
}
for i, setting in enumerate(settings):
request_data['settings[%s]' % (i,)] = setting
res = _extract_json(self._request(
method='POST',
endpoint='schedule',
data=request_data,
))
self._logger.info('Set SLAs for schedule Id %s.', schedule_id)
return res
def _get_project_id(self, name):
"""Fetch the id of a project.
:param name: Project name.
"""
self._logger.debug('Retrieving id for project %s.', name)
try:
res = _extract_json(self._request(
method='GET',
endpoint='manager',
params={
# there is no endpoint to get the project id, getPermissions is
# the least expensive endpoint whose response contains the id
'ajax': 'getPermissions',
'project': name,
},
))
except ValueError:
# Azkaban server sends a 200 empty response if the project doesn't exist
raise AzkabanError(
'Failed to get project id. Check that the project exists.'
)
else:
project_id = res['projectId']
self._logger.info('Retrieved id for project %s: %s.', name, project_id)
return project_id
def upload_project(self, name, path, archive_name=None, callback=None):
"""Upload project archive.
:param name: Project name.
:param path: Local path to zip archive.
:param archive_name: Filename used for the archive uploaded to Azkaban.
Defaults to `basename(path)`.
:param callback: Callback forwarded to the streaming upload.
"""
self._logger.debug('Uploading archive %r to project %s.', path, name)
if not exists(path):
raise AzkabanError('Unable to find archive at %r.' % (path, ))
if not self.is_valid():
self._refresh() # ensure that the ID is valid
archive_name = archive_name or basename(path)
if not archive_name.endswith('.zip'):
archive_name += '.zip'
form = MultipartForm(
files=[{
'path': path,
'name': archive_name,
'type': 'application/zip' # force this (tempfiles don't have extension)
}],
params={
'ajax': 'upload',
'project': name,
'session.id': self.id,
},
callback=callback
)
# note that we have made sure the ID is valid, for two reasons:
# + to avoid reuploading large files
# + to simplify the custom ID update process (form parameter)
res = _extract_json(self._request(
method='POST',
endpoint='manager',
include_session=False,
headers=form.headers,
data=form,
))
self._logger.info(
'Archive %s for project %s uploaded as %s.', path, name, archive_name
)
return res
def get_workflows(self, name):
"""Get list of workflows corresponding to a project
:param name: Project name
"""
self._logger.debug(
'Fetching workflows in project %s', name
)
try:
res = self._request(
method='GET',
endpoint='manager',
params={
'ajax': 'fetchprojectflows',
'project': name,
},
)
except HTTPError:
raise AzkabanError('No workflows found in project %s', name)
else:
try:
return _extract_json(res)
except ValueError:
raise AzkabanError('Project %s not found', name)
def get_workflow_info(self, name, flow):
"""Get list of jobs corresponding to a workflow.
:param name: Project name.
:param flow: Name of flow in project.
"""
self._logger.debug(
'Fetching infos for workflow %s in project %s', flow, name
)
try:
res = self._request(
method='GET',
endpoint='manager',
params={
'ajax': 'fetchflowjobs',
'project': name,
'flow': flow,
},
)
except HTTPError:
# the Azkaban server throws a NullPointerException if the flow doesn't
# exist in the project, which causes a 500 response
      raise AzkabanError('Workflow %s not found in project %s.', flow, name)
else:
try:
return _extract_json(res)
except ValueError:
# but sends a 200 empty response if the project doesn't exist
raise AzkabanError('Project %s not found.', name)
def _refresh(self, password=None):
"""Refresh session ID.
:param password: Password used to log into Azkaban. If not specified,
will prompt for one.
Also caches the session ID for future use.
"""
self._logger.debug('Refreshing.')
attempts = self.attempts
password = password or self.password
while True:
password = password or getpass('Azkaban password for %s: ' % (self, ))
try:
res = _extract_json(_azkaban_request(
'POST',
self.url,
data={
'action': 'login',
'username': self.user,
'password': password,
},
verify=self.verify,
))
except AzkabanError as err:
        if 'Incorrect Login.' not in err.message:
raise err
self._logger.warning('Invalid login attempt.')
attempts -= 1
password = None
if attempts <= 0:
raise AzkabanError('Too many unsuccessful login attempts. Aborting.')
else:
break
self.id = res['session.id']
if self.config:
if not self.config.parser.has_section('session_id'):
self.config.parser.add_section('session_id')
self.config.parser.set(
'session_id',
str(self).replace(':', '.'),
self.id
)
self.config.save()
self._logger.info('Refreshed.')
def _run_options(self, name, flow, jobs=None, disabled_jobs=None,
concurrent=True, properties=None, on_failure='finish', notify_early=False,
emails=None):
"""Construct data dict for run related actions.
See :meth:`run_workflow` for parameter documentation.
"""
if jobs and disabled_jobs:
raise ValueError('`jobs` and `disabled_jobs` are mutually exclusive.')
if not jobs:
if not disabled_jobs:
disabled = '[]'
else:
disabled = json.dumps(list(disabled_jobs))
else:
all_names = set(
n['id']
for n in self.get_workflow_info(name, flow)['nodes']
)
run_names = set(jobs)
missing_names = run_names - all_names
if missing_names:
raise AzkabanError(
'Jobs not found in flow %r: %s.' %
(flow, ', '.join(missing_names))
)
else:
disabled = json.dumps(list(all_names - run_names))
try:
failure_action = {
'finish': 'finishCurrent',
'continue': 'finishPossible',
'cancel': 'cancelImmediately',
}[on_failure]
except KeyError:
raise ValueError('Invalid `on_failure` value: %r.' % (on_failure, ))
concurrency_level = None
if isinstance(concurrent, bool):
concurrent = 'concurrent' if concurrent else 'skip'
elif ':' in concurrent:
concurrent, concurrency_level = concurrent.split(':', 1)
request_data = {
'disabled': disabled,
'concurrentOption': concurrent,
'failureAction': failure_action,
'notifyFailureFirst': 'true' if notify_early else 'false',
}
if concurrency_level is not None:
request_data['%sLevel' % (concurrent, )] = concurrency_level
if properties:
request_data.update(dict(
('flowOverride[%s]' % (key, ), value)
for key, value in flatten(properties).items()
))
if emails:
if isinstance(emails[0], string_types):
failure_emails = ','.join(emails)
success_emails = failure_emails
else:
failure_emails = ','.join(emails[0])
success_emails = ','.join(emails[1])
request_data.update({
'failureEmails': failure_emails,
'failureEmailsOverride': 'true',
'successEmails': success_emails,
'successEmailsOverride': 'true',
})
return request_data
def _request(self, method, endpoint, include_session='cookies', **kwargs):
"""Make a request to Azkaban using this session.
:param method: HTTP method.
:param endpoint: Server endpoint (e.g. manager).
:param include_session: Where to include the `session_id` (possible values:
`'cookies'`, `'params'`, `False`).
:param kwargs: Keyword arguments passed to :func:`_azkaban_request`.
If the session expired, will prompt for a password to refresh.
"""
full_url = '%s/%s' % (self.url, endpoint.lstrip('/'))
if not self.id:
self._logger.debug('No ID found.')
self._refresh()
def _send_request():
"""Try sending the request with the appropriate credentials."""
if include_session == 'cookies':
kwargs.setdefault('cookies', {})['azkaban.browser.session.id'] = self.id
elif include_session == 'params':
kwargs.setdefault('data', {})['session.id'] = self.id
elif include_session:
raise ValueError('Invalid `include_session`: %r' % (include_session, ))
return _azkaban_request(method, full_url, verify=self.verify, **kwargs)
response = _send_request()
if not self.is_valid(response):
self._refresh()
response = _send_request()
# `_refresh` raises an exception rather than letting an unauthorized second
# request happen. this means that something is wrong with the server.
if not self.is_valid(response):
raise AzkabanError('Azkaban server is unavailable.')
try:
response.raise_for_status() # check that we get a 2XX response back
except HTTPError as err: # catch, log, and reraise
self._logger.warning(
'Received invalid response from %s:\n%s',
response.request.url, response.content
)
raise err
else:
return response
@classmethod
def from_alias(cls, alias, config=None):
"""Create configured session from an alias.
:param alias: Alias name.
:param config: Azkaban configuration object.
"""
config = config or Config()
section_name = 'alias.%s' % (alias, )
try:
url = config.parser.get(section_name, 'url')
except NoSectionError:
raise AzkabanError('Alias not found: %r' % (alias, ))
except NoOptionError:
raise AzkabanError('No url defined for alias %r.' % (alias, ))
else:
opts = {'url': url, 'config': config}
if config.parser.has_option(section_name, 'verify'):
opts['verify'] = config.parser.getboolean(section_name, 'verify')
if config.parser.has_option(section_name, 'attempts'):
opts['attempts'] = config.parser.getint(section_name, 'attempts')
return Session(**opts)
class Execution(object):
"""Remote workflow execution.
:param session: :class:`Session` instance.
:param exec_id: Execution ID.
"""
def __init__(self, session, exec_id):
self._session = session
self.exec_id = exec_id
@property
def status(self):
"""Execution status."""
return self._session.get_execution_status(self.exec_id)
@property
def url(self):
"""Execution URL."""
return '%s/executor?execid=%s' % (self._session.url, self.exec_id)
def cancel(self):
"""Cancel execution."""
self._session.cancel_execution(self.exec_id)
def logs(self, delay=5):
"""Execution log generator.
:param delay: time in seconds between each server poll
Yields line by line.
"""
finishing = False
offset = 0
while True:
logs = self._session.get_execution_logs(
exec_id=self.exec_id,
offset=offset,
)
if logs['length']:
offset += logs['length']
lines = (e for e in logs['data'].split('\n') if e)
for line in lines:
yield line
elif finishing:
break
else:
if self.status['status'] != 'RUNNING':
finishing = True
sleep(delay)
def job_logs(self, job, delay=5):
"""Job log generator.
:param job: job name
:param delay: time in seconds between each server poll
Yields line by line.
"""
finishing = False
offset = 0
while True:
try:
logs = self._session.get_job_logs(
exec_id=self.exec_id,
job=job,
offset=offset,
)
except HTTPError as err:
# if Azkaban is hanging, the job might be stuck in preparing stage
preparing = False
while True:
sleep(delay)
preparing_jobs = set(
e['id']
for e in self.status['nodes']
if e['status'] == 'PREPARING'
)
if job in preparing_jobs:
if not preparing:
preparing = True
_logger.debug(
'Job %s in execution %s is still preparing.', job, self.exec_id
)
else:
break
if not preparing:
# something else is causing the error
raise err
else:
if logs['length']:
offset += logs['length']
lines = (e for e in logs['data'].split('\n') if e)
for line in lines:
yield line
elif finishing:
break
else:
running_jobs = set(
e['id']
for e in self.status['nodes']
if e['status'] == 'RUNNING'
)
if job not in running_jobs:
finishing = True
sleep(delay)
@classmethod
def start(cls, session, *args, **kwargs):
"""Convenience method to start a new execution.
:param session: :class:`Session` instance.
:param args: Cf. :meth:`Session.run_workflow`.
:param kwargs: Cf. :meth:`Session.run_workflow`.
"""
res = session.run_workflow(*args, **kwargs)
return cls(session, res['execid'])
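# Illustrative usage sketch (added for readability, not part of the original
# module). The alias 'prod' and the 'my_project'/'my_flow' arguments are
# hypothetical placeholders; positional arguments are forwarded unchanged to
# `Session.run_workflow`.
def _example_run_and_tail_logs():
  session = Session.from_alias('prod')
  execution = Execution.start(session, 'my_project', 'my_flow')
  print('Started execution at %s' % (execution.url, ))
  for line in execution.logs(delay=10):  # poll the server every 10 seconds
    print(line)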
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for learning rate decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_state_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import learning_rate_decay
class LRDecayTest(test_util.TensorFlowTestCase):
def testContinuous(self):
with self.test_session():
step = 5
decayed_lr = learning_rate_decay.exponential_decay(0.05, step, 10, 0.96)
expected = .05 * 0.96 ** (5.0 / 10.0)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testStaircase(self):
with self.test_session():
step = gen_state_ops._variable(shape=[], dtype=dtypes.int32,
name="step", container="", shared_name="")
assign_100 = state_ops.assign(step, 100)
assign_1 = state_ops.assign(step, 1)
assign_2 = state_ops.assign(step, 2)
decayed_lr = learning_rate_decay.exponential_decay(.1, step, 3, 0.96,
staircase=True)
# No change to learning rate
assign_1.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
assign_2.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
# Decayed learning rate
assign_100.op.run()
expected = .1 * 0.96 ** (100 // 3)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testVariables(self):
with self.test_session():
step = variables.Variable(1)
assign_1 = step.assign(1)
assign_2 = step.assign(2)
assign_100 = step.assign(100)
decayed_lr = learning_rate_decay.exponential_decay(.1, step, 3, 0.96,
staircase=True)
variables.global_variables_initializer().run()
# No change to learning rate
assign_1.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
assign_2.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
# Decayed learning rate
assign_100.op.run()
expected = .1 * 0.96 ** (100 // 3)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
def testPiecewiseConstant(self):
x = resource_variable_ops.ResourceVariable(-999)
def pc():
return learning_rate_decay.piecewise_constant(x, [100, 110, 120],
[1.0, 0.1, 0.01, 0.001])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(pc()), 1.0, 1e-6)
self.evaluate(x.assign(100))
self.assertAllClose(self.evaluate(pc()), 1.0, 1e-6)
self.evaluate(x.assign(105))
self.assertAllClose(self.evaluate(pc()), 0.1, 1e-6)
self.evaluate(x.assign(110))
self.assertAllClose(self.evaluate(pc()), 0.1, 1e-6)
self.evaluate(x.assign(120))
self.assertAllClose(self.evaluate(pc()), 0.01, 1e-6)
self.evaluate(x.assign(999))
self.assertAllClose(self.evaluate(pc()), 0.001, 1e-6)
@test_util.run_in_graph_and_eager_modes()
def testPiecewiseConstantEdgeCases(self):
x_int = resource_variable_ops.ResourceVariable(
0, dtype=variables.dtypes.int32)
boundaries, values = [-1.0, 1.0], [1, 2, 3]
with self.assertRaises(ValueError):
learning_rate_decay.piecewise_constant(x_int, boundaries, values)
x = resource_variable_ops.ResourceVariable(0.0)
boundaries, values = [-1.0, 1.0], [1.0, 2, 3]
with self.assertRaises(ValueError):
learning_rate_decay.piecewise_constant(x, boundaries, values)
# Test that ref types are valid.
if context.in_graph_mode():
x = variables.Variable(0.0)
x_ref = x.op.outputs[0] # float32_ref tensor should be accepted
boundaries, values = [1.0, 2.0], [1, 2, 3]
learning_rate_decay.piecewise_constant(x_ref, boundaries, values)
# Test casting boundaries from int32 to int64.
x_int64 = resource_variable_ops.ResourceVariable(
0, dtype=variables.dtypes.int64)
boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7]
def pc():
return learning_rate_decay.piecewise_constant(x_int64, boundaries, values)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(pc()), 0.4, 1e-6)
self.evaluate(x_int64.assign(1))
self.assertAllClose(self.evaluate(pc()), 0.4, 1e-6)
self.evaluate(x_int64.assign(2))
self.assertAllClose(self.evaluate(pc()), 0.5, 1e-6)
self.evaluate(x_int64.assign(3))
self.assertAllClose(self.evaluate(pc()), 0.6, 1e-6)
self.evaluate(x_int64.assign(4))
self.assertAllClose(self.evaluate(pc()), 0.7, 1e-6)
class LinearDecayTest(test_util.TensorFlowTestCase):
def testHalfWay(self):
with self.test_session():
step = 5
lr = 0.05
end_lr = 0.0
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = lr * 0.5
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testEnd(self):
with self.test_session():
step = 10
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = end_lr
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testHalfWayWithEnd(self):
with self.test_session():
step = 5
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = (lr + end_lr) * 0.5
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testBeyondEnd(self):
with self.test_session():
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = end_lr
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testBeyondEndWithCycle(self):
with self.test_session():
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr,
cycle=True)
expected = (lr - end_lr) * 0.25 + end_lr
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
class SqrtDecayTest(test_util.TensorFlowTestCase):
def testHalfWay(self):
with self.test_session():
step = 5
lr = 0.05
end_lr = 0.0
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr,
power=power)
expected = lr * 0.5 ** power
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testEnd(self):
with self.test_session():
step = 10
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr,
power=power)
expected = end_lr
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testHalfWayWithEnd(self):
with self.test_session():
step = 5
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr,
power=power)
expected = (lr - end_lr) * 0.5 ** power + end_lr
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testBeyondEnd(self):
with self.test_session():
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr,
power=power)
expected = end_lr
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testBeyondEndWithCycle(self):
with self.test_session():
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr,
power=power, cycle=True)
expected = (lr - end_lr) * 0.25 ** power + end_lr
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
class PolynomialDecayTest(test_util.TensorFlowTestCase):
def testBeginWithCycle(self):
with self.test_session():
lr = 0.001
decay_steps = 10
step = 0
decayed_lr = learning_rate_decay.polynomial_decay(lr, step,
decay_steps, cycle=True)
expected = lr
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
class ExponentialDecayTest(test_util.TensorFlowTestCase):
def testDecay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = gen_state_ops._variable(shape=[], dtype=dtypes.int32,
name="step", container="", shared_name="")
assign_step = state_ops.assign(step, 0)
increment_step = state_ops.assign_add(step, 1)
decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr, step,
k, decay_rate)
with self.test_session():
assign_step.op.run()
for i in range(k+1):
expected = initial_lr * math.exp(-i / k * decay_rate)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
increment_step.op.run()
def testStaircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = gen_state_ops._variable(shape=[], dtype=dtypes.int32,
name="step", container="", shared_name="")
assign_step = state_ops.assign(step, 0)
increment_step = state_ops.assign_add(step, 1)
decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr,
step,
k,
decay_rate,
staircase=True)
with self.test_session():
assign_step.op.run()
for i in range(k+1):
expected = initial_lr * math.exp(-decay_rate * (i // k))
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
increment_step.op.run()
class InverseDecayTest(test_util.TensorFlowTestCase):
def testDecay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = gen_state_ops._variable(shape=[], dtype=dtypes.int32,
name="step", container="", shared_name="")
assign_step = state_ops.assign(step, 0)
increment_step = state_ops.assign_add(step, 1)
decayed_lr = learning_rate_decay.inverse_time_decay(initial_lr,
step,
k,
decay_rate)
with self.test_session():
assign_step.op.run()
for i in range(k+1):
expected = initial_lr / (1 + i / k * decay_rate)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
increment_step.op.run()
def testStaircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = gen_state_ops._variable(shape=[], dtype=dtypes.int32,
name="step", container="", shared_name="")
assign_step = state_ops.assign(step, 0)
increment_step = state_ops.assign_add(step, 1)
decayed_lr = learning_rate_decay.inverse_time_decay(initial_lr,
step,
k,
decay_rate,
staircase=True)
with self.test_session():
assign_step.op.run()
for i in range(k+1):
expected = initial_lr / (1 + decay_rate * (i // k))
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
increment_step.op.run()
class CosineDecayTest(test_util.TensorFlowTestCase):
def np_cosine_decay(self, step, decay_steps):
step = min(step, decay_steps)
completed_fraction = step / decay_steps
return 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
def testDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
with self.test_session():
decayed_lr = learning_rate_decay.cosine_decay(
initial_lr, step, num_training_steps)
expected = self.np_cosine_decay(step, num_training_steps)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
class LinearCosineDecayTest(test_util.TensorFlowTestCase):
def np_linear_cosine_decay(self,
step,
decay_steps,
alpha=0.0,
beta=0.001,
num_periods=0.5):
step = min(step, decay_steps)
linear_decayed = float(decay_steps - step) / decay_steps
fraction = 2.0 * num_periods * step / float(decay_steps)
cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction))
return (alpha + linear_decayed) * cosine_decayed + beta
def testDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
with self.test_session():
decayed_lr = learning_rate_decay.linear_cosine_decay(
initial_lr, step, num_training_steps)
expected = self.np_linear_cosine_decay(step, num_training_steps)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testNonDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
with self.test_session():
decayed_lr = learning_rate_decay.linear_cosine_decay(
initial_lr,
step,
num_training_steps,
alpha=0.1,
beta=1e-4,
num_periods=5)
expected = self.np_linear_cosine_decay(
step,
num_training_steps,
alpha=0.1,
beta=1e-4,
num_periods=5)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
class NoisyLinearCosineDecayTest(test_util.TensorFlowTestCase):
def testDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
with self.test_session():
# No numerical check because of noise
decayed_lr = learning_rate_decay.noisy_linear_cosine_decay(
initial_lr, step, num_training_steps)
decayed_lr.eval()
def testNonDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
with self.test_session():
# No numerical check because of noise
decayed_lr = learning_rate_decay.noisy_linear_cosine_decay(
initial_lr,
step,
num_training_steps,
initial_variance=0.5,
variance_decay=0.1,
alpha=0.1,
beta=1e-4,
num_periods=5)
decayed_lr.eval()
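# Pure-Python reference of the schedules exercised above (a readability sketch,
# not part of the TensorFlow API under test). Each helper mirrors the `expected`
# expressions used in the exponential, natural-exp and inverse-time tests.
def _reference_exponential_decay(lr, step, decay_steps, decay_rate, staircase=False):
  p = step // decay_steps if staircase else step / decay_steps
  return lr * decay_rate ** p
def _reference_natural_exp_decay(lr, step, decay_steps, decay_rate, staircase=False):
  p = step // decay_steps if staircase else step / decay_steps
  return lr * math.exp(-decay_rate * p)
def _reference_inverse_time_decay(lr, step, decay_steps, decay_rate, staircase=False):
  p = step // decay_steps if staircase else step / decay_steps
  return lr / (1 + decay_rate * p)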
if __name__ == "__main__":
googletest.main()
|
|
from functools import total_ordering
import sys
from PyFBA import log_and_message
COMMON_REACTION_LIMIT = 5
class Compound:
"""
A compound is the essential metabolic compound that is involved in a reaction.
The compound by itself does not have a location. See PyFBA.metabolism.CompoundWithLocation for that detail.
    This abstraction allows us to create Compound objects and then create separate objects with a location
    that are required for the FBA.
Other variables associated with the Compound class:
:ivar name: the name of the compound
:ivar reactions: a set of reaction objects that this compound is connected to
:ivar model_seed_id: the compound id from the model seed.
:ivar abbreviation: a short name for the compound
    :ivar formula: the compound's formula
    :ivar mw: the molecular weight of the compound
    :ivar common: Boolean: this is a common compound, i.e. the compound is in > COMMON_REACTION_LIMIT reactions
:ivar charge: the charge associated with the compound
"""
def __init__(self, cpd_id, name, verbose=False):
"""
Initiate the object
:param cpd_id: The id of the compound
:type cpd_id: str
:param name: The name of the compound
:type name: str
:return:
:rtype:
"""
self.id = cpd_id
if name.lower() == 'fe2' or name.lower() == 'fe+2' or name == 'fe2+':
log_and_message(f"Warning: {name} is deprecated. We changed {cpd_id} {name} to {cpd_id} Fe2+", stderr=verbose)
name = 'Fe2+'
if name.lower() == 'fe3' or name == 'fe3+' or name.lower() == 'fe+3':
log_and_message(f"Warning: {name} is deprecated. We changed {name} to Fe3+", stderr=verbose)
name = 'Fe3+'
elif 'fe3' in name.lower() and verbose:
log_and_message(f"Warning: {name} might be deprecated, we prefer Fe3+", stderr=verbose)
self.name = name
self.reactions = set()
self.model_seed_id = self.id
self.alternate_seed_ids = set()
self.abbreviation = None
self.aliases = None
self.formula = None
self.mw = 0
self.common = False
self.charge = 0
self.is_cofactor = False
self.linked_compound = False
self.pka = 0
self.pkb = 0
self.is_obsolete = False
self.abstract_compound = False
self.uptake_secretion = False
self.is_core = False
self.inchikey = 0
def __eq__(self, other):
"""
Two compounds are equal if they have the same id or the same name
:param other: The other compound
:type other: Compound
:return: If they are equal
:rtype: bool
"""
if isinstance(other, Compound):
return self.id == other.id or self.name == other.name
else:
raise NotImplementedError(f"Comparing a Compound with {type(other)} has not been implemented")
def __cmp__(self, other):
"""
Compare whether two things are the same.
:param other: The other compound
:type other: Compound
:return: An int, zero if they are the same
:rtype: int
"""
if isinstance(other, Compound):
            if self.__eq__(other):
return 0
else:
return 1
else:
raise NotImplementedError(f"Comparing a Compound with {type(other)} has not been implemented")
def __ne__(self, other):
"""
Are these not equal?
:param other: The other compound
:type other: Compound
:return: If they are not equal
:rtype: bool
"""
try:
result = self.__eq__(other)
except NotImplementedError:
return True
return not result
def __hash__(self):
"""
        The hash function is based on the id and name of the compound.
:rtype: int
"""
return hash((self.id, self.name))
def __str__(self):
"""
The to string function.
:rtype: str
"""
return f"{self.id}: {self.name}"
def __iter__(self):
for i in self.__dict__.items():
yield i
def add_reactions(self, rxns):
"""
Add a reaction that this compound is involved in. You can add a set of reactions. See the note above about the
number of reactions.
:param rxns: A set of reactions
:type rxns: set
"""
if isinstance(rxns, set):
self.reactions.update(rxns)
else:
raise TypeError("You need to add a set of reactions to a compound")
def has_reaction(self, rxn):
"""
Is this compound involved in this reaction?
:param rxn: A Reaction object
:type rxn: Reaction
:return: Whether the reaction is present
:rtype: bool
"""
return rxn in self.reactions
def number_of_reactions(self):
"""
How many reactions is this compound involved in?
:rtype: int
"""
return len(self.reactions)
def all_reactions(self):
"""
Return a set of all the reactions that this compound is involved in
        :rtype: Set[PyFBA.metabolism.Reaction]
"""
return self.reactions
def is_common(self, rct_limit=COMMON_REACTION_LIMIT):
"""
Is this a common compound? This requires that you have
added reactions to this compound.
        You can either specify the number of reactions or use our
        default, COMMON_REACTION_LIMIT (currently 5).
:param rct_limit: The limit for a compound to be considered common
:type rct_limit: int
:return: Whether this is a common reaction
:rtype: bool
"""
if self.number_of_reactions() > rct_limit:
self.common = True
else:
self.common = False
return self.common
def calculate_molecular_weight(self):
"""
Calculate and return the molecular weight of this compound
:return: The molecular weight
:rtype: float
"""
raise NotImplementedError("Sorry. Calculate molecular weight has not yet been implemented.")
def add_attribute(self, key, value):
"""
Add an attribute to this class
"""
setattr(self, key, value)
def get_attribute(self, key):
"""
Retrieve an attribute
"""
return getattr(self, key)
@total_ordering
class CompoundWithLocation(Compound):
"""
Compounds can have several locations:
A compound has at the very minimum a name and a location. The location is typically one of:
* e: extracellular
* c: cytoplasmic
* h: chloroplast
* p: periplasm
We extend the Compound class to add a location, and override a few of the methods
:ivar location: the location of the compound.
"""
def __init__(self, id=None, name=None, location=None, *args, **kwargs):
"""
Initiate the object
        :param id: The id of the compound. To copy an existing Compound, use :meth:`from_compound`.
        :type id: str
        :param name: The name of the compound
        :type name: str
:param location: The location of the compound
:type location: str
:return:
:rtype:
"""
super(CompoundWithLocation, self).__init__(id, name, *args, **kwargs)
self.id = id
self.name = name
self.location = location
@classmethod
def from_compound(cls, compound, location):
"""Initialize this object from another compound"""
cpd = cls(compound.id, compound.name, location)
for it in compound:
cpd.add_attribute(*it)
cpd.location = location
return cpd
def __eq__(self, other):
"""
Two compounds are equal if they have the same name and the same location
:param other: The other compound
:type other: Compound
:return: If they are equal
:rtype: bool
"""
if isinstance(other, CompoundWithLocation):
return super().__eq__(other) and self.location == other.location
else:
raise NotImplementedError(f"Comparing a Compound with {type(other)} has not been implemented")
def __lt__(self, other):
"""
Return whether this is less than other. Note that @total_ordering will take care of all the
other comparators!
"""
return self.id < other.id
def __cmp__(self, other):
"""
Compare whether two things are the same.
:param other: The other compound
:type other: Compound
:return: An int, zero if they are the same
:rtype: int
"""
if isinstance(other, CompoundWithLocation):
            if self.__eq__(other):
return 0
else:
return 1
else:
raise NotImplementedError(f"Comparing a Compound with {type(other)} has not been implemented")
def __ne__(self, other):
"""
Are these not equal?
:param other: The other compound
:type other: Compound
:return: If they are not equal
:rtype: bool
"""
try:
result = self.__eq__(other)
except NotImplementedError:
return True
return not result
def __hash__(self):
"""
        The hash function is based on the parent Compound hash and the location.
:rtype: int
"""
return hash((super().__hash__(), self.location))
def __str__(self):
"""
The to string function.
:rtype: str
"""
return f"{self.id}: {self.name} (location: {self.location})"
def __getstate__(self):
state = self.__dict__.copy()
# sys.stderr.write(f"Set {state}\n")
return state
def __setstate__(self, state):
# correctly handle unpickling
# sys.stderr.write(f"Read {state}\n")
self.__dict__.update(state)
def calculate_molecular_weight(self):
# this is here because the subclass should implement unimplemented methods otherwise it is abstract
# and I don't want to!
raise NotImplementedError("Sorry. Calculate molecular weight has not yet been implemented.")
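# Illustrative usage sketch (added for readability, not part of the original
# module). The compound and reaction identifiers are hypothetical; real code
# would add a set of PyFBA.metabolism.Reaction objects rather than strings.
def _example_compound_usage():
    atp = Compound('cpd00002', 'ATP')
    atp.add_reactions({'rxn00001', 'rxn00002', 'rxn00003'})
    # Three reactions is below COMMON_REACTION_LIMIT (5), so this prints False
    print(atp, 'is common:', atp.is_common())
    # Derive a localised copy of the compound for use in an FBA model
    atp_c = CompoundWithLocation.from_compound(atp, 'c')
    print(atp_c)  # cpd00002: ATP (location: c)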
|
|
"""
Tests that skipped rows are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from datetime import datetime
from io import StringIO
import numpy as np
import pytest
from pandas.errors import EmptyDataError
from pandas import (
DataFrame,
Index,
)
import pandas._testing as tm
@pytest.mark.parametrize("skiprows", [list(range(6)), 6])
def test_skip_rows_bug(all_parsers, skiprows):
# see gh-505
parser = all_parsers
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
result = parser.read_csv(
StringIO(text), skiprows=skiprows, header=None, index_col=0, parse_dates=True
)
index = Index(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], name=0
)
expected = DataFrame(
np.arange(1.0, 10.0).reshape((3, 3)), columns=[1, 2, 3], index=index
)
tm.assert_frame_equal(result, expected)
def test_deep_skip_rows(all_parsers):
# see gh-4382
parser = all_parsers
data = "a,b,c\n" + "\n".join(
",".join([str(i), str(i + 1), str(i + 2)]) for i in range(10)
)
condensed_data = "a,b,c\n" + "\n".join(
",".join([str(i), str(i + 1), str(i + 2)]) for i in [0, 1, 2, 3, 4, 6, 8, 9]
)
result = parser.read_csv(StringIO(data), skiprows=[6, 8])
condensed_result = parser.read_csv(StringIO(condensed_data))
tm.assert_frame_equal(result, condensed_result)
def test_skip_rows_blank(all_parsers):
# see gh-9832
parser = all_parsers
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = parser.read_csv(
StringIO(text), skiprows=6, header=None, index_col=0, parse_dates=True
)
index = Index(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], name=0
)
expected = DataFrame(
np.arange(1.0, 10.0).reshape((3, 3)), columns=[1, 2, 3], index=index
)
tm.assert_frame_equal(data, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""id,text,num_lines
1,"line 11
line 12",2
2,"line 21
line 22",2
3,"line 31",1""",
{"skiprows": [1]},
DataFrame(
[[2, "line 21\nline 22", 2], [3, "line 31", 1]],
columns=["id", "text", "num_lines"],
),
),
(
"a,b,c\n~a\n b~,~e\n d~,~f\n f~\n1,2,~12\n 13\n 14~",
{"quotechar": "~", "skiprows": [2]},
DataFrame([["a\n b", "e\n d", "f\n f"]], columns=["a", "b", "c"]),
),
(
(
"Text,url\n~example\n "
"sentence\n one~,url1\n~"
"example\n sentence\n two~,url2\n~"
"example\n sentence\n three~,url3"
),
{"quotechar": "~", "skiprows": [1, 3]},
DataFrame([["example\n sentence\n two", "url2"]], columns=["Text", "url"]),
),
],
)
def test_skip_row_with_newline(all_parsers, data, kwargs, expected):
# see gh-12775 and gh-10911
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_skip_row_with_quote(all_parsers):
# see gh-12775 and gh-10911
parser = all_parsers
data = """id,text,num_lines
1,"line '11' line 12",2
2,"line '21' line 22",2
3,"line '31' line 32",1"""
exp_data = [[2, "line '21' line 22", 2], [3, "line '31' line 32", 1]]
expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])
result = parser.read_csv(StringIO(data), skiprows=[1])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,exp_data",
[
(
"""id,text,num_lines
1,"line \n'11' line 12",2
2,"line \n'21' line 22",2
3,"line \n'31' line 32",1""",
[[2, "line \n'21' line 22", 2], [3, "line \n'31' line 32", 1]],
),
(
"""id,text,num_lines
1,"line '11\n' line 12",2
2,"line '21\n' line 22",2
3,"line '31\n' line 32",1""",
[[2, "line '21\n' line 22", 2], [3, "line '31\n' line 32", 1]],
),
(
"""id,text,num_lines
1,"line '11\n' \r\tline 12",2
2,"line '21\n' \r\tline 22",2
3,"line '31\n' \r\tline 32",1""",
[[2, "line '21\n' \r\tline 22", 2], [3, "line '31\n' \r\tline 32", 1]],
),
],
)
def test_skip_row_with_newline_and_quote(all_parsers, data, exp_data):
# see gh-12775 and gh-10911
parser = all_parsers
result = parser.read_csv(StringIO(data), skiprows=[1])
expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"line_terminator", ["\n", "\r\n", "\r"] # "LF" # "CRLF" # "CR"
)
def test_skiprows_lineterminator(all_parsers, line_terminator):
# see gh-9079
parser = all_parsers
data = "\n".join(
[
"SMOSMANIA ThetaProbe-ML2X ",
"2007/01/01 01:00 0.2140 U M ",
"2007/01/01 02:00 0.2141 M O ",
"2007/01/01 04:00 0.2142 D M ",
]
)
expected = DataFrame(
[
["2007/01/01", "01:00", 0.2140, "U", "M"],
["2007/01/01", "02:00", 0.2141, "M", "O"],
["2007/01/01", "04:00", 0.2142, "D", "M"],
],
columns=["date", "time", "var", "flag", "oflag"],
)
if parser.engine == "python" and line_terminator == "\r":
        pytest.skip("'CR' not respected with the Python parser yet")
data = data.replace("\n", line_terminator)
result = parser.read_csv(
StringIO(data),
skiprows=1,
delim_whitespace=True,
names=["date", "time", "var", "flag", "oflag"],
)
tm.assert_frame_equal(result, expected)
def test_skiprows_infield_quote(all_parsers):
# see gh-14459
parser = all_parsers
data = 'a"\nb"\na\n1'
expected = DataFrame({"a": [1]})
result = parser.read_csv(StringIO(data), skiprows=2)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
({}, DataFrame({"1": [3, 5]})),
({"header": 0, "names": ["foo"]}, DataFrame({"foo": [3, 5]})),
],
)
def test_skip_rows_callable(all_parsers, kwargs, expected):
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
result = parser.read_csv(StringIO(data), skiprows=lambda x: x % 2 == 0, **kwargs)
tm.assert_frame_equal(result, expected)
def test_skip_rows_skip_all(all_parsers):
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), skiprows=lambda x: True)
def test_skip_rows_bad_callable(all_parsers):
msg = "by zero"
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
with pytest.raises(ZeroDivisionError, match=msg):
parser.read_csv(StringIO(data), skiprows=lambda x: 1 / 0)
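def test_skip_rows_forms_smoke(all_parsers):
    # Added smoke illustration (a sketch, not from the original suite) of the three
    # skiprows forms exercised above: an integer, a list of row indices, a callable.
    parser = all_parsers
    data = "junk\njunk\na,b\n1,2\n3,4"
    expected = DataFrame({"a": [1, 3], "b": [2, 4]})
    by_count = parser.read_csv(StringIO(data), skiprows=2)
    by_list = parser.read_csv(StringIO(data), skiprows=[0, 1])
    by_callable = parser.read_csv(StringIO(data), skiprows=lambda i: i < 2)
    tm.assert_frame_equal(by_count, expected)
    tm.assert_frame_equal(by_list, expected)
    tm.assert_frame_equal(by_callable, expected)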
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2015, Ekevoo.com.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
from decimal import Decimal, DecimalException
from logging import getLogger
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.http import HttpResponse, Http404, HttpRequest, JsonResponse
from django.shortcuts import render
from django.utils.decorators import classonlymethod
from django.utils.timezone import now
from django.utils.translation import ugettext
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import ListView
from .forms import SubscriptionForm, PartialPayForm, ManualTransactionForm
from .models import Event, Subscription, SubsState, Transaction
from .notify import Notifier
from .payment.base import get_payment, get_payment_names
from .queue import cron, QueueAgent
from .utils import named, prg_redirect
log = getLogger(__name__)
BLANK_PAGE = HttpResponse()
@named('esupa-splash')
@login_required
def redirect_to_view_or_edit(request: HttpRequest, slug: str) -> HttpResponse:
try:
event = Event.objects.get(slug=slug)
except Event.DoesNotExist:
look_to_the_future = Event.objects.filter(starts_at__gt=now()).order_by('starts_at')
look_to_the_past = Event.objects.filter(starts_at__lt=now()).order_by('-starts_at')
event = look_to_the_future.first() or look_to_the_past.first()
if event:
exists = Subscription.objects.filter(event=event, user=request.user).exists()
return prg_redirect(view.name if exists else edit.name, event.slug)
else:
raise Http404(ugettext('There is no event. Create one in /admin/'))
def _get_subscription(event_slug: str, user: User) -> Subscription:
"""Takes existing subscription if available, creates a new one otherwise."""
try:
event = Event.objects.get(slug=event_slug)
except Event.DoesNotExist:
raise Http404(ugettext('Unknown event %s.') % event_slug)
kwargs = dict(event=event, user=user)
try:
subscription = Subscription.objects.get(**kwargs)
except Subscription.DoesNotExist:
subscription = Subscription(**kwargs)
if subscription.state == SubsState.DENIED:
raise PermissionDenied
return subscription
@named('esupa-view')
@login_required
def view(request: HttpRequest, slug: str) -> HttpResponse:
subscription = _get_subscription(slug, request.user)
if subscription.id:
context = {
'sub': subscription,
'event': subscription.event,
'state': SubsState(subscription.state),
'pending_trans': subscription.transaction_set.filter(document__isnull=False, ended_at__isnull=True),
'confirmed_trans': subscription.transaction_set.filter(accepted=True),
'partial_pay_form': PartialPayForm(subscription.get_owing()),
'pay_buttons': get_payment_names(),
}
if 'pay_with' in request.POST:
queue = QueueAgent(subscription)
subscription.position = queue.add()
subscription.waiting = queue.within_capacity
subscription.raise_state(SubsState.EXPECTING_PAY if queue.within_capacity else SubsState.QUEUED_FOR_PAY)
subscription.save()
if queue.within_capacity:
payment = get_payment(int(request.POST['pay_with']))(subscription)
try:
amount = Decimal(request.POST.get('amount', ''))
except DecimalException:
amount = subscription.get_owing()
return payment.start_payment(request, amount)
return render(request, 'esupa/view.html', context)
else:
return prg_redirect(edit.name, slug)
@named('esupa-edit')
@login_required
def edit(request: HttpRequest, slug: str) -> HttpResponse:
subscription = _get_subscription(slug, request.user)
if not subscription.id and subscription.user.email:
subscription.email = subscription.user.email
form = SubscriptionForm(data=request.POST or None, instance=subscription)
if request.POST and form.is_valid():
old_state = subscription.state
form.save()
        s = map(str.lower, (subscription.full_name, subscription.email, subscription.document, subscription.badge))
        b = tuple(map(str.lower, filter(bool, subscription.event.data_to_be_checked.splitlines())))
        # Not acceptable if any watched token from the event appears in the subscriber's data.
        acceptable = not any(t in d for d in s for t in b)
if not acceptable:
subscription.state = SubsState.VERIFYING_DATA # Lowers the state.
elif subscription.paid_any:
if subscription.get_owing() <= 0:
subscription.raise_state(SubsState.CONFIRMED)
elif subscription.state == SubsState.CONFIRMED:
subscription.state = SubsState.PARTIALLY_PAID # Lowers the state.
else:
subscription.raise_state(SubsState.ACCEPTABLE)
subscription.save()
Notifier(subscription).saved(old_state, request.build_absolute_uri)
return prg_redirect(view.name, slug)
else:
return render(request, 'esupa/edit.html', {
'form': form,
'event': subscription.event,
'subscription': subscription,
})
@named('esupa-trans-doc')
@login_required
def transaction_document(request: HttpRequest, tid) -> HttpResponse:
    trans = Transaction.objects.filter(id=tid).first()
    if trans is None or not trans.document:
raise Http404(ugettext("No such document."))
if not request.user.is_staff and trans.subscription.user != request.user:
        raise PermissionDenied
response = HttpResponse(trans.document, content_type=trans.mimetype)
return response
@named('esupa-cron')
def cron_view(request: HttpRequest, secret) -> HttpResponse:
if request.user and request.user.is_staff:
return cron() or BLANK_PAGE
    elif secret == getattr(settings, 'ESUPA_CRON_SECRET', None):
cron()
return BLANK_PAGE
else:
raise SuspiciousOperation
@named('esupa-pay')
@csrf_exempt
def paying(request: HttpRequest, code) -> HttpResponse:
resolved_view = get_payment(int(code)).class_view
return resolved_view(request) or BLANK_PAGE
@named('esupa-json-state')
def json_state(_: HttpRequest, slug: str) -> JsonResponse:
result = JsonResponse(_json_state(slug))
result['Access-Control-Allow-Origin'] = '*'
return result
def _json_state(slug: str) -> dict:
try:
event = Event.objects.get(slug=slug)
except Event.DoesNotExist:
return {'exists': False, 'slug': slug}
threshold = event.reveal_openings_under
potentially = max(0, event.capacity - event.num_confirmed)
currently = max(0, potentially - event.num_pending)
if threshold > 0:
potentially = str(threshold) + '+' if potentially > threshold else str(potentially)
currently = str(threshold) + '+' if currently > threshold else str(currently)
return {'exists': True, 'slug': slug, 'id': event.id,
'registrationOpen': event.subs_open, 'salesOpen': event.sales_open,
'potentiallyAvailable': potentially, 'currentlyAvailable': currently}
class EsupaListView(ListView):
name = ''
@classonlymethod
def as_view(cls, **initkwargs):
view_ = login_required(super().as_view(**initkwargs))
view_.name = cls.name
return view_
def dispatch(self, request, *args, **kwargs):
user = request.user
assert isinstance(user, User)
if not user.is_staff:
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
return super().get_context_data(user=self.request.user, **kwargs)
class EventList(EsupaListView):
model = Event
name = 'esupa-check-all'
class SubscriptionList(EsupaListView):
model = Subscription
name = 'esupa-check-event'
_event = None
sort_dict = {
'state': '-state',
'sid': 'id',
'pos': 'position',
}
@property
def event(self) -> Event:
if not self._event:
try:
self._event = Event.objects.get(slug=self.args[0])
except Event.DoesNotExist:
raise Http404
return self._event
def get_queryset(self):
queryset = self.event.subscription_set
sort = self.request.GET.get('sort')
if sort == 'pos':
queryset = queryset.filter(position__isnull=False)
return queryset.order_by(self.sort_dict.get(sort, '-state'))
def get_context_data(self, **kwargs):
return super().get_context_data(event=self.event, **kwargs)
class TransactionList(EsupaListView):
model = Transaction
name = 'esupa-check-docs'
_event = None
_subscription = None
@property
def event(self) -> Event:
if not self._event:
self._event = self.subscription.event
return self._event
@property
def subscription(self) -> Subscription:
if not self._subscription:
try:
self._subscription = Subscription.objects.get(id=int(self.args[0]))
except Subscription.DoesNotExist:
raise Http404
self._event = self._subscription.event
return self._subscription
def get_queryset(self):
return self.subscription.transaction_set.order_by('-id')
def get_context_data(self, **kwargs):
return super().get_context_data(
event=self.event,
sub=self.subscription,
state=SubsState(),
manual_transaction_form=ManualTransactionForm(self.subscription),
**kwargs)
def post(self, request: HttpRequest, sid: str):
if 'action' in request.POST:
tid, decision = request.POST.get('action').split()
transaction = Transaction.objects.get(id=tid, subscription_id=int(sid))
transaction.end(decision == 'yes')
transaction.verifier = request.user
else:
form = ManualTransactionForm(request.POST)
if form.is_valid():
transaction = Transaction(subscription_id=int(sid))
transaction.amount = form.cleaned_data['amount']
transaction.created_at = form.cleaned_data['when']
transaction.method = 1
if request.FILES:
transaction.mimetype = request.FILES['attachment'].content_type or 'application/octet-stream'
transaction.document = request.FILES['attachment'].read()
transaction.filled_at = transaction.created_at
transaction.verifier = request.user
transaction.notes = form.cleaned_data['notes']
transaction.end(True)
else:
return self.get(request, sid)
return prg_redirect(TransactionList.name, sid)
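# Standalone sketch (a hypothetical helper, not wired into the views above) of the
# availability masking performed in `_json_state`: counts above the event's reveal
# threshold are reported as '<threshold>+' rather than as an exact number.
def _mask_openings(capacity: int, num_confirmed: int, num_pending: int, threshold: int):
    potentially = max(0, capacity - num_confirmed)
    currently = max(0, potentially - num_pending)
    if threshold > 0:
        potentially = '%d+' % threshold if potentially > threshold else str(potentially)
        currently = '%d+' % threshold if currently > threshold else str(currently)
    return potentially, currently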
|
|
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Collection, Dict, List, Optional
from unittest.mock import Mock
from twisted.internet import defer
from synapse.api.auth import Auth
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
from synapse.events import make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.state import StateHandler, StateResolutionHandler
from tests import unittest
from .utils import MockClock, default_config
_next_event_id = 1000
def create_event(
name=None,
type=None,
state_key=None,
depth=2,
event_id=None,
prev_events: Optional[List[str]] = None,
**kwargs,
):
global _next_event_id
if not event_id:
_next_event_id += 1
event_id = "$%s:test" % (_next_event_id,)
if not name:
if state_key is not None:
name = "<%s-%s, %s>" % (type, state_key, event_id)
else:
name = "<%s, %s>" % (type, event_id)
d = {
"event_id": event_id,
"type": type,
"sender": "@user_id:example.com",
"room_id": "!room_id:example.com",
"depth": depth,
"prev_events": prev_events or [],
}
if state_key is not None:
d["state_key"] = state_key
d.update(kwargs)
event = make_event_from_dict(d)
return event
class _DummyStore:
def __init__(self):
self._event_to_state_group = {}
self._group_to_state = {}
self._event_id_to_event = {}
self._next_group = 1
async def get_state_groups_ids(self, room_id, event_ids):
groups = {}
for event_id in event_ids:
group = self._event_to_state_group.get(event_id)
if group:
groups[group] = self._group_to_state[group]
return groups
async def store_state_group(
self, event_id, room_id, prev_group, delta_ids, current_state_ids
):
state_group = self._next_group
self._next_group += 1
self._group_to_state[state_group] = dict(current_state_ids)
return state_group
async def get_events(self, event_ids, **kwargs):
return {
e_id: self._event_id_to_event[e_id]
for e_id in event_ids
if e_id in self._event_id_to_event
}
async def get_partial_state_events(
self, event_ids: Collection[str]
) -> Dict[str, bool]:
return {e: False for e in event_ids}
async def get_state_group_delta(self, name):
return None, None
def register_events(self, events):
for e in events:
self._event_id_to_event[e.event_id] = e
def register_event_context(self, event, context):
self._event_to_state_group[event.event_id] = context.state_group
def register_event_id_state_group(self, event_id, state_group):
self._event_to_state_group[event_id] = state_group
async def get_room_version_id(self, room_id):
return RoomVersions.V1.identifier
class DictObj(dict):
def __init__(self, **kwargs):
super().__init__(kwargs)
self.__dict__ = self
class Graph:
def __init__(self, nodes, edges):
events = {}
clobbered = set(events.keys())
for event_id, fields in nodes.items():
refs = edges.get(event_id)
if refs:
clobbered.difference_update(refs)
prev_events = [(r, {}) for r in refs]
else:
prev_events = []
events[event_id] = create_event(
event_id=event_id, prev_events=prev_events, **fields
)
self._leaves = clobbered
self._events = sorted(events.values(), key=lambda e: e.depth)
def walk(self):
return iter(self._events)
def get_leaves(self):
return (self._events[i] for i in self._leaves)
class StateTestCase(unittest.TestCase):
def setUp(self):
self.dummy_store = _DummyStore()
storage = Mock(main=self.dummy_store, state=self.dummy_store)
hs = Mock(
spec_set=[
"config",
"get_datastores",
"get_storage",
"get_auth",
"get_state_handler",
"get_clock",
"get_state_resolution_handler",
"get_account_validity_handler",
"hostname",
]
)
hs.config = default_config("tesths", True)
hs.get_datastores.return_value = Mock(main=self.dummy_store)
hs.get_state_handler.return_value = None
hs.get_clock.return_value = MockClock()
hs.get_auth.return_value = Auth(hs)
hs.get_state_resolution_handler = lambda: StateResolutionHandler(hs)
hs.get_storage.return_value = storage
self.state = StateHandler(hs)
self.event_id = 0
@defer.inlineCallbacks
def test_branch_no_conflict(self):
graph = Graph(
nodes={
"START": DictObj(
type=EventTypes.Create, state_key="", content={}, depth=1
),
"A": DictObj(type=EventTypes.Message, depth=2),
"B": DictObj(type=EventTypes.Message, depth=3),
"C": DictObj(type=EventTypes.Name, state_key="", depth=3),
"D": DictObj(type=EventTypes.Message, depth=4),
},
edges={"A": ["START"], "B": ["A"], "C": ["A"], "D": ["B", "C"]},
)
self.dummy_store.register_events(graph.walk())
        context_store: Dict[str, EventContext] = {}
for event in graph.walk():
context = yield defer.ensureDeferred(
self.state.compute_event_context(event)
)
self.dummy_store.register_event_context(event, context)
context_store[event.event_id] = context
ctx_c = context_store["C"]
ctx_d = context_store["D"]
prev_state_ids = yield defer.ensureDeferred(ctx_d.get_prev_state_ids())
self.assertEqual(2, len(prev_state_ids))
self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event)
self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group)
@defer.inlineCallbacks
def test_branch_basic_conflict(self):
graph = Graph(
nodes={
"START": DictObj(
type=EventTypes.Create,
state_key="",
content={"creator": "@user_id:example.com"},
depth=1,
),
"A": DictObj(
type=EventTypes.Member,
state_key="@user_id:example.com",
content={"membership": Membership.JOIN},
membership=Membership.JOIN,
depth=2,
),
"B": DictObj(type=EventTypes.Name, state_key="", depth=3),
"C": DictObj(type=EventTypes.Name, state_key="", depth=4),
"D": DictObj(type=EventTypes.Message, depth=5),
},
edges={"A": ["START"], "B": ["A"], "C": ["A"], "D": ["B", "C"]},
)
self.dummy_store.register_events(graph.walk())
context_store = {}
for event in graph.walk():
context = yield defer.ensureDeferred(
self.state.compute_event_context(event)
)
self.dummy_store.register_event_context(event, context)
context_store[event.event_id] = context
# C ends up winning the resolution between B and C
ctx_c = context_store["C"]
ctx_d = context_store["D"]
prev_state_ids = yield defer.ensureDeferred(ctx_d.get_prev_state_ids())
self.assertSetEqual({"START", "A", "C"}, set(prev_state_ids.values()))
self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event)
self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group)
@defer.inlineCallbacks
def test_branch_have_banned_conflict(self):
graph = Graph(
nodes={
"START": DictObj(
type=EventTypes.Create,
state_key="",
content={"creator": "@user_id:example.com"},
depth=1,
),
"A": DictObj(
type=EventTypes.Member,
state_key="@user_id:example.com",
content={"membership": Membership.JOIN},
membership=Membership.JOIN,
depth=2,
),
"B": DictObj(type=EventTypes.Name, state_key="", depth=3),
"C": DictObj(
type=EventTypes.Member,
state_key="@user_id_2:example.com",
content={"membership": Membership.BAN},
membership=Membership.BAN,
depth=4,
),
"D": DictObj(
type=EventTypes.Name,
state_key="",
depth=4,
sender="@user_id_2:example.com",
),
"E": DictObj(type=EventTypes.Message, depth=5),
},
edges={"A": ["START"], "B": ["A"], "C": ["B"], "D": ["B"], "E": ["C", "D"]},
)
self.dummy_store.register_events(graph.walk())
context_store = {}
for event in graph.walk():
context = yield defer.ensureDeferred(
self.state.compute_event_context(event)
)
self.dummy_store.register_event_context(event, context)
context_store[event.event_id] = context
# C ends up winning the resolution between C and D because bans win over other
# changes
ctx_c = context_store["C"]
ctx_e = context_store["E"]
prev_state_ids = yield defer.ensureDeferred(ctx_e.get_prev_state_ids())
self.assertSetEqual({"START", "A", "B", "C"}, set(prev_state_ids.values()))
self.assertEqual(ctx_c.state_group, ctx_e.state_group_before_event)
self.assertEqual(ctx_e.state_group_before_event, ctx_e.state_group)
@defer.inlineCallbacks
def test_branch_have_perms_conflict(self):
userid1 = "@user_id:example.com"
userid2 = "@user_id2:example.com"
nodes = {
"A1": DictObj(
type=EventTypes.Create,
state_key="",
content={"creator": userid1},
depth=1,
),
"A2": DictObj(
type=EventTypes.Member,
state_key=userid1,
content={"membership": Membership.JOIN},
membership=Membership.JOIN,
),
"A3": DictObj(
type=EventTypes.Member,
state_key=userid2,
content={"membership": Membership.JOIN},
membership=Membership.JOIN,
),
"A4": DictObj(
type=EventTypes.PowerLevels,
state_key="",
content={
"events": {"m.room.name": 50},
"users": {userid1: 100, userid2: 60},
},
),
"A5": DictObj(type=EventTypes.Name, state_key=""),
"B": DictObj(
type=EventTypes.PowerLevels,
state_key="",
content={"events": {"m.room.name": 50}, "users": {userid2: 30}},
),
"C": DictObj(type=EventTypes.Name, state_key="", sender=userid2),
"D": DictObj(type=EventTypes.Message),
}
edges = {
"A2": ["A1"],
"A3": ["A2"],
"A4": ["A3"],
"A5": ["A4"],
"B": ["A5"],
"C": ["A5"],
"D": ["B", "C"],
}
self._add_depths(nodes, edges)
graph = Graph(nodes, edges)
self.dummy_store.register_events(graph.walk())
context_store = {}
for event in graph.walk():
context = yield defer.ensureDeferred(
self.state.compute_event_context(event)
)
self.dummy_store.register_event_context(event, context)
context_store[event.event_id] = context
# B ends up winning the resolution between B and C because power levels
# win over other changes.
ctx_b = context_store["B"]
ctx_d = context_store["D"]
prev_state_ids = yield defer.ensureDeferred(ctx_d.get_prev_state_ids())
self.assertSetEqual({"A1", "A2", "A3", "A5", "B"}, set(prev_state_ids.values()))
self.assertEqual(ctx_b.state_group, ctx_d.state_group_before_event)
self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group)
def _add_depths(self, nodes, edges):
def _get_depth(ev):
node = nodes[ev]
if "depth" not in node:
prevs = edges[ev]
depth = max(_get_depth(prev) for prev in prevs) + 1
node["depth"] = depth
return node["depth"]
for n in nodes:
_get_depth(n)
@defer.inlineCallbacks
def test_annotate_with_old_message(self):
event = create_event(type="test_message", name="event")
old_state = [
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
context = yield defer.ensureDeferred(
self.state.compute_event_context(event, old_state=old_state)
)
prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids())
self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values())
current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
self.assertCountEqual(
(e.event_id for e in old_state), current_state_ids.values()
)
self.assertIsNotNone(context.state_group_before_event)
self.assertEqual(context.state_group_before_event, context.state_group)
@defer.inlineCallbacks
def test_annotate_with_old_state(self):
event = create_event(type="state", state_key="", name="event")
old_state = [
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
context = yield defer.ensureDeferred(
self.state.compute_event_context(event, old_state=old_state)
)
prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids())
self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values())
current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
self.assertCountEqual(
(e.event_id for e in old_state + [event]), current_state_ids.values()
)
self.assertIsNotNone(context.state_group_before_event)
self.assertNotEqual(context.state_group_before_event, context.state_group)
self.assertEqual(context.state_group_before_event, context.prev_group)
self.assertEqual({("state", ""): event.event_id}, context.delta_ids)
@defer.inlineCallbacks
def test_trivial_annotate_message(self):
prev_event_id = "prev_event_id"
event = create_event(
type="test_message", name="event2", prev_events=[(prev_event_id, {})]
)
old_state = [
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
group_name = yield defer.ensureDeferred(
self.dummy_store.store_state_group(
prev_event_id,
event.room_id,
None,
None,
{(e.type, e.state_key): e.event_id for e in old_state},
)
)
self.dummy_store.register_event_id_state_group(prev_event_id, group_name)
context = yield defer.ensureDeferred(self.state.compute_event_context(event))
current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
self.assertEqual(
{e.event_id for e in old_state}, set(current_state_ids.values())
)
self.assertEqual(group_name, context.state_group)
@defer.inlineCallbacks
def test_trivial_annotate_state(self):
prev_event_id = "prev_event_id"
event = create_event(
type="state", state_key="", name="event2", prev_events=[(prev_event_id, {})]
)
old_state = [
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
group_name = yield defer.ensureDeferred(
self.dummy_store.store_state_group(
prev_event_id,
event.room_id,
None,
None,
{(e.type, e.state_key): e.event_id for e in old_state},
)
)
self.dummy_store.register_event_id_state_group(prev_event_id, group_name)
context = yield defer.ensureDeferred(self.state.compute_event_context(event))
prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids())
self.assertEqual({e.event_id for e in old_state}, set(prev_state_ids.values()))
self.assertIsNotNone(context.state_group)
@defer.inlineCallbacks
def test_resolve_message_conflict(self):
prev_event_id1 = "event_id1"
prev_event_id2 = "event_id2"
event = create_event(
type="test_message",
name="event3",
prev_events=[(prev_event_id1, {}), (prev_event_id2, {})],
)
creation = create_event(type=EventTypes.Create, state_key="")
old_state_1 = [
creation,
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
old_state_2 = [
creation,
create_event(type="test1", state_key="1"),
create_event(type="test3", state_key="2"),
create_event(type="test4", state_key=""),
]
self.dummy_store.register_events(old_state_1)
self.dummy_store.register_events(old_state_2)
context = yield self._get_context(
event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
)
current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
self.assertEqual(len(current_state_ids), 6)
self.assertIsNotNone(context.state_group)
@defer.inlineCallbacks
def test_resolve_state_conflict(self):
prev_event_id1 = "event_id1"
prev_event_id2 = "event_id2"
event = create_event(
type="test4",
state_key="",
name="event",
prev_events=[(prev_event_id1, {}), (prev_event_id2, {})],
)
creation = create_event(type=EventTypes.Create, state_key="")
old_state_1 = [
creation,
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
old_state_2 = [
creation,
create_event(type="test1", state_key="1"),
create_event(type="test3", state_key="2"),
create_event(type="test4", state_key=""),
]
store = _DummyStore()
store.register_events(old_state_1)
store.register_events(old_state_2)
self.dummy_store.get_events = store.get_events
context = yield self._get_context(
event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
)
current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
self.assertEqual(len(current_state_ids), 6)
self.assertIsNotNone(context.state_group)
@defer.inlineCallbacks
def test_standard_depth_conflict(self):
prev_event_id1 = "event_id1"
prev_event_id2 = "event_id2"
event = create_event(
type="test4",
name="event",
prev_events=[(prev_event_id1, {}), (prev_event_id2, {})],
)
member_event = create_event(
type=EventTypes.Member,
state_key="@user_id:example.com",
content={"membership": Membership.JOIN},
)
power_levels = create_event(
type=EventTypes.PowerLevels,
state_key="",
content={"users": {"@foo:bar": "100", "@user_id:example.com": "100"}},
)
creation = create_event(
type=EventTypes.Create, state_key="", content={"creator": "@foo:bar"}
)
old_state_1 = [
creation,
power_levels,
member_event,
create_event(type="test1", state_key="1", depth=1),
]
old_state_2 = [
creation,
power_levels,
member_event,
create_event(type="test1", state_key="1", depth=2),
]
store = _DummyStore()
store.register_events(old_state_1)
store.register_events(old_state_2)
self.dummy_store.get_events = store.get_events
context = yield self._get_context(
event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
)
current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
self.assertEqual(old_state_2[3].event_id, current_state_ids[("test1", "1")])
# Reverse the depth to make sure we are actually using the depths
# during state resolution.
old_state_1 = [
creation,
power_levels,
member_event,
create_event(type="test1", state_key="1", depth=2),
]
old_state_2 = [
creation,
power_levels,
member_event,
create_event(type="test1", state_key="1", depth=1),
]
store.register_events(old_state_1)
store.register_events(old_state_2)
context = yield self._get_context(
event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
)
current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
self.assertEqual(old_state_1[3].event_id, current_state_ids[("test1", "1")])
@defer.inlineCallbacks
def _get_context(
self, event, prev_event_id_1, old_state_1, prev_event_id_2, old_state_2
):
sg1 = yield defer.ensureDeferred(
self.dummy_store.store_state_group(
prev_event_id_1,
event.room_id,
None,
None,
{(e.type, e.state_key): e.event_id for e in old_state_1},
)
)
self.dummy_store.register_event_id_state_group(prev_event_id_1, sg1)
sg2 = yield defer.ensureDeferred(
self.dummy_store.store_state_group(
prev_event_id_2,
event.room_id,
None,
None,
{(e.type, e.state_key): e.event_id for e in old_state_2},
)
)
self.dummy_store.register_event_id_state_group(prev_event_id_2, sg2)
result = yield defer.ensureDeferred(self.state.compute_event_context(event))
return result
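# Illustrative sketch (not one of the test cases) of how the helpers above fit
# together: build a tiny event graph, register it with the dummy store, and walk
# it in depth order.
def _example_graph_usage():
    graph = Graph(
        nodes={
            "START": DictObj(type=EventTypes.Create, state_key="", content={}, depth=1),
            "A": DictObj(type=EventTypes.Message, depth=2),
        },
        edges={"A": ["START"]},
    )
    store = _DummyStore()
    store.register_events(graph.walk())
    return [e.event_id for e in graph.walk()]  # ["START", "A"], ordered by depth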
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""State management for eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import random
import threading
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.util import compat
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
GRAPH_MODE = 0
EAGER_MODE = 1
default_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE
# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_starting_device_spec = pydev.DeviceSpec.from_string("")
_MAXINT32 = 2**31 - 1
DEVICE_PLACEMENT_EXPLICIT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)
SYNC = 0
ASYNC = 1
class _EagerTensorCache(object):
"""Simple cache which evicts items based on length in a FIFO manner."""
def __init__(self, max_items=256, max_tensor_size=10000):
self._data = collections.OrderedDict()
self._max_items = max_items
self._max_tensor_size = max_tensor_size
def put(self, key, value):
if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access
return
self._data[key] = value
if len(self._data) > self._max_items:
self._data.popitem(last=False)
def get(self, key):
return self._data.get(key, None)
def flush(self):
self._data = {}
class FunctionCallOptions(object):
"""Options applied at call sites of eager functions.
Eager functions are functions decorated with tf.contrib.eager.defun.
"""
def __init__(self, executor_type=None, config_proto=None):
"""Constructor.
Args:
executor_type: (optional) name of the executor to be used to execute the
eager function. If None or an empty string, the default TensorFlow
executor will be used.
config_proto: (optional) a `config_pb2.ConfigProto` proto or
a serialized string of that proto.
The config used by Grappler when optimizing the function graph.
Each concrete function is optimized the first time it is called. Changing
config_proto after the first call has no effect.
If config_proto is None, an empty RewriterConfig will be used.
"""
self.config_proto_serialized = config_proto
self.executor_type = executor_type
@property
def executor_type(self):
return self._executor_type
@executor_type.setter
def executor_type(self, executor_type):
self._executor_type = executor_type
@property
def config_proto_serialized(self):
return self._config_proto_serialized
@config_proto_serialized.setter
def config_proto_serialized(self, config):
if isinstance(config, config_pb2.ConfigProto):
self._config_proto_serialized = config.SerializeToString()
elif isinstance(config, str):
self._config_proto_serialized = config
elif config is None:
self._config_proto_serialized = (
config_pb2.ConfigProto().SerializeToString())
else:
raise ValueError("the rewriter config must be either a "
"config_pb2.ConfigProto, or a serialized string of that "
"proto or None. got: {}".format(type(config)))
class _ThreadLocalData(threading.local):
"""Thread local storage for the eager context."""
def __init__(self):
super(_ThreadLocalData, self).__init__()
self.device_spec = _starting_device_spec
self.device_name = ""
self.mode = default_execution_mode
self.is_eager = default_execution_mode == EAGER_MODE
self.scope_name = ""
self.summary_writer = None
self.summary_recording = None
self.summary_recording_distribution_strategy = True
self.summary_step = None
self.scalar_cache = {}
self._ones_rank_cache = None
self._zeros_cache = None
self.execution_mode = SYNC
self.function_call_options = None
@property
def ones_rank_cache(self):
if not self._ones_rank_cache:
self._ones_rank_cache = _EagerTensorCache()
return self._ones_rank_cache
@property
def zeros_cache(self):
if not self._zeros_cache:
self._zeros_cache = _EagerTensorCache()
return self._zeros_cache
ContextSwitch = collections.namedtuple(
"ContextSwitch", ["is_building_function", "enter_context_fn",
"device_stack"])
# `_ContextSwitchStack` is a `threading.local` to match the semantics of
# `_DefaultGraphStack`, which is also a `threading.local`.
class _ContextSwitchStack(threading.local):
"""A thread-local stack of context switches."""
def __init__(self, eager):
super(_ContextSwitchStack, self).__init__()
self.stack = []
if eager:
# Initialize the stack with a pointer to enter the eager context; this
# ensures that the fact that eager execution was enabled is propagated
# across threads, since (1) `enable_eager_execution` modifies a
# process-level flag (`default_execution_mode`) and (2) `__init__` is
# called each time a threading.local object is used in a separate thread.
self.push(is_building_function=False, enter_context_fn=eager_mode,
device_stack=None)
def push(self, is_building_function, enter_context_fn, device_stack):
"""Push metadata about a context switch onto the stack.
A context switch can take one of two forms: installing a graph as
the default graph, or entering the eager context. For each context switch,
we record whether or not the entered context is building a function.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
device_stack: If applicable, the device function stack for this
graph. When breaking out of graphs in init_scope, the innermost nonempty
device stack is used. Eager contexts put `None` here and the value is
never used.
"""
self.stack.append(
ContextSwitch(is_building_function, enter_context_fn, device_stack))
def pop(self):
"""Pop the stack."""
self.stack.pop()
# TODO(agarwal): rename to EagerContext / EagerRuntime ?
# TODO(agarwal): consider keeping the corresponding Graph here.
class Context(object):
"""Environment in which eager operations execute."""
# TODO(agarwal): create and link in some documentation for `execution_mode`.
# pylint: disable=redefined-outer-name
def __init__(self,
config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Creates a new Context.
Args:
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the Context. Note that a lot of these options may be
currently unimplemented or irrelevant when eager execution is enabled.
device_policy: (Optional.) What policy to use when trying to run an
operation on a device with inputs which are not on that device.
When set to None, an appropriate value will be picked automatically.
The value picked may change between TensorFlow releases.
Defaults to DEVICE_PLACEMENT_SILENT.
Valid values:
- DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is
not correct.
- DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
right device but raises a warning.
- DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
hide performance problems.
- DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched
are actually executed. When set to None, an appropriate value will be
picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- SYNC: executes each operation synchronously.
- ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
server_def: (Optional.) A tensorflow::ServerDef proto.
Enables execution on remote devices. GrpcServers need to be started by
creating an identical server_def to this, and setting the appropriate
task_indexes, so that the servers can communicate. It will then be
possible to execute operations on remote devices.
Raises:
ValueError: If execution_mode is not valid.
"""
self._config = config
self._thread_local_data = _ThreadLocalData()
self._context_switches = _ContextSwitchStack(self.executing_eagerly())
self._context_handle = None
self._context_devices = None
self._post_execution_callbacks = []
self._seed = None
self._initialize_lock = threading.Lock()
if device_policy is None:
device_policy = DEVICE_PLACEMENT_SILENT
self._device_policy = device_policy
if execution_mode not in (None, SYNC, ASYNC):
raise ValueError(
"execution_mode should be None/SYNC/ASYNC. Got %s" % execution_mode)
if execution_mode is None:
execution_mode = SYNC
self._execution_mode = execution_mode
self._server_def = server_def
self._collective_ops_server_def = None
# Values set after construction
self._gpu_per_process_memory_fraction = None
self._gpu_per_process_memory_growth = None
self._optimizer_jit = None
self._intra_op_parallelism_threads = None
self._inter_op_parallelism_threads = None
self._soft_device_placement = None
self._log_device_placement = None
self._optimizer_experimental_options = {}
# pylint: enable=redefined-outer-name
def _set_global_seed(self, seed):
"""Set a global eager mode seed for random ops."""
self._seed = seed
self._rng = random.Random(self._seed)
# Also clear the kernel cache, to reset any existing seeds
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextClearCaches(self._context_handle)
def _internal_operation_seed(self):
"""Returns a fake operation seed.
In eager mode, users shouldn't set or depend on the operation seed.
Here, we generate a random seed based on the global seed so that each
operation's randomness differs while still being derived from the global seed.
Returns:
A fake operation seed based on global seed.
"""
return self._rng.randint(0, _MAXINT32)
def _initialize_devices(self):
"""Helper to initialize devices."""
# Store list of devices
self._context_devices = []
device_list = pywrap_tensorflow.TFE_ContextListDevices(
self._context_handle)
try:
self._num_gpus = 0
for i in range(pywrap_tensorflow.TF_DeviceListCount(device_list)):
dev_name = pywrap_tensorflow.TF_DeviceListName(device_list, i)
self._context_devices.append(pydev.canonical_name(dev_name))
dev_type = pywrap_tensorflow.TF_DeviceListType(device_list, i)
if dev_type == "GPU":
self._num_gpus += 1
finally:
pywrap_tensorflow.TF_DeleteDeviceList(device_list)
def _initialize_handle_and_devices(self):
"""Initialize handle and devices."""
with self._initialize_lock:
if self._context_handle is not None:
return
assert self._context_devices is None
opts = pywrap_tensorflow.TFE_NewContextOptions()
try:
config_str = self.config.SerializeToString()
pywrap_tensorflow.TFE_ContextOptionsSetConfig(opts, config_str)
if self._device_policy is not None:
pywrap_tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy(
opts, self._device_policy)
if self._execution_mode == ASYNC:
pywrap_tensorflow.TFE_ContextOptionsSetAsync(opts, True)
self._context_handle = pywrap_tensorflow.TFE_NewContext(opts)
finally:
pywrap_tensorflow.TFE_DeleteContextOptions(opts)
assert not (self._server_def and self._collective_ops_server_def), (
"Cannot enable remote execution as well as collective ops at the "
"moment. If this is important to you, please file an issue.")
if self._server_def is not None:
server_def_str = self._server_def.SerializeToString()
pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle, 600,
server_def_str)
elif self._collective_ops_server_def is not None:
server_def_str = self._collective_ops_server_def.SerializeToString()
pywrap_tensorflow.TFE_EnableCollectiveOps(self._context_handle,
server_def_str)
self._initialize_devices()
def _clear_caches(self):
self.scalar_cache().clear()
self.ones_rank_cache().flush()
self.zeros_cache().flush()
def set_server_def(self, server_def, keep_alive_secs=600):
"""Allow setting a server_def on the context.
When a server def is replaced, it effectively clears a bunch of caches
within the context. If you attempt to use a tensor object that was pointing
to a tensor on the remote device, it will raise an error.
Args:
server_def: A tensorflow::ServerDef proto.
Enables execution on remote devices.
keep_alive_secs: Num. seconds after which the remote end will hang up.
As long as the client is still alive, the server state for the context
will be kept alive. If the client is killed (or there is some failure),
the server will clean up its context keep_alive_secs after the final RPC
it receives.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
if not self._context_handle:
self._server_def = server_def
else:
server_def_str = server_def.SerializeToString()
pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle,
keep_alive_secs, server_def_str)
# Clear all the caches in case there are remote tensors in them.
self._clear_caches()
self._initialize_devices()
def enable_collective_ops(self, server_def):
"""Enable collective ops with an appropriate server_def.
If previously enabled, this cannot be re-enabled.
Args:
server_def: A tensorflow::ServerDef proto. Enables execution on remote
devices.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
if not self._context_handle:
self._collective_ops_server_def = server_def
else:
server_def_str = server_def.SerializeToString()
pywrap_tensorflow.TFE_EnableCollectiveOps(self._context_handle,
server_def_str)
self._clear_caches()
self._initialize_devices()
@property
def _handle(self):
ctx = self._context_handle
if ctx is None:
self._initialize_handle_and_devices()
return self._context_handle
else:
return ctx
@property
def _devices(self):
devices = self._context_devices
if devices is None:
self._initialize_handle_and_devices()
return self._context_devices
else:
return devices
def __str__(self):
if self._context_handle is None:
return "Eager TensorFlow Context. Devices currently uninitialized."
else:
devices = self._devices
lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
for i, d in enumerate(devices):
lines.append(" Device %d: %s" % (i, d))
return "\n".join(lines)
@tf_contextlib.contextmanager
def _mode(self, mode):
"""A context manager to allow setting the mode to EAGER/GRAPH."""
ctx = self._thread_local_data
old_mode = ctx.mode
old_is_eager = ctx.is_eager
ctx.mode = mode
ctx.is_eager = mode == EAGER_MODE
if mode == EAGER_MODE:
# Entering graph mode does not provide us with sufficient information to
# record a context switch; graph-based context switches are only logged
# when a graph is registered as the default graph.
self.context_switches.push(False, eager_mode, None)
try:
yield
finally:
ctx.is_eager = old_is_eager
ctx.mode = old_mode
if mode == EAGER_MODE:
self.context_switches.pop()
def executing_eagerly(self):
"""Returns True if current thread has eager executing enabled."""
return self._thread_local_data.is_eager
def scalar_cache(self):
"""Per-device cache for scalars."""
return self._thread_local_data.scalar_cache
def ones_rank_cache(self):
"""Per-device cache for scalars."""
return self._thread_local_data.ones_rank_cache
def zeros_cache(self):
"""Per-device cache for scalars."""
return self._thread_local_data.zeros_cache
@property
def scope_name(self):
"""Returns scope name for the current thread."""
return self._thread_local_data.scope_name
@scope_name.setter
def scope_name(self, s):
"""Sets scope name for the current thread."""
self._thread_local_data.scope_name = s
@property
def summary_writer(self):
"""Returns default summary writer for the current thread."""
return self._thread_local_data.summary_writer
@summary_writer.setter
def summary_writer(self, writer):
"""Sets default summary writer for the current thread."""
self._thread_local_data.summary_writer = writer
@property
def summary_recording(self):
"""Returns summary recording condition."""
return self._thread_local_data.summary_recording
@summary_recording.setter
def summary_recording(self, condition):
"""Sets summary recording condition."""
self._thread_local_data.summary_recording = condition
@property
def summary_recording_distribution_strategy(self):
"""Returns summary recording condition for distribution strategy."""
return self._thread_local_data.summary_recording_distribution_strategy
@summary_recording_distribution_strategy.setter
def summary_recording_distribution_strategy(self, condition):
"""Sets summary recording condition for distribution strategy."""
self._thread_local_data.summary_recording_distribution_strategy = condition
@property
def summary_step(self):
"""Returns summary step variable."""
return self._thread_local_data.summary_step
@summary_step.setter
def summary_step(self, step):
"""Sets summary step variable."""
self._thread_local_data.summary_step = step
@property
def device_name(self):
"""Returns the device name for the current thread."""
return self._thread_local_data.device_name
@property
def device_spec(self):
"""Returns the device spec for the current thread."""
return self._thread_local_data.device_spec
def _set_device(self, device_name, device_spec):
self._thread_local_data.device_name = device_name
self._thread_local_data.device_spec = device_spec
def device(self, name):
"""Context-manager to force placement of operations and Tensors on a device.
Args:
name: Name of the device or None to get default placement.
Returns:
Context manager that forces device placement.
Raises:
ValueError: If name is not a string or is an invalid device name.
RuntimeError: If device scopes are not properly nested.
"""
return _EagerDeviceContext(self, name)
def devices(self):
"""List of the names of devices available to execute operations."""
return self._devices
@property
def execution_mode(self):
"""Gets execution mode for current thread."""
# Only get the execution mode from the context if it has already been
# initialized
if self._context_handle is None:
return self._execution_mode
mode = self._thread_local_data.execution_mode
if mode is None:
mode = self._execution_mode
return mode
@execution_mode.setter
def execution_mode(self, mode):
"""Sets execution mode for current thread."""
if mode not in (None, SYNC, ASYNC):
raise ValueError(
"Execution mode should be None/SYNC/ASYNC. Got %s" % mode)
if mode is None:
mode = SYNC
if self._thread_local_data.execution_mode != mode:
self._thread_local_data.execution_mode = mode
# Only set the execution mode if the context has already been initialized
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextSetAsyncForThread(self._context_handle,
mode == ASYNC)
else:
self._execution_mode = mode
@property
def config(self):
"""Return the ConfigProto with all runtime deltas applied."""
config = config_pb2.ConfigProto()
if self._config is not None:
config.CopyFrom(self._config)
if self._gpu_per_process_memory_fraction is not None:
config.gpu_options.per_process_gpu_memory_fraction = (
self._gpu_per_process_memory_fraction)
if self._gpu_per_process_memory_growth is not None:
config.gpu_options.allow_growth = self._gpu_per_process_memory_growth
if self._optimizer_jit is not None:
config.graph_options.optimizer_options.global_jit_level = (
config_pb2.OptimizerOptions.ON_1
if self._optimizer_jit else config_pb2.OptimizerOptions.OFF)
if self._intra_op_parallelism_threads is not None:
config.intra_op_parallelism_threads = self._intra_op_parallelism_threads
if self._inter_op_parallelism_threads is not None:
config.inter_op_parallelism_threads = self._inter_op_parallelism_threads
if self._soft_device_placement is not None:
config.allow_soft_placement = self._soft_device_placement
else:
config.allow_soft_placement = self.executing_eagerly()
if self._log_device_placement is not None:
config.log_device_placement = self._log_device_placement
def rewriter_toggle(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
(rewriter_config_pb2.RewriterConfig.ON
if toggle else rewriter_config_pb2.RewriterConfig.OFF))
def rewriter_bool(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
toggle)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_bool("disable_meta_optimizer")
nodes = self._optimizer_experimental_options.get("min_graph_nodes", None)
if nodes is not None:
config.graph_options.rewrite_options.min_graph_nodes = nodes
return config
@property
def function_call_options(self):
"""Returns function call options for current thread.
Note that the returned object is still referenced by the eager context.
Returns: the FunctionCallOptions for current thread.
"""
if self._thread_local_data.function_call_options is None:
config = self.config
# Default to soft placement for functions unless specified
if self._soft_device_placement is None:
config.allow_soft_placement = True
self._thread_local_data.function_call_options = FunctionCallOptions(
config_proto=config)
return self._thread_local_data.function_call_options
@function_call_options.setter
def function_call_options(self, options):
"""Returns function call options for current thread."""
self._thread_local_data.function_call_options = options
def async_wait(self):
"""Waits for ops dispatched in ASYNC mode to finish."""
pywrap_tensorflow.TFE_ContextAsyncWait(self._handle)
def async_clear_error(self):
"""Clears errors raised during ASYNC execution."""
pywrap_tensorflow.TFE_ContextAsyncClearError(self._handle)
def num_gpus(self):
"""The number of GPUs available to execute operations."""
self._initialize_handle_and_devices()
return self._num_gpus
def add_function(self, fn):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
"""
pywrap_tensorflow.TFE_ContextAddFunction(self._handle, fn)
def add_function_def(self, fdef):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fdef: A FunctionDef protocol buffer message.
"""
fdef_string = fdef.SerializeToString()
pywrap_tensorflow.TFE_ContextAddFunctionDef(
self._handle, fdef_string, len(fdef_string))
def has_function(self, name):
"""Check if a function `name` is registered."""
return bool(pywrap_tensorflow.TFE_ContextHasFunction(self._handle, name))
def add_post_execution_callback(self, callback):
"""Add a post-execution callback to the context.
A post-execution callback is invoked immediately after an eager operation or
function has finished execution, providing access to the op's type, name,
input and output tensors. Multiple execution callbacks can be added, in
which case the callbacks will be invoked in the order in which they are
added.
Args:
callback: a callable of the signature
`f(op_type, op_name, attrs, inputs, outputs)`.
`op_type` is the type of the operation that was just executed (e.g.,
`MatMul`).
`op_name` is the name of the operation that was just executed. This
name is set by the client who created the operation and can be `None` if
it is unset.
`attrs` contains the attributes of the operation as a `tuple` of
alternating attribute names and attribute values.
`inputs` is the `list` of input `Tensor`(s) to the op.
`outputs` is the `list` of output `Tensor`(s) from the op.
Return value(s) from the callback are ignored.
"""
# TODO(cais): (b/64674139) Allow access to function-internal operations.
self._post_execution_callbacks.append(callback)
def clear_post_execution_callbacks(self):
"""Clear all post-execution callbacks added to the context."""
del self._post_execution_callbacks[:]
@property
def post_execution_callbacks(self):
"""Get the list of post-execution callbacks added to the context."""
return self._post_execution_callbacks
@property
def gpu_per_process_memory_fraction(self):
return self.config.gpu_options.per_process_gpu_memory_fraction
@gpu_per_process_memory_fraction.setter
def gpu_per_process_memory_fraction(self, fraction):
if self._context_handle is not None:
raise RuntimeError(
"GPU options must be set at program startup")
self._gpu_per_process_memory_fraction = fraction
@property
def gpu_per_process_memory_growth(self):
return self.config.gpu_options.allow_growth
@gpu_per_process_memory_growth.setter
def gpu_per_process_memory_growth(self, enabled):
if self._context_handle is not None:
raise RuntimeError(
"GPU options must be set at program startup")
self._gpu_per_process_memory_growth = enabled
@property
def optimizer_jit(self):
level = self.config.graph_options.optimizer_options.global_jit_level
return (level == config_pb2.OptimizerOptions.ON_1 or
level == config_pb2.OptimizerOptions.ON_2)
@optimizer_jit.setter
def optimizer_jit(self, enabled):
self._optimizer_jit = enabled
self._thread_local_data.function_call_options = None
def get_optimizer_experimental_options(self):
"""Get experimental options for the optimizer.
Returns:
Dictionary of current option values
"""
rewrite_options = self.config.graph_options.rewrite_options
options = {}
def rewriter_toggle(option):
attr = getattr(rewrite_options, option)
if attr != 0:
options[option] = (attr == rewriter_config_pb2.RewriterConfig.ON)
def rewriter_bool(option):
options[option] = getattr(rewrite_options, option)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_bool("disable_meta_optimizer")
if rewrite_options.min_graph_nodes != 0:
options["min_graph_nodes"] = rewrite_options.min_graph_nodes
return options
def set_optimizer_experimental_options(self, options):
"""Set experimental options for the optimizer.
Args:
options: Dictionary of options to modify
"""
self._optimizer_experimental_options.update(options)
self._thread_local_data.function_call_options = None
@property
def intra_op_parallelism_threads(self):
return self.config.intra_op_parallelism_threads
@intra_op_parallelism_threads.setter
def intra_op_parallelism_threads(self, num_threads):
if self._context_handle is not None:
raise RuntimeError(
"Intra op parallelism must be set at program startup")
self._intra_op_parallelism_threads = num_threads
@property
def inter_op_parallelism_threads(self):
return self.config.inter_op_parallelism_threads
@inter_op_parallelism_threads.setter
def inter_op_parallelism_threads(self, num_threads):
if self._context_handle is not None:
raise RuntimeError(
"Inter op parallelism must be set at program startup")
self._inter_op_parallelism_threads = num_threads
@property
def soft_device_placement(self):
return self.config.allow_soft_placement
@soft_device_placement.setter
def soft_device_placement(self, enabled):
self._soft_device_placement = enabled
self._thread_local_data.function_call_options = None
@property
def log_device_placement(self):
return self.config.log_device_placement
@log_device_placement.setter
def log_device_placement(self, enabled):
if self._context_handle is not None:
raise RuntimeError(
"Device placement logging must be set at program startup")
self._log_device_placement = enabled
self._thread_local_data.function_call_options = None
@property
def device_policy(self):
# Only get the policy from the context if it has already been initialized
if self._context_handle is not None:
return pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(self._handle)
return self._device_policy
@device_policy.setter
def device_policy(self, policy):
if policy is None:
policy = DEVICE_PLACEMENT_SILENT
if self._device_policy != policy:
self._device_policy = policy
# Only set the policy if the context has already been initialized
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(
self._handle, self._device_policy)
def enable_run_metadata(self):
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
pywrap_tensorflow.TFE_ContextEnableRunMetadata(self._handle)
def disable_run_metadata(self):
"""Disables tracing of op execution via RunMetadata."""
if not self._context_handle:
return
pywrap_tensorflow.TFE_ContextDisableRunMetadata(self._context_handle)
def enable_graph_collection(self):
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
pywrap_tensorflow.TFE_ContextEnableGraphCollection(self._handle)
def disable_graph_collection(self):
"""Disables graph collections of executed functions."""
if not self._context_handle:
return
pywrap_tensorflow.TFE_ContextDisableGraphCollection(self._context_handle)
def export_run_metadata(self):
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer. Or None if not enabled.
"""
if not self._context_handle:
return None
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TFE_ContextExportRunMetadata(
self._context_handle, buffer_)
proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
run_metadata = config_pb2.RunMetadata()
run_metadata.ParseFromString(compat.as_bytes(proto_data))
return run_metadata
@property
def context_switches(self):
"""Returns a stack of context switches."""
return self._context_switches
def start_step(self):
pywrap_tensorflow.TFE_ContextStartStep(self._handle)
def end_step(self):
pywrap_tensorflow.TFE_ContextEndStep(self._handle)
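# Illustrative sketch (not part of the original TensorFlow module): thread-pool
# sizes must be set before the context handle is created (their setters above
# raise RuntimeError afterwards), while soft placement and the experimental
# optimizer options simply record deltas that `config` folds into a ConfigProto.
def _example_configure_context(ctx):
  ctx.intra_op_parallelism_threads = 2
  ctx.inter_op_parallelism_threads = 2
  ctx.soft_device_placement = True
  ctx.set_optimizer_experimental_options({"constant_folding": False})
  return ctx.config  # a ConfigProto with the deltas above applied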
_context = None
_context_lock = threading.Lock()
class _EagerDeviceContext(object):
"""Context-manager forcing placement of ops and Tensors on a device."""
def __init__(self, ctx, device_name):
self._device_name = device_name
self._ctx = ctx
self._stack = []
def __enter__(self):
ctx = self._ctx
old_device_name = ctx.device_name
old_device_spec = ctx.device_spec
new_device_name = self._device_name
cache_key = (old_device_name, new_device_name)
try:
new_device_name, new_device_spec = _device_parsing_cache[cache_key]
except TypeError:
# Error while trying to compute the cache key.
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
except KeyError:
# Handle a cache miss.
if new_device_name is not None:
if not isinstance(new_device_name, str):
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
device_spec = pydev.DeviceSpec.from_string(new_device_name)
if old_device_name:
new_device_spec = copy.copy(old_device_spec)
else:
ctx._initialize_handle_and_devices() # pylint: disable=protected-access
new_device_spec = pydev.DeviceSpec.from_string(
ctx._context_devices[0]) # pylint: disable=protected-access
new_device_spec.merge_from(device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string("")
new_device_name = new_device_spec.to_string()
_device_parsing_cache[cache_key] = (new_device_name, new_device_spec)
ctx._set_device(new_device_name, new_device_spec) # pylint: disable=protected-access
self._stack.append((old_device_name, old_device_spec, new_device_spec))
def __exit__(self, *ex_info):
ctx = self._ctx
old_device_name, old_device_spec, new_device_spec = self._stack[-1]
if ctx.device_spec is not new_device_spec:
raise RuntimeError(
"Exiting device scope without proper scope nesting")
del self._stack[-1]
ctx._set_device(old_device_name, old_device_spec) # pylint: disable=protected-access
def _initialize_context():
global _context
with _context_lock:
if _context is None:
_context = Context()
def context():
"""Returns a singleton context object."""
if _context is None:
_initialize_context()
return _context
def context_safe():
"""Returns current context (or None if one hasn't been initialized)."""
return _context
def set_global_seed(seed):
"""Sets the eager mode seed."""
context()._set_global_seed(seed) # pylint: disable=protected-access
def global_seed():
"""Returns the eager mode seed."""
return context()._seed # pylint: disable=protected-access
def internal_operation_seed():
"""Returns the operation seed generated based on global seed."""
return context()._internal_operation_seed() # pylint: disable=protected-access
@tf_export("executing_eagerly")
def executing_eagerly():
"""Returns True if the current thread has eager execution enabled.
Eager execution is typically enabled via `tf.enable_eager_execution`,
but may also be enabled within the context of a Python function via
tf.contrib.eager.py_func.
"""
if context_safe() is None:
return default_execution_mode == EAGER_MODE
return context().executing_eagerly()
def in_eager_mode():
"""Use executing_eagerly() instead. This function will be removed."""
return executing_eagerly()
def shared_name(name=None):
"""Returns the anonymous shared name GUID if no shared name is specified.
In eager mode we need to use a unique shared name to avoid spurious sharing
issues. The runtime generates a unique name on our behalf when the reserved
GUID is used as a shared name.
Args:
name: Optional shared name
Returns:
Eager compatible shared name.
"""
if name or not executing_eagerly():
return name
# Ensure a unique name when eager execution is enabled to avoid spurious
# sharing issues.
return "cd2c89b7-88b7-44c8-ad83-06c2a9158347"
def graph_mode():
"""Context-manager to disable eager execution for the current thread."""
return context()._mode(GRAPH_MODE) # pylint: disable=protected-access
def eager_mode():
"""Context-manager to enable eager execution for the current thread."""
return context()._mode(EAGER_MODE) # pylint: disable=protected-access
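# Illustrative sketch (not part of the original TensorFlow module): the two
# context managers above are thread-local toggles, and executing_eagerly()
# reflects whichever mode is innermost.
def _example_mode_toggle():
  with graph_mode():
    assert not executing_eagerly()
    with eager_mode():
      assert executing_eagerly()
    assert not executing_eagerly()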
# TODO(agarwal): get rid of this and use ops.name_scope instead.
@contextlib.contextmanager
def namescope(name):
"""ContextManager for creating hierarchical name scopes."""
ctx = context()
old_name = ctx.scope_name
ctx.scope_name = "%s/%s" % (old_name, name) if old_name else name
try:
yield
finally:
ctx.scope_name = old_name
def scope_name():
"""Name of the current scope."""
return context().scope_name
def device(name):
"""Context-manager to force placement of operations and Tensors on a device.
Example:
```python
with tfe.device('gpu:0'):
with tfe.device('cpu:0'):
shape = tf.constant([], dtype=tf.int32)
x = tf.truncated_normal(shape, tf.float32)
```
will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
operation runs on GPU 0.
Args:
name: Name of the device (see context().devices()), or None to
perform automatic placement.
Returns:
Context manager for setting the device.
"""
return context().device(name)
@tf_export("config.experimental_list_devices")
def list_devices():
"""List the names of the available devices.
Returns:
Names of the available devices, as a `list`.
"""
return context().devices()
@tf_export("debugging.get_log_device_placement")
def get_log_device_placement():
"""Get if device placements are logged.
Returns:
If device placements are logged.
"""
return context().log_device_placement
@tf_export("debugging.set_log_device_placement")
def set_log_device_placement(enabled):
"""Set if device placements should be logged.
Args:
enabled: Whether to enable device placement logging.
"""
context().log_device_placement = enabled
@tf_contextlib.contextmanager
def device_policy(policy):
"""Context manager for setting device placement policy for current thread."""
ctx = context()
old_policy = ctx.device_policy
try:
ctx.device_policy = policy
yield
finally:
ctx.device_policy = old_policy
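# Illustrative sketch (not part of the original TensorFlow module): the
# device_policy manager above temporarily changes how ops treat inputs that
# live on a different device; DEVICE_PLACEMENT_EXPLICIT makes such mismatches
# raise instead of being silently copied.
def _example_strict_placement(fn):
  with device_policy(DEVICE_PLACEMENT_EXPLICIT):
    return fn()  # cross-device inputs now raise rather than being copied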
def set_execution_mode(mode):
"""Sets execution mode for the current thread."""
context().execution_mode = mode
@tf_contextlib.contextmanager
def execution_mode(mode):
"""Context manager for setting execution mode for current thread."""
ctx = context()
old_mode = ctx.execution_mode
try:
ctx.execution_mode = mode
yield
finally:
ctx.execution_mode = old_mode
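# Illustrative sketch (not part of the original TensorFlow module): ASYNC mode
# dispatches ops without waiting for their results, and async_wait() (defined
# just below) blocks until everything dispatched so far has finished.
def _example_async_execution(fn):
  with execution_mode(ASYNC):
    result = fn()  # may be a "non-ready" handle
    async_wait()   # block until all dispatched ops have completed
  return result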
@tf_export("experimental.function_executor_type")
@tf_contextlib.contextmanager
def function_executor_type(executor_type):
"""Context manager for setting the executor of eager defined functions.
Eager defined functions are functions decorated by tf.contrib.eager.defun.
Args:
executor_type: a string for the name of the executor to be used to execute
functions defined by tf.contrib.eager.defun.
Yields:
Context manager for setting the executor of eager defined functions.
"""
current_options = context().function_call_options
old_options = copy.copy(current_options)
try:
current_options.executor_type = executor_type
yield
finally:
context().function_call_options = old_options
def async_wait():
"""Waits for ops dispatched in ASYNC mode to finish."""
return context().async_wait()
def async_clear_error():
"""Clears errors raised during ASYNC execution mode."""
return context().async_clear_error()
def num_gpus():
"""Get the number of available GPU devices.
Returns:
The number of available GPU devices.
"""
return context().num_gpus()
def enable_run_metadata():
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
context().enable_run_metadata()
def disable_run_metadata():
"""Disables tracing of op execution via RunMetadata."""
context().disable_run_metadata()
def enable_graph_collection():
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
context().enable_graph_collection()
def disable_graph_collection():
"""Disables tracing of op execution via RunMetadata."""
context().disable_graph_collection()
def export_run_metadata():
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer.
"""
return context().export_run_metadata()
def set_server_def(server_def):
context().set_server_def(server_def)
def add_function(fdef):
"""Add a function definition to the context."""
context().add_function(fdef)
# Not every user creates a Context via context.context()
# (for example, enable_eager_execution in python/framework/ops.py),
# but they do all import this file. Note that IS_IN_GRAPH_MODE and
# in_graph_mode are both parameterless functions.
def _tmp_in_graph_mode():
if context_safe() is None:
# Context not yet initialized. Assume graph mode following the
# default implementation in `is_in_graph_mode`.
return True
return not executing_eagerly()
is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
|
|
import os
import re
import sys
from hq.hquery.syntax_error import HquerySyntaxError
from pytest import raises
sys.path.insert(0, os.path.abspath('../..'))
from ..common_test_util import expected_result
from test.hquery.hquery_test_util import query_html_doc
def test_explicit_child_axis():
html_body = """
<div>
<p>foo</p>
</div>"""
assert query_html_doc(html_body, '//div/child::p') == expected_result("""
<p>
foo
</p>""")
def test_child_axis_selects_only_immediate_children():
html_body = """
<p>uncle</p>
<div>
<p>niece</p>
<p>nephew</p>
</div>"""
assert query_html_doc(html_body, '/html/body/child::p') == expected_result("""
<p>
uncle
</p>""")
def test_descendant_axis_selects_from_descendants_not_ancestors():
html_body = """
<div id="grandma">
<section>
<div>uncle</div>
<aside>
<div>niece</div>
</aside>
</section>
</div>"""
actual = query_html_doc(html_body, '/html/body/div/descendant::div')
assert actual == expected_result("""
<div>
uncle
</div>
<div>
niece
</div>""")
def test_descendant_axis_returns_all_descendants_and_only_descendants_of_nodes_matching_node_test():
html_body = """
<div>
<div>
<div>selected</div>
</div>
</div>
<!-- comment -->
<div>not selected</div>
<p>not selected</p>"""
expected = expected_result("""
<div>
<div>
selected
</div>
</div>
<div>
selected
</div>""")
assert query_html_doc(html_body, '/html/body/div/descendant::div') == expected
assert query_html_doc(html_body, '/html/body/div/~::div') == expected
def test_descendant_or_self_axis_returns_all_descendants_and_context_node_if_it_matches_node_test():
html_body = """
<div>
<div>foo</div>
</div>
<div>bar</div>"""
assert query_html_doc(html_body, '/html/body/descendant-or-self::div') == expected_result("""
<div>
<div>
foo
</div>
</div>
<div>
foo
</div>
<div>
bar
</div>""")
def test_descendant_or_self_axis_does_not_produce_self_if_node_test_does_not_match():
html_body = """
<div>
<p>foo</p>
</div>"""
assert query_html_doc(html_body, '//div/descendant-or-self::p') == expected_result("""
<p>
foo
</p>""")
def test_parent_axis_returns_parent_of_tag_node():
assert query_html_doc('<div></div>', '//div/parent::*') == expected_result("""
<body>
<div>
</div>
</body>""")
def test_parent_axis_selects_only_the_immediate_parent():
html_body = """
<div id="grandma">
<div id="mom">
<p>daughter</p>
</div>
</div>"""
actual = query_html_doc(html_body, '//p/parent::div')
assert actual == expected_result("""
<div id="mom">
<p>
daughter
</p>
</div>""")
def test_parent_axis_returns_parents_for_multiple_matching_nodes():
html_body = """
<div id="first">
<p>
</p>
</div>
<div id="second">
<p>
</p>
</div>"""
assert query_html_doc(html_body, '//p/parent::*') == expected_result(html_body)
def test_parent_axis_produces_nothing_for_root_element():
assert query_html_doc('', '/html/parent::*') == expected_result('')
assert query_html_doc('<div></div>', 'div/parent::*', wrap_body=False) == expected_result('')
def test_ancestor_axis_selects_all_matching_ancestors():
html_body = """
<div>
<section>
<div>
<p>text</p>
</div>
</section>
</div>"""
expected = expected_result("""
<div>
<section>
<div>
<p>
text
</p>
</div>
</section>
</div>
<div>
<p>
text
</p>
</div>""")
assert query_html_doc(html_body, '//p/ancestor::div') == expected
assert query_html_doc(html_body, '//p/^::div') == expected
def test_ancestor_axis_produces_all_ancestors_and_only_ancestors():
html_body = """
<html>
<body>
<!-- comment -->
<h1></h1>
<div></div>
</body>
</html>"""
assert query_html_doc(html_body, '//div/ancestor::*', wrap_body=False) == expected_result("""
<html>
<body>
<!-- comment -->
<h1>
</h1>
<div>
</div>
</body>
</html>
<body>
<!-- comment -->
<h1>
</h1>
<div>
</div>
</body>""")
def test_ancestor_or_self_axis_produces_ancestors_and_self_when_node_test_is_a_match():
html_body = """
<div>
<div>foo</div>
</div>"""
expected = expected_result("""
<div>
<div>
foo
</div>
</div>
<div>
foo
</div>""")
assert query_html_doc(html_body, '/html/body/div/div/ancestor-or-self::div') == expected
assert query_html_doc(html_body, '/html/body/div/div/^^::div') == expected
def test_following_sibling_axis_selects_all_following_siblings_and_only_following_siblings_that_match_name_test():
html_body = """
<section>
<div></div>
<h1></h1>
<p>moe</p>
</section>
<section>
<p>larry<p>
<div></div>
<p>curly</p>
</section>"""
expected = expected_result("""
<p>
moe
</p>
<p>
curly
</p>""")
assert query_html_doc(html_body, '//div/following-sibling::p') == expected
assert query_html_doc(html_body, '//div/>::p') == expected
def test_following_sibling_axis_works_with_node_test():
html_body = """
<div>
foo
<p></p>
bar
</div>"""
assert query_html_doc(html_body, '//p/following-sibling::text()') == expected_result('bar')
assert query_html_doc('<h1></h1><div></div><p>foo</p>', '//div/following-sibling::*') == expected_result("""
<p>
foo
</p>""")
def test_preceding_sibling_axis_works_with_name_test():
html_body = """
<p>foo</p>
<div></div>
<p>bar</p>"""
expected = expected_result("""
<p>
foo
</p>""")
assert query_html_doc(html_body, '//div/preceding-sibling::p') == expected
assert query_html_doc(html_body, '//div/<::p') == expected
def test_preceding_sibling_axis_works_with_node_test():
html_body = """
<p>foo</p>
<p>bar</p>
<div></div>
<p>nothing</p>"""
assert query_html_doc(html_body, '//div/preceding-sibling::node()') == expected_result("""
<p>
foo
</p>
<p>
bar
</p>""")
def test_preceding_sibling_axis_returns_nodes_in_document_order():
"""Node sets are unordered, but people really seem to like these being in document order."""
html_body = """
<p>foo</p>
<p>bar</p>
<div></div>"""
assert query_html_doc(html_body, '//div/preceding-sibling::p') == expected_result("""
<p>
foo
</p>
<p>
bar
</p>""")
def test_following_axis_finds_all_following_nodes_that_match():
html_body = """
<section>
<p>moe</p>
<aside>
<p>larry</p>
</aside>
<div>
<p>curly</p>
</div>
</section>
<p>shemp</p>"""
expected = expected_result("""
<p>
curly
</p>
<p>
shemp
</p>""")
assert query_html_doc(html_body, '//aside/following::p') == expected
assert query_html_doc(html_body, '//aside/>>::p') == expected
def test_preceding_axis_finds_all_preceding_nodes_that_match_node_test():
html_body = """
foo
<div>
<p>bar</p>
</div>
<span></span>"""
actual = query_html_doc(html_body, '//span/preceding::text()')
actual = re.sub(r'\W+', ' ', actual)
assert actual == 'foo bar'
def test_preceding_axis_finds_all_preceding_nodes_that_match():
html_body = """
<p>moe</p>
<section>
<div>
<p>larry</p>
</div>
<aside>
<p>curly</p>
</aside>
<p>shemp</p>
</section>"""
expected = expected_result("""
<p>
moe
</p>
<p>
larry
</p>""")
assert query_html_doc(html_body, '//aside/preceding::p') == expected
assert query_html_doc(html_body, '//aside/<<::p') == expected
def test_preceding_axis_produces_results_in_document_order_and_also_works_with_node_test():
html_body = """
<p>moe</p>
<section>
<div>
<div>
<p>larry</p>
</div>
</div>
<aside>
<p>curly</p>
</aside>
<p>shemp</p>
</section>
<script></script>"""
assert query_html_doc(html_body, '//script/preceding::p/text()') == expected_result("""
moe
larry
curly
shemp""")
def test_attribute_axis_in_full_and_abbreviated_form_selects_named_attributes_or_all_attributes():
html_body = """
<div id="one"></div>
<div id="two" class="three"></div>"""
expected_ids_result = expected_result('''
id="one"
id="two"''')
expected_all_result = expected_result('''
id="one"
class="three"
id="two"''')
assert query_html_doc(html_body, '//div/attribute::id') == expected_ids_result
assert query_html_doc(html_body, '//div/@id') == expected_ids_result
assert query_html_doc(html_body, '//attribute::*') == expected_all_result
assert query_html_doc(html_body, '//@*') == expected_all_result
def test_attribute_axis_matching_any_attribute_produces_attributes_from_each_element_in_alphabetical_order():
html_body = """
<span moe="3" LARRY="2" curly="1"></span>
<span BBB="5" aaa="4" ccc="6"></span>"""
actual = query_html_doc(html_body, '//span/@*')
assert re.sub(r'\w+="(\d)"\n?', r'\1', actual) == '123456'
def test_self_axis_applies_only_to_self():
html_body = """
<div id="not selected">
<div id="selected">
<div></div>
</div>
</div>"""
assert query_html_doc(html_body, '/html/body/div/div/self::div') == expected_result("""
<div id="selected">
<div>
</div>
</div>""")
def test_css_class_axis_finds_elements_based_on_their_css_classes():
html_body = """
<p class="foo">foo</p>
<p class="foo bar">foo bar</p>
<p class="bar">bar</p>"""
expected = expected_result("""
<p class="foo bar">
foo bar
</p>
<p class="bar">
bar
</p>""")
assert query_html_doc(html_body, '//class::bar') == expected
assert query_html_doc(html_body, '//.::bar') == expected
def test_css_class_axis_can_only_be_followed_by_name_test():
with raises(HquerySyntaxError):
assert query_html_doc('', '/.::node()')
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet Base Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import tensorflow.compat.v1 as tf
import tf_slim as slim
@slim.add_arg_scope
def apply_activation(x, name=None, activation_fn=None):
return activation_fn(x, name=name) if activation_fn else x
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],
[0, 0]])
return padded_inputs
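# Illustrative sketch (not part of the original module): with kernel_size=[3, 3]
# and rate=2 the effective kernel is 5x5, so two rows/columns are added on every
# side, matching what 'SAME' padding would produce for that convolution.
def _example_fixed_padding():
  images = tf.zeros([1, 8, 8, 3])
  padded = _fixed_padding(images, kernel_size=[3, 3], rate=2)
  return padded  # shape [1, 12, 12, 3]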
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return int(new_v)
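# Illustrative sketch (not part of the original module): worked examples of the
# rounding above with divisor=8 and the default min_value.
def _example_make_divisible():
  assert _make_divisible(32 * 0.75, 8) == 24   # 24.0 is already a multiple of 8
  assert _make_divisible(32 * 0.35, 8) == 16   # 11.2 rounds to 8, which is below
                                               # the 90% floor, so 8 is added back
  assert _make_divisible(100, 8) == 104        # rounded up to the next multiple of 8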
@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
"""Sets arg scope defaults for all items present in defaults.
Args:
defaults: dictionary/list of pairs, containing a mapping from
function to a dictionary of default args.
Yields:
context manager where all defaults are set.
"""
if hasattr(defaults, 'items'):
items = list(defaults.items())
else:
items = defaults
if not items:
yield
else:
func, default_arg = items[0]
with slim.arg_scope(func, **default_arg):
with _set_arg_scope_defaults(items[1:]):
yield
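# Illustrative sketch (not part of the original module): the helper above nests
# one slim.arg_scope per entry, so per-function defaults can be installed with a
# single `with`. The defaults here are arbitrary and purely illustrative.
def _example_set_arg_scope_defaults(images):
  defaults = {
      (slim.conv2d, slim.separable_conv2d): {'normalizer_fn': slim.batch_norm},
      (slim.batch_norm,): {'center': True, 'scale': True},
  }
  with _set_arg_scope_defaults(defaults):
    return slim.conv2d(images, 32, [3, 3], scope='example_conv')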
@slim.add_arg_scope
def depth_multiplier(output_params,
multiplier,
divisible_by=8,
min_depth=8,
**unused_kwargs):
if 'num_outputs' not in output_params:
return
d = output_params['num_outputs']
output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by,
min_depth)
_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])
def op(opfunc, multiplier_func=depth_multiplier, **params):
multiplier = params.pop('multiplier_transform', multiplier_func)
return _Op(opfunc, params=params, multiplier_func=multiplier)
class NoOpScope(object):
"""No-op context manager."""
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
def safe_arg_scope(funcs, **kwargs):
"""Returns `slim.arg_scope` with all None arguments removed.
Arguments:
funcs: Functions to pass to `arg_scope`.
**kwargs: Arguments to pass to `arg_scope`.
Returns:
arg_scope or No-op context manager.
Note: can be useful if a None value should be interpreted as "do not overwrite
this parameter value".
"""
filtered_args = {name: value for name, value in kwargs.items()
if value is not None}
if filtered_args:
return slim.arg_scope(funcs, **filtered_args)
else:
return NoOpScope()
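# Illustrative sketch (not part of the original module): when is_training is
# None, safe_arg_scope drops the argument entirely and returns the NoOpScope
# above, leaving any outer arg_scope (e.g. one installed by a training scope)
# untouched.
def _example_safe_arg_scope(images, is_training=None):
  with safe_arg_scope([slim.batch_norm], is_training=is_training):
    return slim.conv2d(images, 32, [3, 3], normalizer_fn=slim.batch_norm)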
@slim.add_arg_scope
def mobilenet_base( # pylint: disable=invalid-name
inputs,
conv_defs,
multiplier=1.0,
final_endpoint=None,
output_stride=None,
use_explicit_padding=False,
scope=None,
is_training=False):
"""Mobilenet base network.
Constructs a network from inputs to the given final endpoint. By default
the network is constructed in inference mode. To create network
in training mode use:
with slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_base(...)
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
conv_defs: A dict describing the net architecture: an ordered 'spec' list
of op(...) layers, plus optional 'defaults'/'overrides' arg-scope settings
(a minimal example follows this function).
multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
final_endpoint: The name of the last layer, for early termination. For
V1-based networks the last layer is "layer_14"; for V2 it is "layer_20".
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 1 or any even number, excluding
zero. Typical values are 8 (accurate fully convolutional mode), 16
(fast fully convolutional mode), and 32 (classification mode).
NOTE: output_stride relies on all subsequent operators to support dilated
operators via "rate" parameter. This might require wrapping non-conv
operators to operate properly.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: optional variable scope.
is_training: How to set up batch_norm and other ops. Note: most of the time
this does not need to be set directly. Use mobilenet.training_scope() to set
up training instead. This parameter is here for backward compatibility
only. It is safe to set it to the value matching
training_scope(is_training=...). It is also safe to explicitly set
it to False, even if there is an outer training_scope set to training.
(The network will be built in inference mode). If this is set to None,
no arg_scope is added for slim.batch_norm's is_training parameter.
Returns:
tensor_out: output tensor.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: depth_multiplier <= 0, or the target output_stride is not
allowed.
"""
if multiplier <= 0:
raise ValueError('multiplier is not greater than zero.')
# Set conv defs defaults and overrides.
conv_defs_defaults = conv_defs.get('defaults', {})
conv_defs_overrides = conv_defs.get('overrides', {})
if use_explicit_padding:
conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
conv_defs_overrides[
(slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}
if output_stride is not None:
if output_stride == 0 or (output_stride > 1 and output_stride % 2):
raise ValueError('Output stride must be None, 1 or a multiple of 2.')
# a) Set the tensorflow scope
# b) set padding to default: note we might consider removing this
# since it is also set by mobilenet_scope
# c) set all defaults
# d) set all extra overrides.
# pylint: disable=g-backslash-continuation
with _scope_all(scope, default_scope='Mobilenet'), \
safe_arg_scope([slim.batch_norm], is_training=is_training), \
_set_arg_scope_defaults(conv_defs_defaults), \
_set_arg_scope_defaults(conv_defs_overrides):
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = inputs
# Insert default parameters before the base scope which includes
# any custom overrides set in mobilenet.
end_points = {}
scopes = {}
for i, opdef in enumerate(conv_defs['spec']):
params = dict(opdef.params)
opdef.multiplier_func(params, multiplier)
stride = params.get('stride', 1)
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= stride
else:
layer_stride = stride
layer_rate = 1
current_stride *= stride
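      # Worked example (illustrative): with output_stride=16 and a spec that
      # contains five stride-2 ops, current_stride reaches 16 after the fourth
      # of them. The fifth stride-2 op is then built with layer_stride=1 (its
      # layer_rate is still 1), rate becomes 2, and every subsequent non-1x1
      # op runs with dilation rate 2, keeping the activations at 1/16 of the
      # input resolution.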
# Update params.
params['stride'] = layer_stride
# Only insert rate to params if rate > 1 and kernel size is not [1, 1].
if layer_rate > 1:
if tuple(params.get('kernel_size', [])) != (1, 1):
# We will apply atrous rate in the following cases:
# 1) When kernel_size is not in params, the operation then uses
# default kernel size 3x3.
# 2) When kernel_size is in params, and if the kernel_size is not
# equal to (1, 1) (there is no need to apply atrous convolution to
# any 1x1 convolution).
params['rate'] = layer_rate
# Set padding
if use_explicit_padding:
if 'kernel_size' in params:
net = _fixed_padding(net, params['kernel_size'], layer_rate)
else:
params['use_explicit_padding'] = True
end_point = 'layer_%d' % (i + 1)
try:
net = opdef.op(net, **params)
except Exception:
print('Failed to create op %i: %r params: %r' % (i, opdef, params))
raise
end_points[end_point] = net
scope = os.path.dirname(net.name)
scopes[scope] = end_point
if final_endpoint is not None and end_point == final_endpoint:
break
# Add all tensors that end with 'output' to
# endpoints
for t in net.graph.get_operations():
scope = os.path.dirname(t.name)
bn = os.path.basename(t.name)
if scope in scopes and t.name.endswith('output'):
end_points[scopes[scope] + '/' + bn] = t.outputs[0]
return net, end_points
@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
with tf.variable_scope(scope, default_name=default_scope) as s,\
tf.name_scope(s.original_name_scope):
yield s
@slim.add_arg_scope
def mobilenet(inputs,
num_classes=1001,
prediction_fn=slim.softmax,
reuse=None,
scope='Mobilenet',
base_only=False,
use_reduce_mean_for_pooling=False,
**mobilenet_args):
"""Mobilenet model for classification, supports both V1 and V2.
Note: default mode is inference, use mobilenet.training_scope to create
training network.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
prediction_fn: a function to get predictions out of logits
(default softmax).
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
base_only: if True will only create the base of the network (no pooling
and no logits).
    use_reduce_mean_for_pooling: if True use reduce_mean for pooling. If
      False, use the global_pool function that provides some optimization.
**mobilenet_args: passed to mobilenet_base verbatim.
- conv_defs: list of conv defs
- multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
    - output_stride: will ensure that the last layer has at most this total
      stride. If the architecture calls for more stride than provided
      (e.g. output_stride=16, but the architecture has 5 stride=2 operators),
      the extra stride is replaced with atrous (dilated) convolutions.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation tensor.
Raises:
ValueError: Input rank is invalid.
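  Example:
    A minimal inference sketch (illustrative only; mobilenet_v2.V2_DEF is
    assumed to be an available conv_defs spec):
      images = tf.placeholder(tf.float32, [None, 224, 224, 3])
      logits, end_points = mobilenet(images, num_classes=1001,
                                     conv_defs=mobilenet_v2.V2_DEF)
      probabilities = end_points['Predictions']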
"""
is_training = mobilenet_args.get('is_training', False)
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
inputs = tf.identity(inputs, 'input')
net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
if base_only:
return net, end_points
net = tf.identity(net, name='embedding')
with tf.variable_scope('Logits'):
net = global_pool(net, use_reduce_mean_for_pooling)
end_points['global_pool'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, scope='Dropout', is_training=is_training)
# 1 x 1 x num_classes
# Note: legacy scope name.
logits = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='Conv2d_1c_1x1')
logits = tf.squeeze(logits, [1, 2])
logits = tf.identity(logits, name='output')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, 'Predictions')
return logits, end_points
def global_pool(input_tensor,
use_reduce_mean_for_pooling=False,
pool_op=tf.nn.avg_pool2d):
"""Applies avg pool to produce 1x1 output.
  NOTE: This function is functionally equivalent to reduce_mean, but it uses
  a baked-in average pool, which has better support across hardware.
Args:
input_tensor: input tensor
use_reduce_mean_for_pooling: if True use reduce_mean for pooling
pool_op: pooling op (avg pool is default)
Returns:
a tensor batch_size x 1 x 1 x depth.
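  Example (illustrative): an input of shape [8, 7, 7, 1280] yields an output
  of shape [8, 1, 1, 1280]:
    pooled = global_pool(features)  # features: [8, 7, 7, 1280] tensor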
"""
if use_reduce_mean_for_pooling:
return tf.reduce_mean(
input_tensor, [1, 2], keepdims=True, name='ReduceMean')
else:
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size = tf.convert_to_tensor(value=[
1,
tf.shape(input=input_tensor)[1],
tf.shape(input=input_tensor)[2], 1
])
else:
kernel_size = [1, shape[1], shape[2], 1]
output = pool_op(
input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
# Recover output shape, for unknown shape.
output.set_shape([None, 1, 1, None])
return output
def training_scope(is_training=True,
weight_decay=0.00004,
stddev=0.09,
dropout_keep_prob=0.8,
bn_decay=0.997):
"""Defines Mobilenet training scope.
Usage:
with slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
     # the network created will be trainable with dropout/batch norm
     # initialized appropriately.
Args:
is_training: if set to False this will ensure that all customizations are
set to non-training mode. This might be helpful for code that is reused
across both training/evaluation, but most of the time training_scope with
      value False is not needed. If this is set to None, the parameter is not
      added to the batch_norm arg_scope.
weight_decay: The weight decay to use for regularizing the model.
    stddev: Standard deviation for initialization; if negative, Xavier
      initialization is used.
    dropout_keep_prob: dropout keep probability (not set if equal to None).
    bn_decay: decay for the batch norm moving averages (not set if equal to
      None).
Returns:
An argument scope to use via arg_scope.
"""
# Note: do not introduce parameters that would change the inference
# model here (for example whether to use bias), modify conv_def instead.
batch_norm_params = {
'decay': bn_decay,
'is_training': is_training
}
if stddev < 0:
weight_intitializer = slim.initializers.xavier_initializer()
else:
weight_intitializer = tf.truncated_normal_initializer(
stddev=stddev)
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope(
[slim.conv2d, slim.fully_connected, slim.separable_conv2d],
weights_initializer=weight_intitializer,
normalizer_fn=slim.batch_norm), \
slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\
safe_arg_scope([slim.batch_norm], **batch_norm_params), \
safe_arg_scope([slim.dropout], is_training=is_training,
keep_prob=dropout_keep_prob), \
slim.arg_scope([slim.conv2d], \
weights_regularizer=slim.l2_regularizer(weight_decay)), \
slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
return s
|
|
#-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to SciPy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
# Standard library imports.
import warnings
# SciPy imports.
from scipy import linalg, special
from scipy.special import logsumexp
from scipy._lib._util import check_random_state
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi,
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
ones, cov)
import numpy as np
# Local imports.
from . import mvn
from ._stats import gaussian_kernel_estimate
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None, weights=None):
self.dataset = atleast_2d(asarray(dataset))
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
self._weights = atleast_1d(weights).astype(float)
self._weights /= sum(self._weights)
if self.weights.ndim != 1:
raise ValueError("`weights` input should be one-dimensional.")
if len(self._weights) != self.n:
raise ValueError("`weights` input should be of length n")
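            # self._weights sums to 1, so the effective sample size
            # sum(w)**2 / sum(w**2) from the class docstring reduces to
            # 1 / sum(w**2).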
self._neff = 1/sum(self._weights**2)
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
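        Examples
        --------
        A quick shape check (illustrative only):
        >>> from scipy import stats
        >>> rng = np.random.RandomState(0)
        >>> kde = stats.gaussian_kde(rng.randn(2, 50))
        >>> kde.evaluate(np.zeros((2, 4))).shape
        (4,)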
"""
points = atleast_2d(asarray(points))
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
output_dtype = np.common_type(self.covariance, points)
itemsize = np.dtype(output_dtype).itemsize
if itemsize == 4:
spec = 'float'
elif itemsize == 8:
spec = 'double'
elif itemsize in (12, 16):
spec = 'long double'
else:
raise TypeError('%s has unexpected item size %d' %
(output_dtype, itemsize))
result = gaussian_kernel_estimate[spec](self.dataset.T, self.weights[:, None],
points.T, self.inv_cov, output_dtype)
return result[:, 0]
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
        # cho_factor returns (ndarray, bool) where the bool flags whether the
        # ndarray is lower (True) or upper (False) triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies)*self.weights, axis=0) / norm_const
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
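        Examples
        --------
        Illustrative sanity check: integrating over the whole real line
        recovers the total probability mass (up to rounding).
        >>> from scipy import stats
        >>> kde = stats.gaussian_kde([1.0, 2.0, 3.0])
        >>> float(round(kde.integrate_box_1d(-np.inf, np.inf), 12))
        1.0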
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.sum(self.weights*(
special.ndtr(normalized_high) -
special.ndtr(normalized_low)))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun_weighted(low_bounds, high_bounds,
self.dataset, self.weights,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const
return result
def resample(self, size=None, seed=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the effective number of samples in the underlying
dataset.
seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional
This parameter defines the object to use for drawing random
variates.
If `seed` is `None` the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is None.
Specify `seed` for reproducible drawing of random variates.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
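        Examples
        --------
        Illustrative only: for a 1-D dataset, `size` samples come back as a
        (1, size) array.
        >>> from scipy import stats
        >>> kde = stats.gaussian_kde([1.0, 2.0, 3.0, 4.0])
        >>> kde.resample(size=100, seed=42).shape
        (1, 100)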
"""
if size is None:
size = int(self.neff)
random_state = check_random_state(seed)
norm = transpose(random_state.multivariate_normal(
zeros((self.d,), float), self.covariance, size=size
))
indices = random_state.choice(self.n, size=size, p=self.weights)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
"""Compute Scott's factor.
Returns
-------
s : float
Scott's factor.
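        Examples
        --------
        Equivalent to ``neff ** (-1. / (d + 4))`` (illustrative):
        >>> from scipy import stats
        >>> kde = stats.gaussian_kde(np.arange(100.0))
        >>> bool(np.isclose(kde.scotts_factor(), 100 ** (-1.0 / 5)))
        True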
"""
return power(self.neff, -1./(self.d+4))
def silverman_factor(self):
"""Compute the Silverman factor.
Returns
-------
s : float
The silverman factor.
"""
return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, str):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
bias=False,
aweights=self.weights))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
"""
points = atleast_2d(x)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
if m >= self.n:
# there are more points than data, so loop over data
energy = zeros((self.n, m), dtype=float)
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy[i] = sum(diff*tdiff, axis=0) / 2.0
result = logsumexp(-energy.T,
b=self.weights / self._norm_factor, axis=1)
else:
# loop over points
result = zeros((m,), dtype=float)
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = logsumexp(-energy, b=self.weights /
self._norm_factor)
return result
@property
def weights(self):
try:
return self._weights
except AttributeError:
self._weights = ones(self.n)/self.n
return self._weights
@property
def neff(self):
try:
return self._neff
except AttributeError:
self._neff = 1/sum(self.weights**2)
return self._neff
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
from eventlet.green import threading
from eventlet.green import time
from eventlet import greenpool
from eventlet import semaphore
from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LW
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Context(context.RequestContext):
def __init__(self,
user_id=None,
tenant_id=None,
auth_token=None,
service_catalog=None,
username=None,
tenant_name=None,
roles=None,
is_admin=None,
remote_semaphore=None,
auth_uri=None,
resource_uuid=None,
current_instance_info=None,
request_id=None,
overwrite=True,
**kwargs):
if kwargs:
LOG.warning(_LW('Arguments dropped when creating context: '
'{args}').format(args=kwargs))
super(Context, self).__init__(auth_token=auth_token,
user=user_id,
tenant=tenant_id,
is_admin=is_admin,
resource_uuid=resource_uuid,
request_id=request_id)
self.service_catalog = service_catalog
self.username = username
self.tenant_name = tenant_name
self.remote_semaphore = remote_semaphore or semaphore.Semaphore(
CONF.cluster_remote_threshold)
self.roles = roles
if auth_uri:
self.auth_uri = auth_uri
else:
self.auth_uri = _get_auth_uri()
if overwrite or not hasattr(context._request_store, 'context'):
self.update_store()
if current_instance_info is not None:
self.current_instance_info = current_instance_info
else:
self.current_instance_info = InstanceInfo()
def clone(self):
return Context(
self.user_id,
self.tenant_id,
self.auth_token,
self.service_catalog,
self.username,
self.tenant_name,
self.roles,
self.is_admin,
self.remote_semaphore,
self.auth_uri,
self.resource_uuid,
self.current_instance_info,
self.request_id,
overwrite=False)
def to_dict(self):
return {
'user_id': self.user_id,
'tenant_id': self.tenant_id,
'auth_token': self.auth_token,
'service_catalog': self.service_catalog,
'username': self.username,
'tenant_name': self.tenant_name,
'is_admin': self.is_admin,
'roles': self.roles,
'auth_uri': self.auth_uri,
'resource_uuid': self.resource_uuid,
'request_id': self.request_id,
}
def is_auth_capable(self):
return (self.service_catalog and self.auth_token and self.tenant and
self.user_id)
# NOTE(adrienverge): The Context class uses the 'user' and 'tenant'
# properties internally (inherited from oslo.context), but Sahara code
# often uses 'user_id' and 'tenant_id'.
@property
def user_id(self):
return self.user
@user_id.setter
def user_id(self, value):
self.user = value
@property
def tenant_id(self):
return self.tenant
@tenant_id.setter
def tenant_id(self, value):
self.tenant = value
def get_admin_context():
return Context(is_admin=True)
_CTX_STORE = threading.local()
_CTX_KEY = 'current_ctx'
def has_ctx():
return hasattr(_CTX_STORE, _CTX_KEY)
def ctx():
if not has_ctx():
raise ex.IncorrectStateError(_("Context isn't available here"))
return getattr(_CTX_STORE, _CTX_KEY)
def current():
return ctx()
def set_ctx(new_ctx):
if not new_ctx and has_ctx():
delattr(_CTX_STORE, _CTX_KEY)
if hasattr(context._request_store, 'context'):
delattr(context._request_store, 'context')
if new_ctx:
setattr(_CTX_STORE, _CTX_KEY, new_ctx)
setattr(context._request_store, 'context', new_ctx)
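# Typical usage of the context helpers (illustrative sketch; the argument
# values below are placeholders):
#
#     set_ctx(Context(user_id='uid', tenant_id='tid'))
#     if has_ctx():
#         LOG.debug("current tenant: %s", ctx().tenant_name)
#     set_ctx(None)   # clears both the local and the oslo.context stores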
def _get_auth_uri():
if CONF.keystone_authtoken.auth_uri is not None:
auth_uri = CONF.keystone_authtoken.auth_uri
else:
if CONF.keystone_authtoken.identity_uri is not None:
identity_uri = CONF.keystone_authtoken.identity_uri
else:
host = CONF.keystone_authtoken.auth_host
port = CONF.keystone_authtoken.auth_port
protocol = CONF.keystone_authtoken.auth_protocol
identity_uri = '%s://%s:%s' % (protocol, host, port)
if CONF.use_identity_api_v3 is False:
auth_version = 'v2.0'
else:
auth_version = 'v3'
auth_uri = '%s/%s' % (identity_uri, auth_version)
return auth_uri
def _wrapper(ctx, thread_description, thread_group, func, *args, **kwargs):
try:
set_ctx(ctx)
func(*args, **kwargs)
except BaseException as e:
LOG.debug(
"Thread {thread} failed with exception: {exception}".format(
thread=thread_description, exception=e))
if thread_group and not thread_group.exc:
thread_group.exc = e
thread_group.failed_thread = thread_description
finally:
if thread_group:
thread_group._on_thread_exit()
set_ctx(None)
def spawn(thread_description, func, *args, **kwargs):
eventlet.spawn(_wrapper, current().clone(), thread_description,
None, func, *args, **kwargs)
class ThreadGroup(object):
"""ThreadGroup object.
    It is advised to use ThreadGroup as a context manager instead of
    instantiating it and calling _wait() manually. __exit__() is guaranteed
    to return only after all child threads are done, even if the spawning
    code has thrown an exception.
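    Example (illustrative sketch; ``do_work`` and its arguments are
    placeholders, and a Context is assumed to have been set via set_ctx()):
        with ThreadGroup() as tg:
            tg.spawn('worker-1', do_work, arg1)
            tg.spawn('worker-2', do_work, arg2)
        # __exit__ waits for both threads; if one of them failed, a
        # ThreadException wrapping the first failure is raised here.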
"""
def __init__(self, thread_pool_size=1000):
self.tg = greenpool.GreenPool(size=thread_pool_size)
self.exc = None
self.failed_thread = None
self.threads = 0
self.cv = threading.Condition()
def spawn(self, thread_description, func, *args, **kwargs):
self.tg.spawn(_wrapper, current().clone(), thread_description,
self, func, *args, **kwargs)
with self.cv:
self.threads += 1
def _on_thread_exit(self):
with self.cv:
self.threads -= 1
if self.threads == 0:
self.cv.notifyAll()
# NOTE(dmitryme): A little rationale on why we reimplemented wait():
    # * Eventlet's GreenPool.wait() can hang
# * Oslo's ThreadGroup.wait() can exit before all threads are done
#
def _wait(self):
"""Using of _wait() method.
        It is preferred to use the class as a context manager rather than
        calling _wait() directly; see the class docstring for an explanation.
"""
with self.cv:
while self.threads > 0:
self.cv.wait()
if self.exc:
raise ex.ThreadException(self.failed_thread, self.exc)
def __enter__(self):
return self
def __exit__(self, *ex):
if not any(ex):
self._wait()
else:
            # If the spawning code threw an exception, it has higher priority
            # for us than the one thrown inside a child thread (if any)
try:
self._wait()
except Exception:
                # swallowing this makes __exit__ re-raise the original
                # exception
pass
def sleep(seconds=0):
time.sleep(seconds)
class InstanceInfo(object):
def __init__(self, cluster_id=None, instance_id=None, instance_name=None,
node_group_id=None, step_type=None, step_id=None):
self.cluster_id = cluster_id
self.instance_id = instance_id
self.instance_name = instance_name
self.node_group_id = node_group_id
self.step_type = step_type
self.step_id = step_id
def set_step_type(step_type):
current().current_instance_info.step_type = step_type
class InstanceInfoManager(object):
def __init__(self, instance_info):
self.prev_instance_info = current().current_instance_info
if not instance_info.step_type:
instance_info.step_type = self.prev_instance_info.step_type
if not instance_info.step_id:
instance_info.step_id = self.prev_instance_info.step_id
current().current_instance_info = instance_info
def __enter__(self):
pass
def __exit__(self, *args):
current().current_instance_info = self.prev_instance_info
def set_current_cluster_id(cluster_id):
current().resource_uuid = 'none, cluster: %s' % cluster_id
def set_current_job_execution_id(je_id):
current().resource_uuid = 'none, job_execution: %s' % je_id
class SetCurrentInstanceId(object):
def __init__(self, instance_id):
ctx = current()
self.prev_uuid = ctx.resource_uuid
if ctx.resource_uuid:
ctx.resource_uuid = ctx.resource_uuid.replace('none', instance_id)
def __enter__(self):
pass
def __exit__(self, *ex):
current().resource_uuid = self.prev_uuid
def set_current_instance_id(instance_id):
return SetCurrentInstanceId(instance_id)
|
|
"""
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
import collections
from collections import namedtuple
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from fractions import Fraction
from io import StringIO
from numbers import Number
import re
import numpy as np
import pytest
import pytz
from pandas._libs import lib, missing as libmissing
import pandas.util._test_decorators as td
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
ensure_int32,
is_bool,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_float,
is_integer,
is_number,
is_scalar,
is_scipy_sparse,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DateOffset,
DatetimeIndex,
Index,
Interval,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays import IntegerArray
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, "list"),
([], True, "list-empty"),
((1,), True, "tuple"),
(tuple(), True, "tuple-empty"),
({"a": 1}, True, "dict"),
(dict(), True, "dict-empty"),
({"a", 1}, "set", "set"),
(set(), "set", "set-empty"),
(frozenset({"a", 1}), "set", "frozenset"),
(frozenset(), "set", "frozenset-empty"),
(iter([1, 2]), True, "iterator"),
(iter([]), True, "iterator-empty"),
((x for x in [1, 2]), True, "generator"),
((_ for _ in []), True, "generator-empty"),
(Series([1]), True, "Series"),
(Series([], dtype=object), True, "Series-empty"),
(Series(["a"]).str, True, "StringMethods"),
(Series([], dtype="O").str, True, "StringMethods-empty"),
(Index([1]), True, "Index"),
(Index([]), True, "Index-empty"),
(DataFrame([[1]]), True, "DataFrame"),
(DataFrame(), True, "DataFrame-empty"),
(np.ndarray((2,) * 1), True, "ndarray-1d"),
(np.array([]), True, "ndarray-1d-empty"),
(np.ndarray((2,) * 2), True, "ndarray-2d"),
(np.array([[]]), True, "ndarray-2d-empty"),
(np.ndarray((2,) * 3), True, "ndarray-3d"),
(np.array([[[]]]), True, "ndarray-3d-empty"),
(np.ndarray((2,) * 4), True, "ndarray-4d"),
(np.array([[[[]]]]), True, "ndarray-4d-empty"),
(np.array(2), False, "ndarray-0d"),
(1, False, "int"),
(b"123", False, "bytes"),
(b"", False, "bytes-empty"),
("123", False, "string"),
("", False, "string-empty"),
(str, False, "string-type"),
(object(), False, "object"),
(np.nan, False, "NaN"),
(None, False, "None"),
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == "set" else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == "set" else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_list_like_recursion():
# GH 33721
    # interpreter would crash with SIGABRT
def foo():
inference.is_list_like([])
foo()
with pytest.raises(RecursionError):
foo()
def test_is_sequence():
is_seq = inference.is_sequence
assert is_seq((1, 2))
assert is_seq([1, 2])
assert not is_seq("abcd")
assert not is_seq(np.int64)
class A:
def __getitem__(self):
return 1
assert not is_seq(A())
def test_is_array_like():
assert inference.is_array_like(Series([], dtype=object))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize(
"inner",
[
[],
[1],
(1,),
(1, 2),
{"a": 1},
{1, "a"},
Series([1]),
Series([], dtype=object),
Series(["a"]).str,
(x for x in range(5)),
],
)
@pytest.mark.parametrize("outer", [list, Series, np.array, tuple])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize(
"obj",
[
"abc",
[],
[1],
(1,),
["a"],
"a",
{"a"},
[1, 2, 3],
Series([1]),
DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
],
)
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize("ll", [{}, {"A": 1}, Series([1]), collections.defaultdict()])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll",
[
"1",
1,
[1, 2],
(1, 2),
range(2),
Index([1]),
dict,
collections.defaultdict,
Series,
],
)
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike:
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key) -> bool:
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like():
class MockFile:
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
test_tuple = collections.namedtuple("Test", ["a", "b", "c"])
@pytest.mark.parametrize("ll", [test_tuple(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize("ll", [(1, 2, 3), "a", Series({"pi": 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass:
pass
class UnhashableClass1:
__hash__ = None
class UnhashableClass2:
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1, 3.14, np.float64(3.14), "a", tuple(), (1,), HashableClass())
not_hashable = ([], UnhashableClass1())
abc_hashable_not_really_hashable = (([],), UnhashableClass2())
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.abc.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
@pytest.mark.parametrize("ll", [re.compile("ad")])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize("ll", ["x", 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r"a", "x", r"asdf", re.compile("adsf"), r"\u2233\s*", re.compile(r"")]
)
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize("ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference:
@pytest.mark.parametrize(
"arr",
[
np.array(list("abc"), dtype="S1"),
np.array(list("abc"), dtype="S1").astype(object),
[b"a", np.nan, b"c"],
],
)
def test_infer_dtype_bytes(self, arr):
result = lib.infer_dtype(arr, skipna=True)
assert result == "bytes"
@pytest.mark.parametrize(
"value, expected",
[
(float("inf"), True),
(np.inf, True),
(-np.inf, False),
(1, False),
("a", False),
],
)
def test_isposinf_scalar(self, value, expected):
# GH 11352
result = libmissing.isposinf_scalar(value)
assert result is expected
@pytest.mark.parametrize(
"value, expected",
[
(float("-inf"), True),
(-np.inf, True),
(np.inf, False),
(1, False),
("a", False),
],
)
def test_isneginf_scalar(self, value, expected):
result = libmissing.isneginf_scalar(value)
assert result is expected
@pytest.mark.parametrize("coerce_numeric", [True, False])
@pytest.mark.parametrize(
"infinity", ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"]
)
@pytest.mark.parametrize("prefix", ["", "-", "+"])
def test_maybe_convert_numeric_infinities(self, coerce_numeric, infinity, prefix):
# see gh-13274
result = lib.maybe_convert_numeric(
np.array([prefix + infinity], dtype=object),
na_values={"", "NULL", "nan"},
coerce_numeric=coerce_numeric,
)
expected = np.array([np.inf if prefix in ["", "+"] else -np.inf])
tm.assert_numpy_array_equal(result, expected)
def test_maybe_convert_numeric_infinities_raises(self):
msg = "Unable to parse string"
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(["foo_inf"], dtype=object),
na_values={"", "NULL", "nan"},
coerce_numeric=False,
)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(["1.200", "-999.000", "4.500"], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(["inf", "inf", "inf"], dtype="O")
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(["-inf", "-inf", "-inf"], dtype="O")
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(["42E", "2E", "99e", "6e"], dtype="O")
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
        # make sure that we are handling non-hashables
arr = np.array([[10.0, 2], 1.0, "apple"], dtype=object)
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2 ** 63], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2 ** 63)], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2 ** 63)], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize(
"arr",
[
np.array([2 ** 63, np.nan], dtype=object),
np.array([str(2 ** 63), np.nan], dtype=object),
np.array([np.nan, 2 ** 63], dtype=object),
np.array([np.nan, str(2 ** 63)], dtype=object),
],
)
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2 ** 63, 2 ** 63 + 1], dtype=object)
na_values = {2 ** 63}
expected = (
np.array([np.nan, 2 ** 63 + 1], dtype=float) if coerce else arr.copy()
)
result = lib.maybe_convert_numeric(arr, na_values, coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"case",
[
np.array([2 ** 63, -1], dtype=object),
np.array([str(2 ** 63), -1], dtype=object),
np.array([str(2 ** 63), str(-1)], dtype=object),
np.array([-1, 2 ** 63], dtype=object),
np.array([-1, str(2 ** 63)], dtype=object),
np.array([str(-1), str(2 ** 63)], dtype=object),
],
)
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_string_uint64(self):
# GH32394
result = lib.maybe_convert_numeric(
np.array(["uint64"], dtype=object), set(), coerce_numeric=True
)
assert np.isnan(result)
@pytest.mark.parametrize("value", [-(2 ** 63) - 1, 2 ** 64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2 ** 63], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2 ** 63)], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2 ** 63, -1], dtype=object)
exp = np.array([2 ** 63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_maybe_convert_objects_datetime(self):
# GH27438
arr = np.array(
[np.datetime64("2000-01-01"), np.timedelta64(1, "s")], dtype=object
)
exp = arr.copy()
out = lib.maybe_convert_objects(arr, convert_datetime=1, convert_timedelta=1)
tm.assert_numpy_array_equal(out, exp)
arr = np.array([pd.NaT, np.timedelta64(1, "s")], dtype=object)
exp = np.array([np.timedelta64("NaT"), np.timedelta64(1, "s")], dtype="m8[ns]")
out = lib.maybe_convert_objects(arr, convert_datetime=1, convert_timedelta=1)
tm.assert_numpy_array_equal(out, exp)
arr = np.array([np.timedelta64(1, "s"), np.nan], dtype=object)
exp = arr.copy()
out = lib.maybe_convert_objects(arr, convert_datetime=1, convert_timedelta=1)
tm.assert_numpy_array_equal(out, exp)
@pytest.mark.parametrize(
"exp",
[
IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True])),
IntegerArray(np.array([2, 0], dtype="int64"), np.array([False, True])),
],
)
def test_maybe_convert_objects_nullable_integer(self, exp):
# GH27335
arr = np.array([2, np.NaN], dtype=object)
result = lib.maybe_convert_objects(arr, convert_to_nullable_integer=1)
tm.assert_extension_array_equal(result, exp)
def test_maybe_convert_objects_bool_nan(self):
# GH32146
ind = pd.Index([True, False, np.nan], dtype=object)
exp = np.array([True, False, np.nan], dtype=object)
out = lib.maybe_convert_objects(ind.values, safe=1)
tm.assert_numpy_array_equal(out, exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference:
# Dummy class used for testing with Python objects
class Dummy:
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
@pytest.mark.parametrize("skipna", [True, False])
def test_length_zero(self, skipna):
result = lib.infer_dtype(np.array([], dtype="i4"), skipna=skipna)
assert result == "integer"
result = lib.infer_dtype([], skipna=skipna)
assert result == "empty"
# GH 18004
arr = np.array([np.array([], dtype=object), np.array([], dtype=object)])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "empty"
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "integer"
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), "foo"], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "mixed-integer"
arr = np.array([1, 2, 3, 4, 5], dtype="i4")
result = lib.infer_dtype(arr, skipna=True)
assert result == "integer"
@pytest.mark.parametrize(
"arr, skipna",
[
(np.array([1, 2, np.nan, np.nan, 3], dtype="O"), False),
(np.array([1, 2, np.nan, np.nan, 3], dtype="O"), True),
(np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), False),
(np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), True),
],
)
def test_integer_na(self, arr, skipna):
# GH 27392
result = lib.infer_dtype(arr, skipna=skipna)
expected = "integer" if skipna else "integer-na"
assert result == expected
def test_infer_dtype_skipna_default(self):
# infer_dtype `skipna` default deprecated in GH#24050,
# changed to True in GH#29876
arr = np.array([1, 2, 3, np.nan], dtype=object)
result = lib.infer_dtype(arr)
assert result == "integer"
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "boolean"
arr = np.array([np.bool_(True), np.bool_(False)], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "boolean"
arr = np.array([True, False, True, "foo"], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "mixed"
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr, skipna=True)
assert result == "boolean"
arr = np.array([True, np.nan, False], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "boolean"
result = lib.infer_dtype(arr, skipna=False)
assert result == "mixed"
def test_floats(self):
arr = np.array([1.0, 2.0, 3.0, np.float64(4), np.float32(5)], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "floating"
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), "foo"], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "mixed-integer"
arr = np.array([1, 2, 3, 4, 5], dtype="f4")
result = lib.infer_dtype(arr, skipna=True)
assert result == "floating"
arr = np.array([1, 2, 3, 4, 5], dtype="f8")
result = lib.infer_dtype(arr, skipna=True)
assert result == "floating"
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == "decimal"
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == "mixed"
arr = np.array([Decimal(1), Decimal("NaN"), Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == "decimal"
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype="O")
result = lib.infer_dtype(arr, skipna=True)
assert result == "decimal"
# complex is compatible with nan, so skipna has no effect
@pytest.mark.parametrize("skipna", [True, False])
def test_complex(self, skipna):
# gets cast to complex on array construction
arr = np.array([1.0, 2.0, 1 + 1j])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "complex"
arr = np.array([1.0, 2.0, 1 + 1j], dtype="O")
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "mixed"
# gets cast to complex on array construction
arr = np.array([1, np.nan, 1 + 1j])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "complex"
arr = np.array([1.0, np.nan, 1 + 1j], dtype="O")
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "mixed"
# complex with nans stays complex
arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype="O")
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "complex"
# test smaller complex dtype; will pass through _try_infer_map fastpath
arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype=np.complex64)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == "complex"
def test_string(self):
pass
def test_unicode(self):
arr = ["a", np.nan, "c"]
result = lib.infer_dtype(arr, skipna=False)
# This currently returns "mixed", but it's not clear that's optimal.
# This could also return "string" or "mixed-string"
assert result == "mixed"
arr = ["a", np.nan, "c"]
result = lib.infer_dtype(arr, skipna=True)
assert result == "string"
arr = ["a", "c"]
result = lib.infer_dtype(arr, skipna=False)
assert result == "string"
@pytest.mark.parametrize(
"dtype, missing, skipna, expected",
[
(float, np.nan, False, "floating"),
(float, np.nan, True, "floating"),
(object, np.nan, False, "floating"),
(object, np.nan, True, "empty"),
(object, None, False, "mixed"),
(object, None, True, "empty"),
],
)
@pytest.mark.parametrize("box", [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == "datetime64"
def test_infer_dtype_datetime64(self):
arr = np.array(
[np.datetime64("2011-01-01"), np.datetime64("2011-01-01")], dtype=object
)
assert lib.infer_dtype(arr, skipna=True) == "datetime64"
@pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
def test_infer_dtype_datetime64_with_na(self, na_value):
# starts with nan
arr = np.array([na_value, np.datetime64("2011-01-02")])
assert lib.infer_dtype(arr, skipna=True) == "datetime64"
arr = np.array([na_value, np.datetime64("2011-01-02"), na_value])
assert lib.infer_dtype(arr, skipna=True) == "datetime64"
@pytest.mark.parametrize(
"arr",
[
np.array(
[np.timedelta64("nat"), np.datetime64("2011-01-02")], dtype=object
),
np.array(
[np.datetime64("2011-01-02"), np.timedelta64("nat")], dtype=object
),
np.array([np.datetime64("2011-01-01"), pd.Timestamp("2011-01-02")]),
np.array([pd.Timestamp("2011-01-02"), np.datetime64("2011-01-01")]),
np.array([np.nan, pd.Timestamp("2011-01-02"), 1.1]),
np.array([np.nan, "2011-01-01", pd.Timestamp("2011-01-02")]),
np.array([np.datetime64("nat"), np.timedelta64(1, "D")], dtype=object),
np.array([np.timedelta64(1, "D"), np.datetime64("nat")], dtype=object),
],
)
def test_infer_datetimelike_dtype_mixed(self, arr):
assert lib.infer_dtype(arr, skipna=False) == "mixed"
def test_infer_dtype_mixed_integer(self):
arr = np.array([np.nan, pd.Timestamp("2011-01-02"), 1])
assert lib.infer_dtype(arr, skipna=True) == "mixed-integer"
@pytest.mark.parametrize(
"arr",
[
np.array([Timestamp("2011-01-01"), Timestamp("2011-01-02")]),
np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)]),
np.array([datetime(2011, 1, 1), pd.Timestamp("2011-01-02")]),
],
)
def test_infer_dtype_datetime(self, arr):
assert lib.infer_dtype(arr, skipna=True) == "datetime"
@pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
@pytest.mark.parametrize(
"time_stamp", [pd.Timestamp("2011-01-01"), datetime(2011, 1, 1)]
)
def test_infer_dtype_datetime_with_na(self, na_value, time_stamp):
# starts with nan
arr = np.array([na_value, time_stamp])
assert lib.infer_dtype(arr, skipna=True) == "datetime"
arr = np.array([na_value, time_stamp, na_value])
assert lib.infer_dtype(arr, skipna=True) == "datetime"
@pytest.mark.parametrize(
"arr",
[
np.array([pd.Timedelta("1 days"), pd.Timedelta("2 days")]),
np.array([np.timedelta64(1, "D"), np.timedelta64(2, "D")], dtype=object),
np.array([timedelta(1), timedelta(2)]),
],
)
def test_infer_dtype_timedelta(self, arr):
assert lib.infer_dtype(arr, skipna=True) == "timedelta"
@pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
@pytest.mark.parametrize(
"delta", [Timedelta("1 days"), np.timedelta64(1, "D"), timedelta(1)]
)
def test_infer_dtype_timedelta_with_na(self, na_value, delta):
# starts with nan
arr = np.array([na_value, delta])
assert lib.infer_dtype(arr, skipna=True) == "timedelta"
arr = np.array([na_value, delta, na_value])
assert lib.infer_dtype(arr, skipna=True) == "timedelta"
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period("2011-01", freq="D"), pd.Period("2011-02", freq="D")])
assert lib.infer_dtype(arr, skipna=True) == "period"
arr = np.array([pd.Period("2011-01", freq="D"), pd.Period("2011-02", freq="M")])
assert lib.infer_dtype(arr, skipna=True) == "period"
def test_infer_dtype_period_mixed(self):
arr = np.array(
[pd.Period("2011-01", freq="M"), np.datetime64("nat")], dtype=object
)
assert lib.infer_dtype(arr, skipna=False) == "mixed"
arr = np.array(
[np.datetime64("nat"), pd.Period("2011-01", freq="M")], dtype=object
)
assert lib.infer_dtype(arr, skipna=False) == "mixed"
@pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
def test_infer_dtype_period_with_na(self, na_value):
# starts with nan
arr = np.array([na_value, pd.Period("2011-01", freq="D")])
assert lib.infer_dtype(arr, skipna=True) == "period"
arr = np.array([na_value, pd.Period("2011-01", freq="D"), na_value])
assert lib.infer_dtype(arr, skipna=True) == "period"
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[
Timestamp("20170612", tz="US/Eastern"),
Timestamp("20170311", tz="US/Eastern"),
],
[date(2017, 6, 12), Timestamp("20170311", tz="US/Eastern")],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)],
],
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)],
],
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz="US/Eastern")],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz="US/Eastern")],
],
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"],
],
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second, expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr, skipna=True) == "floating"
# a mix of nan and None infers as "empty" with skipna=True and "mixed" otherwise
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr, skipna=True) == "empty"
assert lib.infer_dtype(arr, skipna=False) == "mixed"
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr, skipna=True) == "empty"
assert lib.infer_dtype(arr, skipna=False) == "mixed"
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr, skipna=False) == "datetime"
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr, skipna=False) == "datetime"
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr, skipna=False) == "datetime"
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr, skipna=False) == "datetime"
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr, skipna=False) == "datetime"
# np.datetime64(nat)
arr = np.array([np.datetime64("nat")])
assert lib.infer_dtype(arr, skipna=False) == "datetime64"
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64("nat"), n])
assert lib.infer_dtype(arr, skipna=False) == "datetime64"
arr = np.array([pd.NaT, n, np.datetime64("nat"), n])
assert lib.infer_dtype(arr, skipna=False) == "datetime64"
arr = np.array([np.timedelta64("nat")], dtype=object)
assert lib.infer_dtype(arr, skipna=False) == "timedelta"
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64("nat"), n])
assert lib.infer_dtype(arr, skipna=False) == "timedelta"
arr = np.array([pd.NaT, n, np.timedelta64("nat"), n])
assert lib.infer_dtype(arr, skipna=False) == "timedelta"
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64("nat"), np.timedelta64("nat"), np.nan])
assert lib.infer_dtype(arr, skipna=False) == "mixed"
arr = np.array([np.timedelta64("nat"), np.datetime64("nat")], dtype=object)
assert lib.infer_dtype(arr, skipna=False) == "mixed"
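# Illustrative note (added comment, not part of the original pandas test module):
# the assertions above capture how lib.infer_dtype treats NaT-like values, e.g.
#   >>> lib.infer_dtype(np.array([pd.NaT, np.nan]), skipna=False)
#   'datetime'
#   >>> lib.infer_dtype(np.array([np.datetime64("nat")]), skipna=False)
#   'datetime64'
#   >>> lib.infer_dtype(np.array([np.timedelta64("nat"), np.datetime64("nat")], dtype=object), skipna=False)
#   'mixed'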
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64("nat")])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64("nat")])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64("nat"), np.timedelta64("nat")])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array(
[
pd.Timestamp("20130101", tz="US/Eastern"),
pd.Timestamp("20130102", tz="US/Eastern"),
],
dtype=object,
)
)
assert not lib.is_datetime_with_singletz_array(
np.array(
[
pd.Timestamp("20130101", tz="US/Eastern"),
pd.Timestamp("20130102", tz="CET"),
],
dtype=object,
)
)
@pytest.mark.parametrize(
"func",
[
"is_datetime_array",
"is_datetime64_array",
"is_bool_array",
"is_timedelta_or_timedelta64_array",
"is_date_array",
"is_time_array",
"is_interval_array",
"is_period_array",
],
)
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(["foo", "bar"])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == "date"
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates, skipna=False)
assert result == "mixed"
result = lib.infer_dtype(dates, skipna=True)
assert result == "date"
@pytest.mark.parametrize(
"values",
[
[date(2020, 1, 1), pd.Timestamp("2020-01-01")],
[pd.Timestamp("2020-01-01"), date(2020, 1, 1)],
[date(2020, 1, 1), pd.NaT],
[pd.NaT, date(2020, 1, 1)],
],
)
@pytest.mark.parametrize("skipna", [True, False])
def test_infer_dtype_date_order_invariant(self, values, skipna):
# https://github.com/pandas-dev/pandas/issues/33741
result = lib.infer_dtype(values, skipna=skipna)
assert result == "date"
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(["foo", "bar"]))
assert not lib.is_string_array(
np.array(["foo", "bar", pd.NA], dtype=object), skipna=False
)
assert lib.is_string_array(
np.array(["foo", "bar", pd.NA], dtype=object), skipna=True
)
# NaN is not a valid missing value for a string array; only pd.NA is
assert not lib.is_string_array(
np.array(["foo", "bar", np.nan], dtype=object), skipna=True
)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
lib.to_object_array_tuples(values)
# make sure record array works
record = namedtuple("record", "x y")
r = record(5, 6)
values = [r]
lib.to_object_array_tuples(values)
def test_object(self):
# GH 7431
# cannot infer more than this, as there is only a single element
arr = np.array([None], dtype="O")
result = lib.infer_dtype(arr, skipna=False)
assert result == "mixed"
result = lib.infer_dtype(arr, skipna=True)
assert result == "empty"
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(
[[1, 2, 3, None, None], [4, 5, 6, None, None]], dtype=object
)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period("2011-01", freq="M"))
assert not lib.is_period(pd.PeriodIndex(["2011-01"], freq="M"))
assert not lib.is_period(pd.Timestamp("2011-01"))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
arr = Categorical(list("abc"))
result = lib.infer_dtype(arr, skipna=True)
assert result == "categorical"
result = lib.infer_dtype(Series(arr), skipna=True)
assert result == "categorical"
arr = Categorical(list("abc"), categories=["cegfab"], ordered=True)
result = lib.infer_dtype(arr, skipna=True)
assert result == "categorical"
result = lib.infer_dtype(Series(arr), skipna=True)
assert result == "categorical"
def test_interval(self):
idx = pd.IntervalIndex.from_breaks(range(5), closed="both")
inferred = lib.infer_dtype(idx, skipna=False)
assert inferred == "interval"
inferred = lib.infer_dtype(idx._data, skipna=False)
assert inferred == "interval"
inferred = lib.infer_dtype(pd.Series(idx), skipna=False)
assert inferred == "interval"
@pytest.mark.parametrize("klass", [pd.array, pd.Series])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("data", [["a", "b", "c"], ["a", "b", pd.NA]])
def test_string_dtype(self, data, skipna, klass):
# StringArray
val = klass(data, dtype="string")
inferred = lib.infer_dtype(val, skipna=skipna)
assert inferred == "string"
@pytest.mark.parametrize("klass", [pd.array, pd.Series])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("data", [[True, False, True], [True, False, pd.NA]])
def test_boolean_dtype(self, data, skipna, klass):
# BooleanArray
val = klass(data, dtype="boolean")
inferred = lib.infer_dtype(val, skipna=skipna)
assert inferred == "boolean"
class TestNumberScalar:
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number("x")
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64("2011-01-01"))
assert not is_number(Timestamp("2011-01-01"))
assert not is_number(Timestamp("2011-01-01", tz="US/Eastern"))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta("1 days"))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, "D"))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(np.bool(False))
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool("x")
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64("2011-01-01"))
assert not is_bool(Timestamp("2011-01-01"))
assert not is_bool(Timestamp("2011-01-01", tz="US/Eastern"))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, "D"))
assert not is_bool(Timedelta("1 days"))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(np.bool(False))
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer("x")
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64("2011-01-01"))
assert not is_integer(Timestamp("2011-01-01"))
assert not is_integer(Timestamp("2011-01-01", tz="US/Eastern"))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta("1 days"))
assert not is_integer(np.timedelta64(1, "D"))
def test_is_float(self):
assert is_float(1.1)
assert is_float(np.float64(1.1))
assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
assert not is_float(1 + 3j)
assert not is_float(np.bool(False))
assert not is_float(np.bool_(False))
assert not is_float(np.int64(1))
assert not is_float(np.complex128(1 + 3j))
assert not is_float(None)
assert not is_float("x")
assert not is_float(datetime(2011, 1, 1))
assert not is_float(np.datetime64("2011-01-01"))
assert not is_float(Timestamp("2011-01-01"))
assert not is_float(Timestamp("2011-01-01", tz="US/Eastern"))
assert not is_float(timedelta(1000))
assert not is_float(np.timedelta64(1, "D"))
assert not is_float(Timedelta("1 days"))
def test_is_datetime_dtypes(self):
ts = pd.date_range("20130101", periods=3)
tsa = pd.date_range("20130101", periods=3, tz="US/Eastern")
assert is_datetime64_dtype("datetime64")
assert is_datetime64_dtype("datetime64[ns]")
assert is_datetime64_dtype(ts)
assert not is_datetime64_dtype(tsa)
assert not is_datetime64_ns_dtype("datetime64")
assert is_datetime64_ns_dtype("datetime64[ns]")
assert is_datetime64_ns_dtype(ts)
assert is_datetime64_ns_dtype(tsa)
assert is_datetime64_any_dtype("datetime64")
assert is_datetime64_any_dtype("datetime64[ns]")
assert is_datetime64_any_dtype(ts)
assert is_datetime64_any_dtype(tsa)
assert not is_datetime64tz_dtype("datetime64")
assert not is_datetime64tz_dtype("datetime64[ns]")
assert not is_datetime64tz_dtype(ts)
assert is_datetime64tz_dtype(tsa)
for tz in ["US/Eastern", "UTC"]:
dtype = f"datetime64[ns, {tz}]"
assert not is_datetime64_dtype(dtype)
assert is_datetime64tz_dtype(dtype)
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_any_dtype(dtype)
def test_is_timedelta(self):
assert is_timedelta64_dtype("timedelta64")
assert is_timedelta64_dtype("timedelta64[ns]")
assert not is_timedelta64_ns_dtype("timedelta64")
assert is_timedelta64_ns_dtype("timedelta64[ns]")
tdi = TimedeltaIndex([1e14, 2e14], dtype="timedelta64[ns]")
assert is_timedelta64_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi.astype("timedelta64[ns]"))
# Conversion to Int64Index:
assert not is_timedelta64_ns_dtype(tdi.astype("timedelta64"))
assert not is_timedelta64_ns_dtype(tdi.astype("timedelta64[h]"))
class TestIsScalar:
def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
assert is_scalar(Fraction())
assert is_scalar(0.0)
assert is_scalar(1)
assert is_scalar(complex(2))
assert is_scalar(float("NaN"))
assert is_scalar(np.nan)
assert is_scalar("foobar")
assert is_scalar(b"foobar")
assert is_scalar(datetime(2014, 1, 1))
assert is_scalar(date(2014, 1, 1))
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
assert is_scalar(pd.NA)
def test_is_scalar_builtin_nonscalars(self):
assert not is_scalar({})
assert not is_scalar([])
assert not is_scalar([1])
assert not is_scalar(())
assert not is_scalar((1,))
assert not is_scalar(slice(None))
assert not is_scalar(Ellipsis)
def test_is_scalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.0))
assert is_scalar(np.int32(1))
assert is_scalar(np.complex64(2))
assert is_scalar(np.object_("foobar"))
assert is_scalar(np.str_("foobar"))
assert is_scalar(np.unicode_("foobar"))
assert is_scalar(np.bytes_(b"foobar"))
assert is_scalar(np.datetime64("2014-01-01"))
assert is_scalar(np.timedelta64(1, "h"))
def test_is_scalar_numpy_zerodim_arrays(self):
for zerodim in [
np.array(1),
np.array("foobar"),
np.array(np.datetime64("2014-01-01")),
np.array(np.timedelta64(1, "h")),
np.array(np.datetime64("NaT")),
]:
assert not is_scalar(zerodim)
assert is_scalar(lib.item_from_zerodim(zerodim))
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scalar_numpy_arrays(self):
assert not is_scalar(np.array([]))
assert not is_scalar(np.array([[]]))
assert not is_scalar(np.matrix("1; 2"))
def test_is_scalar_pandas_scalars(self):
assert is_scalar(Timestamp("2014-01-01"))
assert is_scalar(Timedelta(hours=1))
assert is_scalar(Period("2014-01-01"))
assert is_scalar(Interval(left=0, right=1))
assert is_scalar(DateOffset(days=1))
assert is_scalar(pd.offsets.Minute(3))
def test_is_scalar_pandas_containers(self):
assert not is_scalar(Series(dtype=object))
assert not is_scalar(Series([1]))
assert not is_scalar(DataFrame())
assert not is_scalar(DataFrame([[1]]))
assert not is_scalar(Index([]))
assert not is_scalar(Index([1]))
assert not is_scalar(Categorical([]))
assert not is_scalar(DatetimeIndex([])._data)
assert not is_scalar(TimedeltaIndex([])._data)
assert not is_scalar(DatetimeIndex([])._data.to_period("D"))
assert not is_scalar(pd.array([1, 2, 3]))
def test_is_scalar_number(self):
# Number() is not recognized by PyNumber_Check, so by extension
# is not recognized by is_scalar, but instances of non-abstract
# subclasses are.
class Numeric(Number):
def __init__(self, value):
self.value = value
def __int__(self):
return self.value
num = Numeric(1)
assert is_scalar(num)
def test_datetimeindex_from_empty_datetime64_array():
for unit in ["ms", "us", "ns"]:
idx = DatetimeIndex(np.array([], dtype=f"datetime64[{unit}]"))
assert len(idx) == 0
def test_nan_to_nat_conversions():
df = DataFrame(
dict({"A": np.asarray(range(10), dtype="float64"), "B": Timestamp("20010101")})
)
df.iloc[3:6, :] = np.nan
result = df.loc[4, "B"]
assert result is pd.NaT
s = df["B"].copy()
s[8:9] = np.nan
assert s[8] is pd.NaT
@td.skip_if_no_scipy
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scipy_sparse(spmatrix): # noqa: F811
assert is_scipy_sparse(spmatrix([[0, 1]]))
assert not is_scipy_sparse(np.array([1]))
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = ensure_int32(values)
assert result.dtype == np.int32
values = np.arange(10, dtype=np.int64)
result = ensure_int32(values)
assert result.dtype == np.int32
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Composer Extension
Downloads, installs and runs Composer.
"""
import os
import os.path
import sys
import logging
import re
import json
import StringIO
from build_pack_utils import utils
from build_pack_utils import stream_output
from extension_helpers import ExtensionHelper
from build_pack_utils.compile_extensions import CompileExtensions
_log = logging.getLogger('composer')
def find_composer_paths(ctx):
build_dir = ctx['BUILD_DIR']
webdir = ctx['WEBDIR']
json_path = None
lock_path = None
json_paths = [
os.path.join(build_dir, 'composer.json'),
os.path.join(build_dir, webdir, 'composer.json')
]
lock_paths = [
os.path.join(build_dir, 'composer.lock'),
os.path.join(build_dir, webdir, 'composer.lock')
]
env_path = os.getenv('COMPOSER_PATH')
if env_path is not None:
json_paths = json_paths + [
os.path.join(build_dir, env_path, 'composer.json'),
os.path.join(build_dir, webdir, env_path, 'composer.json')
]
lock_paths = lock_paths + [
os.path.join(build_dir, env_path, 'composer.lock'),
os.path.join(build_dir, webdir, env_path, 'composer.lock')
]
for path in json_paths:
if os.path.exists(path):
json_path = path
for path in lock_paths:
if os.path.exists(path):
lock_path = path
return (json_path, lock_path)
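# Illustrative sketch (added comment; the paths below are hypothetical examples):
# find_composer_paths() checks BUILD_DIR, then WEBDIR, and, when COMPOSER_PATH is
# set in the environment, those same roots joined with COMPOSER_PATH, e.g.
#   >>> ctx = {'BUILD_DIR': '/tmp/app', 'WEBDIR': 'htdocs'}
#   >>> find_composer_paths(ctx)
#   ('/tmp/app/composer.json', None)   # when only /tmp/app/composer.json exists
# Later matches win, so a composer.json under WEBDIR (or COMPOSER_PATH)
# overrides one at the top of BUILD_DIR.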
class ComposerConfiguration(object):
def __init__(self, ctx):
self._ctx = ctx
self._log = _log
self._init_composer_paths()
def _init_composer_paths(self):
(self.json_path, self.lock_path) = \
find_composer_paths(self._ctx)
def read_exts_from_path(self, path):
exts = []
if path:
req_pat = re.compile(r'"require"\s?\:\s?\{(.*?)\}', re.DOTALL)
ext_pat = re.compile(r'"ext-(.*?)"')
with open(path, 'rt') as fp:
data = fp.read()
for req_match in req_pat.finditer(data):
for ext_match in ext_pat.finditer(req_match.group(1)):
exts.append(ext_match.group(1))
return exts
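# Illustrative example (added comment; the composer.json snippet is hypothetical):
# read_exts_from_path() scans the "require" block(s) for "ext-*" keys, so a file
# containing
#   "require": {"php": ">=5.6", "ext-gd": "*", "ext-mbstring": "*"}
# would yield ['gd', 'mbstring']; the plain "php" requirement is ignored here.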
def pick_php_version(self, requested):
selected = None
if requested is None:
selected = self._ctx['PHP_VERSION']
elif requested == '5.5.*' or requested == '>=5.5':
selected = self._ctx['PHP_55_LATEST']
elif requested == '5.6.*' or requested == '>=5.6':
selected = self._ctx['PHP_56_LATEST']
elif requested == '7.0.*' or requested == '>=7.0':
selected = self._ctx['PHP_70_LATEST']
elif requested.startswith('5.5.'):
selected = requested
elif requested.startswith('5.6.'):
selected = requested
elif requested.startswith('7.0.'):
selected = requested
else:
selected = self._ctx['PHP_VERSION']
return selected
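# Illustrative mapping (added comment, derived from the branches above):
#   requested is None              -> ctx['PHP_VERSION'] (the buildpack default)
#   '5.5.*' or '>=5.5'             -> ctx['PHP_55_LATEST']
#   '5.6.*' or '>=5.6'             -> ctx['PHP_56_LATEST']
#   '7.0.*' or '>=7.0'             -> ctx['PHP_70_LATEST']
#   exact '5.5.x'/'5.6.x'/'7.0.x'  -> used as given
#   anything else                  -> ctx['PHP_VERSION']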
def get_composer_contents(self, file_path):
try:
composer = json.load(open(file_path, 'r'))
except ValueError, e:
sys.tracebacklimit = 0
sys.stderr.write('-------> Invalid JSON present in {0}. Parser said: "{1}"'
.format(os.path.basename(file_path), e.message))
sys.stderr.write("\n")
sys.exit(1)
return composer
def read_version_from_composer(self, key):
(json_path, lock_path) = find_composer_paths(self._ctx)
if json_path is not None:
composer = self.get_composer_contents(json_path)
require = composer.get('require', {})
return require.get(key, None)
if lock_path is not None:
composer = self.get_composer_contents(lock_path)
platform = composer.get('platform', {})
return platform.get(key, None)
return None
def configure(self):
if self.json_path or self.lock_path:
exts = []
# include any existing extensions
exts.extend(self._ctx.get('PHP_EXTENSIONS', []))
# add 'openssl' extension
exts.append('openssl')
# add platform extensions from composer.json & composer.lock
exts.extend(self.read_exts_from_path(self.json_path))
exts.extend(self.read_exts_from_path(self.lock_path))
# update the context with the selected PHP version and the
# combined extension list
php_version = self.read_version_from_composer('php')
self._log.debug('Composer picked PHP Version [%s]',
php_version)
self._ctx['PHP_VERSION'] = self.pick_php_version(php_version)
self._ctx['PHP_EXTENSIONS'] = utils.unique(exts)
self._ctx['PHP_VM'] = 'php'
class ComposerExtension(ExtensionHelper):
def __init__(self, ctx):
ExtensionHelper.__init__(self, ctx)
self._log = _log
def _defaults(self):
manifest_file_path = os.path.join(self._ctx["BP_DIR"], "manifest.yml")
compile_ext = CompileExtensions(self._ctx["BP_DIR"])
_, default_version = compile_ext.default_version_for(manifest_file_path=manifest_file_path, dependency="composer")
return {
'COMPOSER_VERSION': default_version,
'COMPOSER_PACKAGE': 'composer.phar',
'COMPOSER_DOWNLOAD_URL': '/composer/'
'{COMPOSER_VERSION}/{COMPOSER_PACKAGE}',
'COMPOSER_INSTALL_OPTIONS': ['--no-interaction', '--no-dev'],
'COMPOSER_VENDOR_DIR': '{BUILD_DIR}/{LIBDIR}/vendor',
'COMPOSER_BIN_DIR': '{BUILD_DIR}/php/bin',
'COMPOSER_CACHE_DIR': '{CACHE_DIR}/composer'
}
def _should_compile(self):
(json_path, lock_path) = \
find_composer_paths(self._ctx)
return (json_path is not None or lock_path is not None)
def _compile(self, install):
self._builder = install.builder
self.composer_runner = ComposerCommandRunner(self._ctx, self._builder)
self.move_local_vendor_folder()
self.install()
self.run()
def move_local_vendor_folder(self):
vendor_path = os.path.join(self._ctx['BUILD_DIR'],
self._ctx['WEBDIR'],
'vendor')
if os.path.exists(vendor_path):
self._log.debug("Vendor [%s] exists, moving to LIBDIR",
vendor_path)
(self._builder.move()
.under('{BUILD_DIR}/{WEBDIR}')
.into('{BUILD_DIR}/{LIBDIR}')
.where_name_matches('^%s/.*$' % vendor_path)
.done())
def install(self):
self._builder.install().package('PHP').done()
if self._ctx['COMPOSER_VERSION'] == 'latest':
dependencies_path = os.path.join(self._ctx['BP_DIR'],
'dependencies')
if os.path.exists(dependencies_path):
raise RuntimeError('"COMPOSER_VERSION": "latest" ' \
'is not supported in the cached buildpack. Please vendor your preferred version of composer with your app, or use the provided default composer version.')
self._ctx['COMPOSER_DOWNLOAD_URL'] = \
'https://getcomposer.org/composer.phar'
self._builder.install()._installer.install_binary_direct(
self._ctx['COMPOSER_DOWNLOAD_URL'], None,
os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
extract=False)
else:
self._builder.install()._installer._install_binary_from_manifest(
self._ctx['COMPOSER_DOWNLOAD_URL'],
os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
extract=False)
def _github_oauth_token_is_valid(self, candidate_oauth_token):
stringio_writer = StringIO.StringIO()
curl_command = 'curl -H "Authorization: token %s" ' \
'https://api.github.com/rate_limit' % candidate_oauth_token
stream_output(stringio_writer,
curl_command,
env=os.environ,
cwd=self._ctx['BUILD_DIR'],
shell=True)
github_response = stringio_writer.getvalue()
github_response_json = json.loads(github_response)
return 'resources' in github_response_json
def _github_rate_exceeded(self, token_is_valid):
stringio_writer = StringIO.StringIO()
if token_is_valid:
candidate_oauth_token = os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN')
curl_command = 'curl -H "Authorization: token %s" ' \
'https://api.github.com/rate_limit' % candidate_oauth_token
else:
curl_command = 'curl https://api.github.com/rate_limit'
stream_output(stringio_writer,
curl_command,
env=os.environ,
cwd=self._ctx['BUILD_DIR'],
shell=True)
github_response = stringio_writer.getvalue()
github_response_json = json.loads(github_response)
rate = github_response_json['rate']
num_remaining = rate['remaining']
return num_remaining <= 0
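# Illustrative note (added comment): both helpers above shell out to curl against
# https://api.github.com/rate_limit; a token is considered valid when the JSON
# response contains a 'resources' key, and the limit is treated as exceeded when
# rate['remaining'] <= 0.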
def setup_composer_github_token(self):
github_oauth_token = os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN')
if self._github_oauth_token_is_valid(github_oauth_token):
print('-----> Using custom GitHub OAuth token in'
' $COMPOSER_GITHUB_OAUTH_TOKEN')
self.composer_runner.run('config', '-g',
'github-oauth.github.com',
'"%s"' % github_oauth_token)
return True
else:
print('-----> The GitHub OAuth token supplied from '
'$COMPOSER_GITHUB_OAUTH_TOKEN is invalid')
return False
def check_github_rate_exceeded(self, token_is_valid):
if self._github_rate_exceeded(token_is_valid):
print('-----> The GitHub api rate limit has been exceeded. '
'Composer will continue by downloading from source, which might result in slower downloads. '
'You can increase your rate limit with a GitHub OAuth token. '
'Please obtain a GitHub OAuth token by registering your application at '
'https://github.com/settings/applications/new. '
'Then set COMPOSER_GITHUB_OAUTH_TOKEN in your environment to the value of this token.')
def run(self):
# Move composer files into root directory
(json_path, lock_path) = find_composer_paths(self._ctx)
if json_path is not None and os.path.dirname(json_path) != self._ctx['BUILD_DIR']:
(self._builder.move()
.under(os.path.dirname(json_path))
.where_name_is('composer.json')
.into('BUILD_DIR')
.done())
if lock_path is not None and os.path.dirname(lock_path) != self._ctx['BUILD_DIR']:
(self._builder.move()
.under(os.path.dirname(lock_path))
.where_name_is('composer.lock')
.into('BUILD_DIR')
.done())
# Sanity Checks
if not os.path.exists(os.path.join(self._ctx['BUILD_DIR'],
'composer.lock')):
msg = (
'PROTIP: Include a `composer.lock` file with your '
'application! This will make sure the exact same versions '
'of dependencies are used when you deploy to CloudFoundry.')
self._log.warning(msg)
print msg
# dump composer version, if in debug mode
if self._ctx.get('BP_DEBUG', False):
self.composer_runner.run('-V')
if not os.path.exists(os.path.join(self._ctx['BP_DIR'], 'dependencies')):
token_is_valid = False
# config composer to use github token, if provided
if os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN', False):
token_is_valid = self.setup_composer_github_token()
# warn the user if the GitHub API rate limit has already been exceeded
self.check_github_rate_exceeded(token_is_valid)
# install dependencies w/Composer
self.composer_runner.run('install', '--no-progress',
*self._ctx['COMPOSER_INSTALL_OPTIONS'])
class ComposerCommandRunner(object):
def __init__(self, ctx, builder):
self._log = _log
self._ctx = ctx
self._strategy = PHPComposerStrategy(ctx)
self._php_path = self._strategy.binary_path()
self._composer_path = os.path.join(ctx['BUILD_DIR'], 'php',
'bin', 'composer.phar')
self._strategy.write_config(builder)
def _build_composer_environment(self):
env = {}
for key in os.environ.keys():
val = self._ctx.get(key, '')
env[key] = val if type(val) == str else json.dumps(val)
# add basic composer vars
env['COMPOSER_VENDOR_DIR'] = self._ctx['COMPOSER_VENDOR_DIR']
env['COMPOSER_BIN_DIR'] = self._ctx['COMPOSER_BIN_DIR']
env['COMPOSER_CACHE_DIR'] = self._ctx['COMPOSER_CACHE_DIR']
# prevent key system variables from being overridden
env['LD_LIBRARY_PATH'] = self._strategy.ld_library_path()
env['PHPRC'] = self._ctx['TMPDIR']
env['PATH'] = ':'.join(filter(None,
[env.get('PATH', ''),
os.path.dirname(self._php_path)]))
self._log.debug("ENV IS: %s",
'\n'.join(["%s=%s (%s)" % (key, val, type(val))
for (key, val) in env.iteritems()]))
return env
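# Illustrative note (added comment): the environment handed to composer is built
# from the context (values serialized to strings), plus the composer-specific
# vars defined in _defaults() (COMPOSER_VENDOR_DIR, COMPOSER_BIN_DIR,
# COMPOSER_CACHE_DIR), PHPRC pointed at the rewritten php.ini in TMPDIR, and
# PATH extended with the directory of the bundled php binary.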
def run(self, *args):
try:
cmd = [self._php_path, self._composer_path]
cmd.extend(args)
self._log.debug("Running command [%s]", ' '.join(cmd))
stream_output(sys.stdout,
' '.join(cmd),
env=self._build_composer_environment(),
cwd=self._ctx['BUILD_DIR'],
shell=True)
except:
print "-----> Composer command failed"
raise
class PHPComposerStrategy(object):
def __init__(self, ctx):
self._ctx = ctx
def binary_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'php', 'bin', 'php')
def write_config(self, builder):
# rewrite a temp copy of php.ini for use by composer
(builder.copy()
.under('{BUILD_DIR}/php/etc')
.where_name_is('php.ini')
.into('TMPDIR')
.done())
utils.rewrite_cfgs(os.path.join(self._ctx['TMPDIR'], 'php.ini'),
{'TMPDIR': self._ctx['TMPDIR'],
'HOME': self._ctx['BUILD_DIR']},
delim='@')
def ld_library_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'php', 'lib')
# Extension Methods
def configure(ctx):
config = ComposerConfiguration(ctx)
config.configure()
def preprocess_commands(ctx):
composer = ComposerExtension(ctx)
return composer.preprocess_commands()
def service_commands(ctx):
composer = ComposerExtension(ctx)
return composer.service_commands()
def service_environment(ctx):
composer = ComposerExtension(ctx)
return composer.service_environment()
def compile(install):
composer = ComposerExtension(install.builder._ctx)
return composer.compile(install)
|
|
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from six import moves
from webob import exc
from neutron import context
from neutron_lbaas.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as st_db
from neutron.extensions import loadbalancer
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron_lbaas.services.loadbalancer.drivers.common import agent_driver_base
from neutron.tests import base
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
from neutron.tests.unit import testlib_api
class TestLoadBalancerPluginBase(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
def reset_device_driver():
agent_driver_base.AgentDriverBase.device_driver = None
self.addCleanup(reset_device_driver)
self.mock_importer = mock.patch.object(
agent_driver_base, 'importutils').start()
# needed to reload provider configuration
st_db.ServiceTypeManager._instance = None
agent_driver_base.AgentDriverBase.device_driver = 'dummy'
super(TestLoadBalancerPluginBase, self).setUp(
lbaas_provider=('LOADBALANCER:lbaas:neutron.services.'
'loadbalancer.drivers.common.agent_driver_base.'
'AgentDriverBase:default'))
# we need access to loaded plugins to modify models
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase):
def setUp(self):
super(TestLoadBalancerCallbacks, self).setUp()
self.callbacks = agent_driver_base.LoadBalancerCallbacks(
self.plugin_instance
)
get_lbaas_agents_patcher = mock.patch(
'neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.get_lbaas_agents')
get_lbaas_agents_patcher.start()
def test_get_ready_devices(self):
with self.vip() as vip:
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual(ready, [vip['vip']['pool_id']])
def test_get_ready_devices_multiple_vips_and_pools(self):
ctx = context.get_admin_context()
# add 3 pools and 2 vips directly to DB
# to create 2 "ready" devices and one pool without vip
pools = []
for i in moves.xrange(3):
pools.append(ldb.Pool(id=uuidutils.generate_uuid(),
subnet_id=self._subnet_id,
protocol="HTTP",
lb_method="ROUND_ROBIN",
status=constants.ACTIVE,
admin_state_up=True))
ctx.session.add(pools[i])
vip0 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[0].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip0)
pools[0].vip_id = vip0.id
vip1 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[1].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip1)
pools[1].vip_id = vip1.id
ctx.session.flush()
self.assertEqual(ctx.session.query(ldb.Pool).count(), 3)
self.assertEqual(ctx.session.query(ldb.Vip).count(), 2)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin'
'.list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {'pools': [{'id': pools[0].id},
{'id': pools[1].id},
{'id': pools[2].id}]}
ready = self.callbacks.get_ready_devices(ctx)
self.assertEqual(len(ready), 3)
self.assertIn(pools[0].id, ready)
self.assertIn(pools[1].id, ready)
self.assertIn(pools[2].id, ready)
# cleanup
ctx.session.query(ldb.Pool).delete()
ctx.session.query(ldb.Vip).delete()
def test_get_ready_devices_inactive_vip(self):
with self.vip() as vip:
# set the vip inactive; we need to use the plugin directly since
# status is not tenant-mutable
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['vip']['id'],
{'vip': {'status': constants.INACTIVE}}
)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual([vip['vip']['pool_id']], ready)
def test_get_ready_devices_inactive_pool(self):
with self.vip() as vip:
# set the pool inactive; we need to use the plugin directly since
# status is not tenant-mutable
self.plugin_instance.update_pool(
context.get_admin_context(),
vip['vip']['pool_id'],
{'pool': {'status': constants.INACTIVE}}
)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertFalse(ready)
def test_get_logical_device_non_active(self):
with self.pool() as pool:
ctx = context.get_admin_context()
for status in ('INACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE'):
self.plugin_instance.update_status(
ctx, ldb.Pool, pool['pool']['id'], status)
pool['pool']['status'] = status
expected = {
'pool': pool['pool'],
'members': [],
'healthmonitors': [],
'driver': 'dummy'
}
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id']
)
self.assertEqual(expected, logical_config)
def test_get_logical_device_active(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
# activate objects
self.plugin_instance.update_status(
ctx, ldb.Pool, pool['pool']['id'], 'ACTIVE')
self.plugin_instance.update_status(
ctx, ldb.Member, member['member']['id'], 'ACTIVE')
self.plugin_instance.update_status(
ctx, ldb.Vip, vip['vip']['id'], 'ACTIVE')
# build the expected
port = self.plugin_instance._core_plugin.get_port(
ctx, vip['vip']['port_id']
)
subnet = self.plugin_instance._core_plugin.get_subnet(
ctx, vip['vip']['subnet_id']
)
port['fixed_ips'][0]['subnet'] = subnet
# reload pool to add members and vip
pool = self.plugin_instance.get_pool(
ctx, pool['pool']['id']
)
pool['status'] = constants.ACTIVE
vip['vip']['status'] = constants.ACTIVE
vip['vip']['port'] = port
member['member']['status'] = constants.ACTIVE
expected = {
'pool': pool,
'vip': vip['vip'],
'members': [member['member']],
'healthmonitors': [],
'driver': 'dummy'
}
logical_config = self.callbacks.get_logical_device(
ctx, pool['id']
)
self.assertEqual(logical_config, expected)
def test_get_logical_device_inactive_member(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Member,
member['member']['id'],
'INACTIVE')
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id'])
member['member']['status'] = constants.INACTIVE
self.assertEqual([member['member']],
logical_config['members'])
def test_get_logical_device_pending_create_member(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
member = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('PENDING_CREATE',
member['status'])
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id'])
self.assertEqual([member], logical_config['members'])
def test_get_logical_device_pending_create_health_monitor(self):
with self.health_monitor() as monitor:
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
self.plugin_instance.create_pool_health_monitor(
ctx, monitor, pool['pool']['id'])
pool = self.plugin_instance.get_pool(
ctx, pool['pool']['id'])
monitor = self.plugin_instance.get_health_monitor(
ctx, monitor['health_monitor']['id'])
self.assertEqual(
'PENDING_CREATE',
pool['health_monitors_status'][0]['status'])
logical_config = self.callbacks.get_logical_device(
ctx, pool['id'])
self.assertEqual([monitor],
logical_config['healthmonitors'])
def _update_port_test_helper(self, expected, func, **kwargs):
core = self.plugin_instance._core_plugin
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']):
ctx = context.get_admin_context()
func(ctx, port_id=vip['vip']['port_id'], **kwargs)
db_port = core.get_port(ctx, vip['vip']['port_id'])
for k, v in expected.iteritems():
self.assertEqual(db_port[k], v)
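# Illustrative note (added comment): the helper above creates a pool/vip/member,
# invokes the given callback (e.g. plug_vip_port) against the vip's port, and
# then asserts that the port row carries the expected attributes
# (device_owner, device_id, admin_state_up, ...).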
def test_plug_vip_port(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f',
'admin_state_up': True
}
self._update_port_test_helper(
exp,
self.callbacks.plug_vip_port,
host='host'
)
def test_plug_vip_port_mock_with_host(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f',
'admin_state_up': True,
portbindings.HOST_ID: 'host'
}
with mock.patch.object(
self.plugin._core_plugin, 'update_port') as mock_update_port:
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
self.callbacks.plug_vip_port(
ctx, port_id=vip['vip']['port_id'], host='host')
mock_update_port.assert_called_once_with(
ctx, vip['vip']['port_id'],
{'port': testlib_api.SubDictMatch(exp)})
def test_unplug_vip_port(self):
exp = {
'device_owner': '',
'device_id': '',
'admin_state_up': False
}
self._update_port_test_helper(
exp,
self.callbacks.unplug_vip_port,
host='host'
)
def test_pool_deployed(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
self.assertEqual('PENDING_CREATE', p['status'])
v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
self.assertEqual('PENDING_CREATE', v['status'])
m = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('PENDING_CREATE', m['status'])
self.callbacks.pool_deployed(ctx, pool['pool']['id'])
p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
self.assertEqual('ACTIVE', p['status'])
v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
self.assertEqual('ACTIVE', v['status'])
m = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('ACTIVE', m['status'])
def test_update_status_pool(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
p = self.plugin_instance.get_pool(ctx, pool_id)
self.assertEqual('PENDING_CREATE', p['status'])
self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE')
p = self.plugin_instance.get_pool(ctx, pool_id)
self.assertEqual('ACTIVE', p['status'])
def test_update_status_pool_deleted_already(self):
with mock.patch.object(agent_driver_base, 'LOG') as mock_log:
pool_id = 'deleted_pool'
ctx = context.get_admin_context()
self.assertRaises(loadbalancer.PoolNotFound,
self.plugin_instance.get_pool, ctx, pool_id)
self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE')
self.assertTrue(mock_log.warning.called)
def test_update_status_health_monitor(self):
with contextlib.nested(
self.health_monitor(),
self.pool()
) as (hm, pool):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
hm_id = hm['health_monitor']['id']
h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
pool_id)
self.assertEqual('PENDING_CREATE', h['status'])
self.callbacks.update_status(
ctx, 'health_monitor',
{'monitor_id': hm_id, 'pool_id': pool_id}, 'ACTIVE')
h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
pool_id)
self.assertEqual('ACTIVE', h['status'])
class TestLoadBalancerAgentApi(base.BaseTestCase):
def setUp(self):
super(TestLoadBalancerAgentApi, self).setUp()
self.api = agent_driver_base.LoadBalancerAgentApi('topic')
def test_init(self):
self.assertEqual(self.api.client.target.topic, 'topic')
def _call_test_helper(self, method_name, method_args):
with contextlib.nested(
mock.patch.object(self.api.client, 'cast'),
mock.patch.object(self.api.client, 'prepare'),
) as (
rpc_mock, prepare_mock
):
prepare_mock.return_value = self.api.client
getattr(self.api, method_name)(mock.sentinel.context,
host='host',
**method_args)
prepare_args = {'server': 'host'}
prepare_mock.assert_called_once_with(**prepare_args)
if method_name == 'agent_updated':
method_args = {'payload': method_args}
rpc_mock.assert_called_once_with(mock.sentinel.context, method_name,
**method_args)
def test_agent_updated(self):
self._call_test_helper('agent_updated', {'admin_state_up': 'test'})
def test_create_pool(self):
self._call_test_helper('create_pool', {'pool': 'test',
'driver_name': 'dummy'})
def test_update_pool(self):
self._call_test_helper('update_pool', {'old_pool': 'test',
'pool': 'test'})
def test_delete_pool(self):
self._call_test_helper('delete_pool', {'pool': 'test'})
def test_create_vip(self):
self._call_test_helper('create_vip', {'vip': 'test'})
def test_update_vip(self):
self._call_test_helper('update_vip', {'old_vip': 'test',
'vip': 'test'})
def test_delete_vip(self):
self._call_test_helper('delete_vip', {'vip': 'test'})
def test_create_member(self):
self._call_test_helper('create_member', {'member': 'test'})
def test_update_member(self):
self._call_test_helper('update_member', {'old_member': 'test',
'member': 'test'})
def test_delete_member(self):
self._call_test_helper('delete_member', {'member': 'test'})
def test_create_monitor(self):
self._call_test_helper('create_pool_health_monitor',
{'health_monitor': 'test', 'pool_id': 'test'})
def test_update_monitor(self):
self._call_test_helper('update_pool_health_monitor',
{'old_health_monitor': 'test',
'health_monitor': 'test',
'pool_id': 'test'})
def test_delete_monitor(self):
self._call_test_helper('delete_pool_health_monitor',
{'health_monitor': 'test', 'pool_id': 'test'})
class TestLoadBalancerPluginNotificationWrapper(TestLoadBalancerPluginBase):
def setUp(self):
self.log = mock.patch.object(agent_driver_base, 'LOG')
api_cls = mock.patch.object(agent_driver_base,
'LoadBalancerAgentApi').start()
super(TestLoadBalancerPluginNotificationWrapper, self).setUp()
self.mock_api = api_cls.return_value
self.mock_get_driver = mock.patch.object(self.plugin_instance,
'_get_driver')
self.mock_get_driver.return_value = (agent_driver_base.
AgentDriverBase(
self.plugin_instance
))
def test_create_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
self.mock_api.create_vip.assert_called_once_with(
mock.ANY,
vip['vip'],
'host'
)
def test_update_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
ctx = context.get_admin_context()
old_vip = vip['vip'].copy()
vip['vip'].pop('status')
new_vip = self.plugin_instance.update_vip(
ctx,
vip['vip']['id'],
vip
)
self.mock_api.update_vip.assert_called_once_with(
mock.ANY,
old_vip,
new_vip,
'host'
)
self.assertEqual(
new_vip['status'],
constants.PENDING_UPDATE
)
def test_delete_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet,
do_delete=False) as vip:
ctx = context.get_admin_context()
self.plugin_instance.delete_vip(ctx, vip['vip']['id'])
vip['vip']['status'] = 'PENDING_DELETE'
self.mock_api.delete_vip.assert_called_once_with(
mock.ANY,
vip['vip'],
'host'
)
def test_create_pool(self):
with self.pool() as pool:
self.mock_api.create_pool.assert_called_once_with(
mock.ANY,
pool['pool'],
mock.ANY,
'dummy'
)
def test_update_pool_non_active(self):
with self.pool() as pool:
pool['pool']['status'] = 'INACTIVE'
ctx = context.get_admin_context()
orig_pool = pool['pool'].copy()
del pool['pool']['provider']
self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
self.mock_api.delete_pool.assert_called_once_with(
mock.ANY, orig_pool, 'host')
def test_update_pool_no_vip_id(self):
with self.pool() as pool:
ctx = context.get_admin_context()
orig_pool = pool['pool'].copy()
del pool['pool']['provider']
updated = self.plugin_instance.update_pool(
ctx, pool['pool']['id'], pool)
self.mock_api.update_pool.assert_called_once_with(
mock.ANY, orig_pool, updated, 'host')
def test_update_pool_with_vip_id(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
old_pool = pool['pool'].copy()
old_pool['vip_id'] = vip['vip']['id']
del pool['pool']['provider']
updated = self.plugin_instance.update_pool(
ctx, pool['pool']['id'], pool)
self.mock_api.update_pool.assert_called_once_with(
mock.ANY, old_pool, updated, 'host')
def test_delete_pool(self):
with self.pool(do_delete=False) as pool:
req = self.new_delete_request('pools',
pool['pool']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
pool['pool']['status'] = 'PENDING_DELETE'
self.mock_api.delete_pool.assert_called_once_with(
mock.ANY, pool['pool'], 'host')
def test_create_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
self.mock_api.create_member.assert_called_once_with(
mock.ANY, member['member'], 'host')
def test_update_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
ctx = context.get_admin_context()
updated = self.plugin_instance.update_member(
ctx, member['member']['id'], member)
self.mock_api.update_member.assert_called_once_with(
mock.ANY, member['member'], updated, 'host')
def test_update_member_new_pool(self):
with self.pool() as pool1:
pool1_id = pool1['pool']['id']
with self.pool() as pool2:
pool2_id = pool2['pool']['id']
with self.member(pool_id=pool1_id) as member:
self.mock_api.create_member.reset_mock()
ctx = context.get_admin_context()
old_member = member['member'].copy()
member['member']['pool_id'] = pool2_id
updated = self.plugin_instance.update_member(
ctx, member['member']['id'], member)
self.mock_api.delete_member.assert_called_once_with(
mock.ANY, old_member, 'host')
self.mock_api.create_member.assert_called_once_with(
mock.ANY, updated, 'host')
def test_delete_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id,
do_delete=False) as member:
req = self.new_delete_request('members',
member['member']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
member['member']['status'] = 'PENDING_DELETE'
self.mock_api.delete_member.assert_called_once_with(
mock.ANY, member['member'], 'host')
def test_create_pool_health_monitor(self):
with contextlib.nested(
self.health_monitor(),
self.pool(),
) as (hm, pool):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
# hm now has a ref to the pool with which it is associated
hm = self.plugin.get_health_monitor(
ctx, hm['health_monitor']['id'])
self.mock_api.create_pool_health_monitor.assert_called_once_with(
mock.ANY, hm, pool_id, 'host')
def test_delete_pool_health_monitor(self):
with contextlib.nested(
self.pool(),
self.health_monitor()
) as (pool, hm):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
# hm now has a ref to the pool with which it is associated
hm = self.plugin.get_health_monitor(
ctx, hm['health_monitor']['id'])
hm['pools'][0]['status'] = 'PENDING_DELETE'
self.plugin_instance.delete_pool_health_monitor(
ctx, hm['id'], pool_id)
self.mock_api.delete_pool_health_monitor.assert_called_once_with(
mock.ANY, hm, pool_id, 'host')
def test_update_health_monitor_associated_with_pool(self):
with contextlib.nested(
self.health_monitor(type='HTTP'),
self.pool()
) as (monitor, pool):
data = {
'health_monitor': {
'id': monitor['health_monitor']['id'],
'tenant_id': self._tenant_id
}
}
req = self.new_create_request(
'pools',
data,
fmt=self.fmt,
id=pool['pool']['id'],
subresource='health_monitors')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
# hm now has a ref to the pool with which it is associated
ctx = context.get_admin_context()
hm = self.plugin.get_health_monitor(
ctx, monitor['health_monitor']['id'])
self.mock_api.create_pool_health_monitor.assert_called_once_with(
mock.ANY,
hm,
pool['pool']['id'],
'host'
)
self.mock_api.reset_mock()
data = {'health_monitor': {'delay': 20,
'timeout': 20,
'max_retries': 2,
'admin_state_up': False}}
updated = hm.copy()
updated.update(data['health_monitor'])
req = self.new_update_request("health_monitors",
data,
monitor['health_monitor']['id'])
req.get_response(self.ext_api)
self.mock_api.update_pool_health_monitor.assert_called_once_with(
mock.ANY,
hm,
updated,
pool['pool']['id'],
'host')
|
|
import datetime
import json
import re
import sys
import time
from email.header import Header
from http.client import responses
from urllib.parse import urlparse
from django.conf import settings
from django.core import signals, signing
from django.core.exceptions import DisallowedRedirect
from django.core.serializers.json import DjangoJSONEncoder
from django.http.cookie import SimpleCookie
from django.utils import timezone
from django.utils.encoding import force_bytes, iri_to_uri
from django.utils.http import cookie_date
_charset_from_content_type_re = re.compile(r';\s*charset=(?P<charset>[^\s;]+)', re.I)
class BadHeaderError(ValueError):
pass
class HttpResponseBase:
"""
An HTTP response base class with dictionary-accessed headers.
This class doesn't handle content. It should not be used directly.
Use the HttpResponse and StreamingHttpResponse subclasses instead.
"""
status_code = 200
def __init__(self, content_type=None, status=None, reason=None, charset=None):
# _headers is a mapping of the lower-case name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._closable_objects = []
# This parameter is set by the handler. It's necessary to preserve the
# historical behavior of request_finished.
self._handler_class = None
self.cookies = SimpleCookie()
self.closed = False
if status is not None:
try:
self.status_code = int(status)
except (ValueError, TypeError):
raise TypeError('HTTP status code must be an integer.')
if not 100 <= self.status_code <= 599:
raise ValueError('HTTP status code must be an integer from 100 to 599.')
self._reason_phrase = reason
self._charset = charset
if content_type is None:
content_type = '%s; charset=%s' % (settings.DEFAULT_CONTENT_TYPE,
self.charset)
self['Content-Type'] = content_type
@property
def reason_phrase(self):
if self._reason_phrase is not None:
return self._reason_phrase
# Leave self._reason_phrase unset in order to use the default
# reason phrase for the status code.
return responses.get(self.status_code, 'Unknown Status Code')
@reason_phrase.setter
def reason_phrase(self, value):
self._reason_phrase = value
@property
def charset(self):
if self._charset is not None:
return self._charset
content_type = self.get('Content-Type', '')
matched = _charset_from_content_type_re.search(content_type)
if matched:
# Extract the charset and strip its double quotes
return matched.group('charset').replace('"', '')
return settings.DEFAULT_CHARSET
@charset.setter
def charset(self, value):
self._charset = value
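# Illustrative example (added comment): the charset comes from an explicit
# constructor argument first, then from the Content-Type header, e.g. a response
# constructed with content_type='text/html; charset="iso-8859-1"' reports
# .charset == 'iso-8859-1', and otherwise falls back to settings.DEFAULT_CHARSET.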
def serialize_headers(self):
"""HTTP headers as a bytestring."""
def to_bytes(val, encoding):
return val if isinstance(val, bytes) else val.encode(encoding)
headers = [
(b': '.join([to_bytes(key, 'ascii'), to_bytes(value, 'latin-1')]))
for key, value in self._headers.values()
]
return b'\r\n'.join(headers)
__bytes__ = serialize_headers
@property
def _content_type_for_repr(self):
return ', "%s"' % self['Content-Type'] if 'Content-Type' in self else ''
def _convert_to_charset(self, value, charset, mime_encode=False):
"""
Convert headers key/value to ascii/latin-1 native strings.
`charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and
`value` can't be represented in the given charset, apply MIME-encoding.
"""
if not isinstance(value, (bytes, str)):
value = str(value)
if ((isinstance(value, bytes) and (b'\n' in value or b'\r' in value)) or
isinstance(value, str) and ('\n' in value or '\r' in value)):
raise BadHeaderError("Header values can't contain newlines (got %r)" % value)
try:
if isinstance(value, str):
# Ensure string is valid in given charset
value.encode(charset)
else:
# Convert bytestring using given charset
value = value.decode(charset)
except UnicodeError as e:
if mime_encode:
value = Header(value, 'utf-8', maxlinelen=sys.maxsize).encode()
else:
e.reason += ', HTTP response headers must be in %s format' % charset
raise
return value
def __setitem__(self, header, value):
header = self._convert_to_charset(header, 'ascii')
value = self._convert_to_charset(value, 'latin-1', mime_encode=True)
self._headers[header.lower()] = (header, value)
def __delitem__(self, header):
try:
del self._headers[header.lower()]
except KeyError:
pass
def __getitem__(self, header):
return self._headers[header.lower()][1]
def has_header(self, header):
"""Case-insensitive check for a header."""
return header.lower() in self._headers
__contains__ = has_header
def items(self):
return self._headers.values()
def get(self, header, alternate=None):
return self._headers.get(header.lower(), (None, alternate))[1]
def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
"""
Set a cookie.
``expires`` can be:
- a string in the correct format,
- a naive ``datetime.datetime`` object in UTC,
- an aware ``datetime.datetime`` object in any time zone.
If it is a ``datetime.datetime`` object then calculate ``max_age``.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
if timezone.is_aware(expires):
expires = timezone.make_naive(expires, timezone.utc)
delta = expires - expires.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
else:
self.cookies[key]['expires'] = ''
if max_age is not None:
self.cookies[key]['max-age'] = max_age
            # IE requires expires, so set it if it hasn't been set already.
if not expires:
self.cookies[key]['expires'] = cookie_date(time.time() +
max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
if httponly:
self.cookies[key]['httponly'] = True
def setdefault(self, key, value):
"""Set a header unless it has already been set."""
if key not in self:
self[key] = value
def set_signed_cookie(self, key, value, salt='', **kwargs):
value = signing.get_cookie_signer(salt=key + salt).sign(value)
return self.set_cookie(key, value, **kwargs)
def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
# Common methods used by subclasses
def make_bytes(self, value):
"""Turn a value into a bytestring encoded in the output charset."""
# Per PEP 3333, this response body must be bytes. To avoid returning
# an instance of a subclass, this function returns `bytes(value)`.
# This doesn't make a copy when `value` already contains bytes.
# Handle string types -- we can't rely on force_bytes here because:
# - Python attempts str conversion first
# - when self._charset != 'utf-8' it re-encodes the content
if isinstance(value, bytes):
return bytes(value)
if isinstance(value, str):
return bytes(value.encode(self.charset))
# Handle non-string types (#16494)
return force_bytes(value, self.charset)
# These methods partially implement the file-like object interface.
# See https://docs.python.org/3/library/io.html#io.IOBase
# The WSGI server must call this method upon completion of the request.
# See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
def close(self):
for closable in self._closable_objects:
try:
closable.close()
except Exception:
pass
self.closed = True
signals.request_finished.send(sender=self._handler_class)
def write(self, content):
raise IOError("This %s instance is not writable" % self.__class__.__name__)
def flush(self):
pass
def tell(self):
raise IOError("This %s instance cannot tell its position" % self.__class__.__name__)
# These methods partially implement a stream-like object interface.
# See https://docs.python.org/library/io.html#io.IOBase
def readable(self):
return False
def seekable(self):
return False
def writable(self):
return False
def writelines(self, lines):
raise IOError("This %s instance is not writable" % self.__class__.__name__)
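# Illustrative sketch (editor's addition; `_expires_in` is a hypothetical
# module-level helper, not Django API): set_cookie() above accepts an aware
# datetime for ``expires`` and converts it to a ``max-age``; a value a few
# hours ahead (aware when USE_TZ is enabled) can be built with the utilities
# already imported in this module.
def _expires_in(hours):
    return timezone.now() + datetime.timedelta(hours=hours)
# e.g. response.set_cookie('session_hint', '1', expires=_expires_in(2))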
class HttpResponse(HttpResponseBase):
"""
An HTTP response class with a string as content.
    This content can be read, appended to, or replaced.
"""
streaming = False
def __init__(self, content=b'', *args, **kwargs):
super().__init__(*args, **kwargs)
# Content is a bytestring. See the `content` property methods.
self.content = content
def __repr__(self):
return '<%(cls)s status_code=%(status_code)d%(content_type)s>' % {
'cls': self.__class__.__name__,
'status_code': self.status_code,
'content_type': self._content_type_for_repr,
}
def serialize(self):
"""Full HTTP message, including headers, as a bytestring."""
return self.serialize_headers() + b'\r\n\r\n' + self.content
__bytes__ = serialize
@property
def content(self):
return b''.join(self._container)
@content.setter
def content(self, value):
# Consume iterators upon assignment to allow repeated iteration.
if hasattr(value, '__iter__') and not isinstance(value, (bytes, str)):
content = b''.join(self.make_bytes(chunk) for chunk in value)
if hasattr(value, 'close'):
try:
value.close()
except Exception:
pass
else:
content = self.make_bytes(value)
# Create a list of properly encoded bytestrings to support write().
self._container = [content]
def __iter__(self):
return iter(self._container)
def write(self, content):
self._container.append(self.make_bytes(content))
def tell(self):
return len(self.content)
def getvalue(self):
return self.content
def writable(self):
return True
def writelines(self, lines):
for line in lines:
self.write(line)
class StreamingHttpResponse(HttpResponseBase):
"""
A streaming HTTP response class with an iterator as content.
This should only be iterated once, when the response is streamed to the
client. However, it can be appended to or replaced with a new iterator
that wraps the original content (or yields entirely new content).
"""
streaming = True
def __init__(self, streaming_content=(), *args, **kwargs):
super().__init__(*args, **kwargs)
# `streaming_content` should be an iterable of bytestrings.
# See the `streaming_content` property methods.
self.streaming_content = streaming_content
@property
def content(self):
raise AttributeError(
"This %s instance has no `content` attribute. Use "
"`streaming_content` instead." % self.__class__.__name__
)
@property
def streaming_content(self):
return map(self.make_bytes, self._iterator)
@streaming_content.setter
def streaming_content(self, value):
self._set_streaming_content(value)
def _set_streaming_content(self, value):
# Ensure we can never iterate on "value" more than once.
self._iterator = iter(value)
if hasattr(value, 'close'):
self._closable_objects.append(value)
def __iter__(self):
return self.streaming_content
def getvalue(self):
return b''.join(self.streaming_content)
class FileResponse(StreamingHttpResponse):
"""
A streaming HTTP response class optimized for files.
"""
block_size = 4096
def _set_streaming_content(self, value):
if hasattr(value, 'read'):
self.file_to_stream = value
filelike = value
if hasattr(filelike, 'close'):
self._closable_objects.append(filelike)
value = iter(lambda: filelike.read(self.block_size), b'')
else:
self.file_to_stream = None
super()._set_streaming_content(value)
class HttpResponseRedirectBase(HttpResponse):
allowed_schemes = ['http', 'https', 'ftp']
def __init__(self, redirect_to, *args, **kwargs):
super().__init__(*args, **kwargs)
self['Location'] = iri_to_uri(redirect_to)
parsed = urlparse(str(redirect_to))
if parsed.scheme and parsed.scheme not in self.allowed_schemes:
raise DisallowedRedirect("Unsafe redirect to URL with protocol '%s'" % parsed.scheme)
url = property(lambda self: self['Location'])
def __repr__(self):
return '<%(cls)s status_code=%(status_code)d%(content_type)s, url="%(url)s">' % {
'cls': self.__class__.__name__,
'status_code': self.status_code,
'content_type': self._content_type_for_repr,
'url': self.url,
}
class HttpResponseRedirect(HttpResponseRedirectBase):
status_code = 302
class HttpResponsePermanentRedirect(HttpResponseRedirectBase):
status_code = 301
class HttpResponseNotModified(HttpResponse):
status_code = 304
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
del self['content-type']
@HttpResponse.content.setter
def content(self, value):
if value:
raise AttributeError("You cannot set content to a 304 (Not Modified) response")
self._container = []
class HttpResponseBadRequest(HttpResponse):
status_code = 400
class HttpResponseNotFound(HttpResponse):
status_code = 404
class HttpResponseForbidden(HttpResponse):
status_code = 403
class HttpResponseNotAllowed(HttpResponse):
status_code = 405
def __init__(self, permitted_methods, *args, **kwargs):
super().__init__(*args, **kwargs)
self['Allow'] = ', '.join(permitted_methods)
def __repr__(self):
return '<%(cls)s [%(methods)s] status_code=%(status_code)d%(content_type)s>' % {
'cls': self.__class__.__name__,
'status_code': self.status_code,
'content_type': self._content_type_for_repr,
'methods': self['Allow'],
}
class HttpResponseGone(HttpResponse):
status_code = 410
class HttpResponseServerError(HttpResponse):
status_code = 500
class Http404(Exception):
pass
class JsonResponse(HttpResponse):
"""
An HTTP response class that consumes data to be serialized to JSON.
:param data: Data to be dumped into json. By default only ``dict`` objects
are allowed to be passed due to a security flaw before EcmaScript 5. See
the ``safe`` parameter for more information.
    :param encoder: Should be a JSON encoder class. Defaults to
``django.core.serializers.json.DjangoJSONEncoder``.
:param safe: Controls if only ``dict`` objects may be serialized. Defaults
to ``True``.
:param json_dumps_params: A dictionary of kwargs passed to json.dumps().
"""
def __init__(self, data, encoder=DjangoJSONEncoder, safe=True,
json_dumps_params=None, **kwargs):
if safe and not isinstance(data, dict):
raise TypeError(
'In order to allow non-dict objects to be serialized set the '
'safe parameter to False.'
)
if json_dumps_params is None:
json_dumps_params = {}
kwargs.setdefault('content_type', 'application/json')
data = json.dumps(data, cls=encoder, **json_dumps_params)
super().__init__(content=data, **kwargs)
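# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the module above):
# two hypothetical view functions showing how the response classes defined
# here are typically combined. The `request` argument and the file path are
# assumed purely for illustration.
def _example_json_view(request):
    # Serialize a dict, attach a cookie that expires in an hour, and set a
    # header via the dictionary-style access provided by HttpResponseBase.
    response = JsonResponse({'status': 'ok'}, json_dumps_params={'indent': 2})
    response.set_cookie('seen_api', '1', max_age=3600, httponly=True)
    response['Cache-Control'] = 'no-store'
    return response
def _example_download_view(request):
    # Stream a file in block_size chunks instead of loading it into memory;
    # FileResponse registers the open file in _closable_objects so it is
    # closed when the WSGI server calls close() on the response.
    return FileResponse(open('/tmp/report.bin', 'rb'),
                        content_type='application/octet-stream')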
|
|
#!/usr/bin/env python2
# This script relies on Python 2-only constructs (tuple-unpacking lambdas,
# integer division inside range() arguments), so it is pinned to python2.
import os
import json
import string
from random import choice, sample
from time import sleep
from math import log1p
ROOT = "/sys/bus/hid/drivers/hid-razer/0003:1532:020F.0001"
ROW_COUNT = 6
COL_COUNT = 16
def clamp(x):
return max(0, min(x, 255))
def write_to_file(filename, value):
with open(os.path.join(ROOT, filename), 'w') as outfile:
outfile.write(value)
def set_keyboard_rgb(rgb_list):
"""
    Set the RGB value of every key. Takes a list of (r, g, b) tuples, one per
    key, in row-major order.
"""
hex_str = ''.join(
map(
lambda tup: ''.join(map(chr, tup)),
rgb_list
)
)
write_to_file('set_key_colors', hex_str)
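# Illustrative sketch (editor's addition; `set_single_key` is a hypothetical
# helper, not part of the original script): light a single key by building a
# full ROW_COUNT x COL_COUNT frame and delegating to set_keyboard_rgb().
def set_single_key(row, col, rgb, bg=(0, 0, 0)):
    frame = [bg] * (ROW_COUNT * COL_COUNT)
    frame[row * COL_COUNT + col] = tuple(map(clamp, rgb))
    set_keyboard_rgb(frame)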
def solid_color(*rgb):
rgb_list = [rgb]*ROW_COUNT*COL_COUNT
set_keyboard_rgb(rgb_list)
def two_color_noise(rgb1,rgb2,variety=64,burst=True,secs=10,per_sec=10):
def set_colors():
rgb_list = sample([rgb1, rgb2]*ROW_COUNT*COL_COUNT*16, ROW_COUNT*COL_COUNT)
for i in range(ROW_COUNT*COL_COUNT):
r,g,b = rgb_list[i]
r += choice(range(-variety/2, variety/2))
g += choice(range(-variety/2, variety/2))
b += choice(range(-variety/2, variety/2))
rgb_list[i] = tuple(map(clamp, [r,g,b]))
set_keyboard_rgb(rgb_list)
if burst:
for _ in range(secs*per_sec):
set_colors()
sleep(1.0/per_sec)
else:
set_colors()
def monochrome_noise(red,green,blue,variety=64,burst=True,secs=10,per_sec=10):
def set_colors():
rgb_list = [(red,green,blue)]*ROW_COUNT*COL_COUNT
for i in range(ROW_COUNT*COL_COUNT):
r,g,b = rgb_list[i]
r += choice(range(-variety/2, variety/2))
g += choice(range(-variety/2, variety/2))
b += choice(range(-variety/2, variety/2))
rgb_list[i] = tuple(map(clamp, [r,g,b]))
# print rgb_list
set_keyboard_rgb(rgb_list)
if burst:
for _ in range(secs*per_sec):
set_colors()
sleep(1.0/per_sec)
else:
set_colors()
def random_burst(secs=10, per_sec=10, bright=False, sparseness=0):
    """
    Flash every key with a random colour each frame; ``bright`` restricts
    channels to 0/255 and ``sparseness`` raises the share of keys left dark.
    """
for interval in range(secs*per_sec):
if bright:
rng = [0,255]
else:
rng = range(256)
rgb_list = [
choice([tuple([choice(rng) for x in range(3)])]+[(0,0,0)]*sparseness) for y in range(ROW_COUNT*COL_COUNT)
]
# print rgb_list
set_keyboard_rgb(rgb_list)
sleep(1.0/per_sec)
def perlin_noise(secs=20, per_sec=15, vertical=False):
    """
    Animate smooth colour fields over the key grid using 3-D Perlin noise,
    one independently seeded field per RGB channel (``vertical`` is currently
    unused).
    """
from noise import pnoise3
t = 0.0
y = 0.0
x = 0.0
random_base = choice(range(512))
for interval in range(secs*per_sec):
key_vals = [list() for _ in range(ROW_COUNT)]
for i in range(ROW_COUNT):
y += 3.3
for j in range(COL_COUNT):
x += 3.3
r_val = clamp(int(pnoise3(x,y,t, octaves=8, repeatx=1, repeaty=1, repeatz=512, base=random_base) * 256))
g_val = clamp(int(pnoise3(x,y,t, octaves=8, repeatx=1, repeaty=1, repeatz=512, base=random_base+19) * 256))
b_val = clamp(int(pnoise3(x,y,t, octaves=8, repeatx=1, repeaty=1, repeatz=512, base=random_base+61) * 256))
# print r_val, g_val, b_val
key_vals[i].append( (r_val, g_val, b_val) )
key_list = list()
for row_list in key_vals:
key_list.extend(row_list)
set_keyboard_rgb(key_list)
sleep(1.0/per_sec)
t += 0.01
def r_wipe(count=5, r_color=(255,0,0), bg_color=(0,255,0), twinkle=True, line=False):
    """
    Sweep a four-column glyph (or a single column when ``line`` is True)
    across the keyboard ``count`` times, optionally twinkling the background.
    """
def make_random_color(dominant_ix, variety):
init_rgb = sample(range(variety)*3, 3)
init_rgb[dominant_ix] = choice(range(256-variety,256))
return tuple(init_rgb)
if line:
starting_pt = 0
line_variance = 64
else:
starting_pt = -3
line_variance = 1
for interval in range(count):
for i in range(starting_pt, COL_COUNT):
if twinkle:
dominant_bg_ix = bg_color.index(max(bg_color))
rgb_list = [
make_random_color(dominant_bg_ix, 100) for y in range(ROW_COUNT*COL_COUNT)
]
else:
rgb_list = [bg_color]*ROW_COUNT*COL_COUNT
            # Key indices of the wipe's base column, one per row.
            rl = [i+x for x in range(0, ROW_COUNT*COL_COUNT, 16)]
            if not line:
                # The glyph is four columns wide (base column plus offsets
                # +1..+3); draw only the offset columns that fall on the
                # 16-column grid for the current base column i.
                to_add = []
                if i == -3:
                    to_add = [ rl[0]+3, rl[1]+3, rl[4]+3, rl[5]+3 ]
                elif i == -2:
                    to_add = [ rl[0]+2, rl[0]+3, rl[1]+3, rl[2]+2, rl[3]+2, rl[4]+2, rl[4]+3, rl[5]+3 ]
                elif -1 <= i <= 12:
                    to_add = [ rl[0]+1, rl[0]+2, rl[0]+3, rl[1]+3, rl[2]+1, rl[2]+2, rl[3]+2, rl[4]+2, rl[4]+3, rl[5]+3 ]
                elif i == 13:
                    to_add = [ rl[0]+1, rl[0]+2, rl[2]+1, rl[2]+2, rl[3]+2, rl[4]+2 ]
                elif i == 14:
                    to_add = [ rl[0]+1, rl[2]+1 ]
                if i < 0:
                    # The base column is still off-screen; draw only the
                    # visible offset columns.
                    rl = to_add
                else:
                    rl.extend(to_add)
dominant_r_ix = r_color.index(max(r_color))
for ix in rl:
rgb_list[ix] = make_random_color(dominant_r_ix, line_variance)
set_keyboard_rgb(rgb_list)
sleep( log1p( 0.3 / ( abs(6-i)+1 ) ) )
def scrolling_text(msg, text_color=(255,0,0), bg_color=(0,255,0), twinkle=True, variety=128, speed=10):
    """
    Scroll ``msg`` right-to-left across the keyboard using glyph coordinates
    loaded from alphanumeric.json; only letters, digits and spaces are drawn.
    """
with open('alphanumeric.json', 'r') as infile:
alpha = json.load(infile)
master_coords = list()
starting_x = 6
for c in msg:
if c in set(list(string.ascii_letters+string.digits)):
coords = alpha[c.upper()]
for x,y in coords:
master_coords.append( (x+starting_x, y) )
if c.upper() in ['1', 'I']:
starting_x += 4
elif c.upper() in ['M', 'N', 'Q', 'T', 'V', 'W', 'X']:
starting_x += 6
else:
starting_x += 5
elif c == ' ':
starting_x += 3
x_notch_count = max(master_coords, key=lambda x: x[0])[0]+COL_COUNT+6
for i in range(x_notch_count):
key_color_map = [ [bg_color for x in range(COL_COUNT)] for y in range(ROW_COUNT) ]
if twinkle:
for y in range(ROW_COUNT):
for x in range(COL_COUNT):
r,g,b = key_color_map[y][x]
r += choice(range(-variety/2, variety/2))
g += choice(range(-variety/2, variety/2))
b += choice(range(-variety/2, variety/2))
key_color_map[y][x] = tuple(map(clamp, [r,g,b]))
# print key_color_map
on_screen_coords = filter(lambda (x,y): i-COL_COUNT < x <= i, master_coords)
actual_coords = map(lambda (x, y): ((COL_COUNT-1)-i+x, y), on_screen_coords)
for x,y in actual_coords:
# print x, y
key_color_map[y][x] = text_color
key_color_list = list()
for l in key_color_map:
key_color_list.extend(l)
set_keyboard_rgb(key_color_list)
sleep(1.0/speed)
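# Illustrative sketch (editor's addition; `breathe` is a hypothetical effect,
# not in the original script): fade the whole keyboard in and out by holding
# a solid colour and ramping the driver's brightness attribute up and down.
def breathe(rgb=(0, 255, 0), cycles=3, steps=32, period=2.0):
    solid_color(*rgb)
    step = 256 // steps
    for _ in range(cycles):
        for level in list(range(0, 256, step)) + list(range(255, -1, -step)):
            write_to_file('brightness', str(clamp(level)))
            sleep(period / (2.0 * steps))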
# Probe for the right hid-razer device node: if the hard-coded ROOT is not
# writable, try the other single-digit instance suffixes (.0000 - .0009)
# before giving up.
root_confirmed = False
i = 0
while not root_confirmed:
    try:
        write_to_file('brightness', '0')
    except IOError:
        if i > 9:
            raise
        ROOT = ROOT[:-1] + str(i)
        i += 1
    else:
        root_confirmed = True
if __name__ == '__main__':
write_to_file('brightness', '255')
scrolling_text('hello world', bg_color=(0,0,0), text_color=(255,255,255), speed=8, variety=32)
perlin_noise()
write_to_file('brightness', '128')
sleep(0.2)
write_to_file('brightness', '64')
sleep(0.2)
write_to_file('brightness', '32')
|