# ============================================================================
# repo: v-iam/azure-sdk-for-python  |  license: MIT
# file: azure-mgmt-network/azure/mgmt/network/v2016_09_01/operations/virtual_network_gateways_operations.py
# ============================================================================
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-09-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-09-01"
self.config = config
def create_or_update(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a virtual network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to create or update virtual
network gateway operation.
:type parameters: :class:`VirtualNetworkGateway
<azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`VirtualNetworkGateway
<azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network gateway by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VirtualNetworkGateway
<azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [204, 202, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual network gateways by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VirtualNetworkGatewayPaged
<azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewayPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def reset(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, **operation_config):
"""Resets the primary of the virtual network gateway in the specified
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param gateway_vip: Virtual network gateway vip address supplied to
the begin reset of the active-active feature enabled gateway.
:type gateway_vip: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`VirtualNetworkGateway
<azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if gateway_vip is not None:
query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def generatevpnclientpackage(
self, resource_group_name, virtual_network_gateway_name, processor_architecture, custom_headers=None, raw=False, **operation_config):
"""Generates VPN client package for P2S client of the virtual network
gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param processor_architecture: VPN client Processor Architecture.
Possible values are: 'AMD64' and 'X86'. Possible values include:
'Amd64', 'X86'
:type processor_architecture: str or :class:`ProcessorArchitecture
<azure.mgmt.network.v2016_09_01.models.ProcessorArchitecture>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: str
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.VpnClientParameters(processor_architecture=processor_architecture)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnClientParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_bgp_peer_status(
self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, **operation_config):
"""The GetBgpPeerStatus operation retrieves the status of all BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer to retrieve the status of.
:type peer: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`BgpPeerStatusListResult
<azure.mgmt.network.v2016_09_01.models.BgpPeerStatusListResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if peer is not None:
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BgpPeerStatusListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get_learned_routes(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""This operation retrieves a list of routes the virtual network gateway
has learned, including routes learned from BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`GatewayRouteListResult
<azure.mgmt.network.v2016_09_01.models.GatewayRouteListResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get_advertised_routes(
self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, **operation_config):
"""This operation retrieves a list of routes the virtual network gateway
is advertising to the specified peer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer
:type peer: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`GatewayRouteListResult
<azure.mgmt.network.v2016_09_01.models.GatewayRouteListResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
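# ----------------------------------------------------------------------------
# Usage sketch, assuming the 2016-09-01 NetworkManagementClient wires this
# class in as `virtual_network_gateways`. The credentials object, subscription
# id, resource group, gateway name and parameters below are placeholders, and
# the exact client construction can differ between SDK releases.
#
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(credentials, subscription_id)
#
#     # create_or_update returns an AzureOperationPoller; result() blocks
#     # until the long-running PUT finishes and yields the deserialized
#     # VirtualNetworkGateway.
#     poller = client.virtual_network_gateways.create_or_update(
#         'my-resource-group', 'my-gateway', gateway_parameters)
#     gateway = poller.result()
#
#     # get() is synchronous and returns a VirtualNetworkGateway directly,
#     # while list() returns an iterable VirtualNetworkGatewayPaged.
#     gateway = client.virtual_network_gateways.get(
#         'my-resource-group', 'my-gateway')
#     for gw in client.virtual_network_gateways.list('my-resource-group'):
#         print(gw.name)
# ----------------------------------------------------------------------------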
# ============================================================================
# repo: TOC-Shard/moul-scripts  |  license: GPL-3.0
# file: Python/codWeather.py
# ============================================================================
# -*- coding: utf-8 -*-
from Plasma import *
from PlasmaTypes import *
import time
WeatherSDL = 'Event13' # 1 = Snow, 2 = Rain
class codWeather(ptResponder):
def __init__(self):
ptResponder.__init__(self)
self.id = 8501009
self.version = 1
def OnServerInitComplete(self):
self.Weather()
def Weather(self):
dnitime = PtGetDniTime()
dayNum = int(time.strftime('%d', time.gmtime(dnitime)))
monthNum = int(time.strftime('%m', time.gmtime(dnitime)))
sdlName = WeatherSDL
sdl = PtGetAgeSDL()
sdl.setFlags(sdlName, 1, 1)
sdl.sendToClients(sdlName)
        if (monthNum == 1):
if (dayNum <= 17):
sdl.setIndex(sdlName, 0, 1)
PtDebugPrint(('codWeather: Current month is %d, Weather is %s - enabling' % (monthNum, sdlName)))
else:
                sdl.setIndex(sdlName, 0, 0)
                PtDebugPrint(('codWeather: Current month is %d, Weather is %s - disabling' % (monthNum, sdlName)))
elif (monthNum == 12):
sdl.setIndex(sdlName, 0, 1)
PtDebugPrint(('codWeather: Current month is %d, Weather is %s - enabling' % (monthNum, sdlName)))
else:
sdl.setIndex(sdlName, 0, 0)
PtDebugPrint(('codWeather: Current month is %d, Weather is %s - disabling' % (monthNum, sdlName)))
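# ----------------------------------------------------------------------------
# Worked example of the date window above, as a hypothetical standalone helper
# (not engine code): the weather SDL is set to 1 (snow) from December 1
# through January 17 inclusive, and 0 the rest of the year.
#
#     def winter_weather_enabled(monthNum, dayNum):
#         if monthNum == 1:
#             return 1 if dayNum <= 17 else 0
#         return 1 if monthNum == 12 else 0
#
#     winter_weather_enabled(12, 25)  # -> 1 (snow in December)
#     winter_weather_enabled(1, 20)   # -> 0 (window closes after Jan 17)
#     winter_weather_enabled(7, 4)    # -> 0 (summer)
# ----------------------------------------------------------------------------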
# ============================================================================
# repo: jrising/research-common  |  license: MIT
# file: python/feature.py
# ============================================================================
## MODIFIED from agmodels
import csv, os, math
import countries  # assumed sibling module used by country_loop/gridded_loop; see also the local country_points() at the bottom of this file
class StaticFeature:
def feature_set_country(self, country_name):
pass
# Calls callback with (latitude, longitude, feature, weight, cb_extra)
def getll_features(self, latitude, longitude, callback, cb_extra=None):
pass
def getrect_features(self, swlat, swlon, nelat, nelon, callback, cb_extra=None):
pass
def getrect_features_grid(self, swlat, swlon, nelat, nelon, sizex, sizey, callback, cb_extra=None):
if nelat - swlat < sizey:
lats = [(nelat + swlat) / 2]
else:
#lats = StaticFeature.drange(swlat + sizey/2, nelat, sizey)
lats = StaticFeature.drange(nelat - sizey/2, swlat, -sizey)
lats = list(lats)
if nelon - swlon < sizex:
lons = [(nelon + swlon) / 2]
else:
lons = StaticFeature.drange(swlon + sizex/2, nelon, sizex)
lons = list(lons)
for lat in lats: # Go backwards to optimize seeking
for lon in lons:
self.getll_features(lat, lon, callback, cb_extra)
### Take a full list of .25x.25 grid cell centers from countries.country_points()
def country_loop(self, resfile, feature_name, weight_name):
done = [] # countries included
if os.path.exists(resfile):
with open(resfile, "r") as resfp:
reader = csv.reader(resfp)
for row in reader:
if row[0] not in done:
done.append(row[0])
        if not done:
            with open(resfile, "a") as resfp:
                resfp.write("country,%s,%s,swlat,swlon,nelat,nelon,latitude,longitude\n" % (feature_name, weight_name))
country_points = countries.country_points()
for country in country_points:
if country in done:
continue
self.feature_set_country(country)
infos = {}
points = country_points[country]['latlons']
for pp in range(len(points)):
print country, pp
latitude = points[pp][0]
longitude = points[pp][1]
swlat = latitude - .25/2
swlon = longitude - .25/2
nelat = latitude + .25/2
nelon = longitude + .25/2
self.getrect_features(swlat, swlon, nelat, nelon, StaticFeature.append_callback, infos)
for key in infos:
info = infos[key]
if info['weight'] == 0:
continue
lat = info['lats'] / info['weight']
lon = info['lons'] / info['weight']
resline = "%s,%s,%f,%f,%f,%f,%f,%f,%f" % (country, str(key), info['weight'], info['swlat'], info['swlon'], info['nelat'], info['nelon'], lat, lon)
print resline
with open(resfile, "a") as resfp:
resfp.write(resline + "\n")
self.feature_set_country(None)
def gridded_loop(self, resfile, feature_name, weight_name, percent):
done = [] # countries included
if os.path.exists(resfile):
with open(resfile, "r") as resfp:
reader = csv.reader(resfp)
for row in reader:
if row[0] not in done:
done.append(row[0])
if not done:
with open(resfile, "a") as resfp:
resfp.write("country,%s,%s,swlat,swlon,nelat,nelon,latitude,longitude\n" % (feature_name, weight_name))
country_points = countries.country_points()
for country in country_points:
if country in done:
continue
self.feature_set_country(country)
infos = {}
points = country_points[country]['latlons']
for pp in range(len(points)):
print country, points[pp]
latitude = points[pp][0]
longitude = points[pp][1]
swlat = latitude - .25/2
swlon = longitude - .25/2
nelat = latitude + .25/2
nelon = longitude + .25/2
self.getrect_features(swlat, swlon, nelat, nelon, StaticFeature.append_callback, infos)
infos = StaticFeature.top_filter(infos, percent, swlat, swlon, nelat, nelon, lambda info: info['weight'], lambda info: (info['swlat'], info['swlon'], info['nelat'], info['nelon']))
for key in infos:
info = infos[key]
lat = info['lats'] / info['weight']
lon = info['lons'] / info['weight']
resline = "%s,%s,%f,%f,%f,%f,%f,%f,%f" % (country, str(key), info['weight'], max(info['swlat'], swlat), max(info['swlon'], swlon), min(info['nelat'], nelat), min(info['nelon'], nelon), max(swlat, min(lat, nelat)), max(swlon, min(lon, nelon)))
print resline
with open(resfile, "a") as resfp:
resfp.write(resline + "\n")
self.feature_set_country(None)
@staticmethod
def top_filter(infos, percent, swlat, swlon, nelat, nelon, weightfunc, rectfunc):
# Select the top [percent] of statics
keyweights = []
totalweight = 0
for key in infos:
info = infos[key]
weight = weightfunc(info)
if weight == 0:
continue
rect = rectfunc(info)
if rect[0] > nelat or rect[1] > nelon or rect[2] < swlat or rect[3] < swlon:
continue
totalweight += weight
keyweights.append({'key': key, 'weight': weight})
if totalweight == 0:
return {}
keyweights = sorted(keyweights, key=lambda sw: sw['weight'], reverse=True)
usedweight = 0
numused = 0
result = {}
for keyweight in keyweights:
            if usedweight > totalweight * percent:
break
result[keyweight['key']] = infos[keyweight['key']]
numused += 1
usedweight += keyweight['weight']
return result
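    # Worked example of the greedy selection above (hypothetical weights):
    # features are kept in descending weight order until the accumulated
    # weight passes `percent` of the total inside the query rectangle. With
    # weights 6, 3 and 1 and percent=0.8 (threshold 8), the loop keeps 6
    # (used 6) and 3 (used 9), then stops before 1 because 9 > 8.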
@staticmethod
def append_callback(latitude, longitude, feature, weight, infos):
info = infos.get(feature, {'weight': 0, 'swlat': 90, 'swlon': 180, 'nelat': -90, 'nelon': -180, 'lats': 0, 'lons': 0})
info['weight'] += weight
info['lats'] += latitude * weight
info['lons'] += longitude * weight
info['swlat'] = min(info['swlat'], latitude)
info['swlon'] = min(info['swlon'], longitude)
info['nelat'] = max(info['nelat'], latitude)
info['nelon'] = max(info['nelon'], longitude)
infos[feature] = info
@staticmethod
def drange(start, stop, step):
if step > 0:
r = start
while r < stop:
yield r
r += step
else:
r = start
while r > stop:
yield r
r += step
class StaticFeatureCombo(StaticFeature):
def __init__(self, feature1, feature2):
self.feature1 = feature1
self.feature2 = feature2
def feature_set_country(self, country_name):
self.feature1.feature_set_country(country_name)
self.feature2.feature_set_country(country_name)
def getll_features(self, latitude, longitude, callback, cb_extra=None):
self.feature1.getll_features(latitude, longitude, StaticFeatureCombo.combo_getll_callback1, (self, callback, cb_extra))
@staticmethod
def combo_getll_callback1(latitude, longitude, feature, weight, extras):
(self, callback, cb_extra) = extras
if feature is not None:
self.feature2.getll_features(latitude, longitude, StaticFeatureCombo.combo_getll_callback2, (feature, weight, callback, cb_extra))
@staticmethod
def combo_getll_callback2(latitude, longitude, feature, weight, extras):
(feature1, weight1, callback, cb_extra) = extras
callback(latitude, longitude, str(feature1) + ":" + str(feature), weight1*weight, cb_extra)
def getrect_features(self, swlat, swlon, nelat, nelon, callback, cb_extra=None):
self.feature1.getrect_features(swlat, swlon, nelat, nelon, StaticFeatureCombo.combo_getll_callback1, (self, callback, cb_extra))
class StaticFeatureCached(StaticFeature):
def __init__(self, feature, featfile, weight, sizex, sizey):
self.feature = feature
self.featfile = featfile
self.weight = weight
self.sizex = sizex
self.sizey = sizey
self.country_name = None
def feature_set_country(self, country_name):
self.country_name = country_name
def getll_features(self, latitude, longitude, callback, cb_extra=None):
if self.country_name:
if self.country_name in self.featfile.countries:
features = self.featfile.potential_contains(self.country_name, latitude, longitude)
if len(features) == 1:
for feature in features:
callback(latitude, longitude, feature, self.weight, cb_extra)
return
self.feature.getll_features(latitude, longitude, callback, cb_extra)
def getrect_features(self, swlat, swlon, nelat, nelon, callback, cb_extra=None):
self.getrect_features_grid(swlat, swlon, nelat, nelon, self.sizex, self.sizey, callback, cb_extra)
@staticmethod
def print_callback(latitude, longitude, feature, weight, extra=None):
(callback, cb_extra) = extra
print latitude, longitude, feature, weight
callback(latitude, longitude, feature, weight, cb_extra)
class StaticFeatureFile:
def __init__(self, file=None):
if file is None:
self.countries = {}
return
countries = {}
reader = csv.reader(file, delimiter=',')
reader.next()
for row in reader:
if row[0] not in countries:
countries[row[0]] = {}
countries[row[0]][row[1]] = map(float, row[2:])
# Add on portion represents of total
for name in countries:
country = countries[name]
total = 0
for feature in country:
total += country[feature][0]
for feature in country:
country[feature].append(country[feature][0] / total)
self.countries = countries
# Returns {feature: my-weight}
def potential_contains(self, country_name, latitude, longitude):
country = self.countries[country_name]
potentials = {}
for feature in country:
if FeatureRectangle.contains(FeatureRectangle(country[feature]), latitude, longitude):
potentials[feature] = country[feature][0]
return potentials
# Returns {feature: my-weight}
def potential_intersects(self, country_name, rect):
country = self.countries[country_name]
potentials = {}
for feature in country:
both = FeatureRectangle.intersect(rect, FeatureRectangle(country[feature]))
if both.is_proper():
potentials[feature] = country[feature][0]
return potentials
@staticmethod
def intersect(featrow1, featrow2, featval1, featval2, feature1, feature2):
rect1 = FeatureRectangle(featrow1)
rect2 = FeatureRectangle(featrow2)
both = FeatureRectangle.intersect(rect1, rect2)
# if one is contained in other, weight is smaller portion
#if both == rect1:
# return [featrow1[-1]] + featrow1[2:-1]
#if both == rect2:
# return [featrow2[-1]] + featrow2[2:-1]
# Look over intersection
infos = {}
feature1.getrect_features(both.swlat, both.swlon, both.nelat, both.nelon, StaticFeatureFile.intersect_callback, (featval1, featval2, feature2, infos))
info = None
if featval2 in infos:
info = infos[featval2]
try:
if float(featval2) in infos:
info = infos[float(featval2)]
except:
pass
if info:
sumweight = sum([infos[key]['weight'] for key in infos])
# weight is new value / original value for feature2
return [float(info['weight']) / sumweight, info['swlat'], info['swlon'], info['nelat'], info['nelon'], info['lats'] / info['weight'], info['lons'] / info['weight']]
return [0, both.swlat, both.swlon, both.nelat, both.nelon, (both.swlat + both.nelat)/2, (both.swlon + both.nelon)/2]
@staticmethod
def intersect_callback(latitude, longitude, feature, weight, extras):
(featval1, featval2, feature2, infos) = extras
if ((isinstance(feature, tuple) and tuple(map(str, feature)) == featval1) or str(feature) == featval1) and weight > 0:
feature2.getll_features(latitude, longitude, StaticFeatureFile.scaled_append_callback, (featval2, weight, infos))
#elif weight == 0:
# print "No0"
else:
print "No1:" , latitude, longitude, feature, featval1
@staticmethod
def scaled_append_callback(latitude, longitude, feature, weight, extras):
(featval2, weight1, infos) = extras
#if str(feature) == featval2: # Remove this later
# print "Yes:", latitude, longitude
#else:
# print "No2:" , latitude, longitude
StaticFeature.append_callback(latitude, longitude, feature, weight1*weight, infos)
@staticmethod
def all_intersects(country_name, featfile1, featfile2, feature1, feature2):
combo = StaticFeatureFile()
combo.countries[country_name] = {}
country1 = featfile1.countries[country_name]
for featval1 in country1:
featrow1 = country1[featval1]
rect1 = FeatureRectangle(featrow1)
featval2s = featfile2.potential_intersects(country_name, rect1)
for featval2 in featval2s:
print featval1, featval2
featrow2 = featfile2.countries[country_name][featval2]
bothrow = StaticFeatureFile.intersect(featrow1, featrow2, featval1, featval2, feature1, feature2)
if bothrow[0] > 0:
combo.countries[country_name][(featval1, featval2)] = bothrow
return combo
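# ----------------------------------------------------------------------------
# Sketch of the CSV layout StaticFeatureFile expects: the header row plus one
# row per (country, feature) with weight, bounding box and centroid, matching
# the columns written by country_loop/gridded_loop above. The file name and
# values are hypothetical.
#
#     # country,feature,weight,swlat,swlon,nelat,nelon,latitude,longitude
#     # USA,corn,12.5,30.0,-100.0,45.0,-80.0,37.5,-90.0
#
#     with open('features.csv') as fp:
#         featfile = StaticFeatureFile(fp)
#     featfile.potential_contains('USA', 37.0, -95.0)  # -> {'corn': 12.5}
# ----------------------------------------------------------------------------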
class FeatureRectangle:
def __init__(self, row):
self.swlat = row[1]
self.swlon = row[2]
self.nelat = row[3]
self.nelon = row[4]
def __eq__(self, other):
return self.swlat == other.swlat and self.swlon == other.swlon and self.nelat == other.nelat and self.nelon == other.nelon
def is_proper(self):
return self.swlat < self.nelat and self.swlon < self.nelon
@staticmethod
def intersect(rect1, rect2):
return FeatureRectangle([None, max(rect1.swlat, rect2.swlat), max(rect1.swlon, rect2.swlon),
min(rect1.nelat, rect2.nelat), min(rect1.nelon, rect2.nelon)])
def contains(rect, latitude, longitude):
return latitude > rect.swlat and latitude < rect.nelat and longitude > rect.swlon and longitude < rect.nelon
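# ----------------------------------------------------------------------------
# Quick sketch of FeatureRectangle; rows follow the CSV layout above, i.e.
# positions 1-4 are (swlat, swlon, nelat, nelon) and position 0 is ignored.
#
#     a = FeatureRectangle([None, 0, 0, 10, 10])
#     b = FeatureRectangle([None, 5, 5, 15, 15])
#     both = FeatureRectangle.intersect(a, b)
#     (both.swlat, both.swlon, both.nelat, both.nelon)  # -> (5, 5, 10, 10)
#     both.is_proper()                                  # -> True
#     FeatureRectangle.contains(a, 3, 3)                # -> True
# ----------------------------------------------------------------------------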
class StaticFeatureGrid(StaticFeature):
def __init__(self, lat0, dlat, lon0, dlon):
self.lat0 = lat0
self.dlat = dlat
self.lon0 = lon0
self.dlon = dlon
def getll_features(self, latitude, longitude, callback, cb_extra=None):
lat = math.floor((latitude - self.lat0) / self.dlat) * self.dlat + self.lat0
lon = math.floor((longitude - self.lon0) / self.dlon) * self.dlon + self.lon0
callback(latitude, longitude, str(lat) + ":" + str(lon), 1, cb_extra)
def getrect_features(self, swlat, swlon, nelat, nelon, callback, cb_extra=None):
        self.getrect_features_grid(swlat, swlon, nelat, nelon, self.dlon, self.dlat, callback, cb_extra)
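# ----------------------------------------------------------------------------
# Minimal sketch of the callback protocol used throughout this module: every
# feature lookup reports cells as (latitude, longitude, feature, weight,
# cb_extra). The grid origin, cell size and query point are arbitrary.
#
#     def collect(lat, lon, feature, weight, out):
#         out.append((lat, lon, feature, weight))
#
#     grid = StaticFeatureGrid(0.0, 0.25, 0.0, 0.25)
#     cells = []
#     grid.getll_features(10.1, 20.2, collect, cells)
#     # cells -> [(10.1, 20.2, '10.0:20.0', 1)]
# ----------------------------------------------------------------------------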
def country_points():
print "Reading GIS data..."
countries = {}
with open("~/data/political/countries-0.25x0.25.pts") as fp:
reader = csv.reader(fp)
reader.next() # ignore header
for row in reader:
country = countries.get(row[1], {'latlons': []})
country['latlons'].append((float(row[3]), float(row[2])))
countries[row[1]] = country
return countries
# ============================================================================
# repo: BartoszCichecki/onlinepython  |  license: GPL-2.0
# file: onlinepython/pypy-2.4.0-win32/lib-python/2.7/test/test_curses.py
# ============================================================================
#
# Test script for the curses module
#
# This script doesn't actually display anything very coherent, but it
# does call every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import sys, tempfile, os
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
import unittest
from test.test_support import requires, import_module
requires('curses')
curses = import_module('curses')
curses.panel = import_module('curses.panel')
# XXX: if newterm was supported we could use it instead of initscr and not exit
term = os.environ.get('TERM')
if not term or term == 'unknown':
raise unittest.SkipTest, "$TERM=%r, calling initscr() may cause exit" % term
if sys.platform == "cygwin":
raise unittest.SkipTest("cygwin's curses mostly just hangs")
def window_funcs(stdscr):
"Test the methods of windows"
win = curses.newwin(10,10)
win = curses.newwin(5,5, 5,5)
win2 = curses.newwin(15,15, 5,5)
for meth in [stdscr.addch, stdscr.addstr]:
for args in [('a'), ('a', curses.A_BOLD),
(4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
meth(*args)
for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
stdscr.getparyx, stdscr.getyx, stdscr.inch,
stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
stdscr.standout, stdscr.standend, stdscr.syncdown,
stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
meth()
stdscr.addnstr('1234', 3)
stdscr.addnstr('1234', 3, curses.A_BOLD)
stdscr.addnstr(4,4, '1234', 3)
stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
stdscr.attron(curses.A_BOLD)
stdscr.attroff(curses.A_BOLD)
stdscr.attrset(curses.A_BOLD)
stdscr.bkgd(' ')
stdscr.bkgd(' ', curses.A_REVERSE)
stdscr.bkgdset(' ')
stdscr.bkgdset(' ', curses.A_REVERSE)
win.border(65, 66, 67, 68,
69, 70, 71, 72)
win.border('|', '!', '-', '_',
'+', '\\', '#', '/')
try:
win.border(65, 66, 67, 68,
69, [], 71, 72)
except TypeError:
pass
else:
raise RuntimeError, "Expected win.border() to raise TypeError"
stdscr.clearok(1)
win4 = stdscr.derwin(2,2)
win4 = stdscr.derwin(1,1, 5,5)
win4.mvderwin(9,9)
stdscr.echochar('a')
stdscr.echochar('a', curses.A_BOLD)
stdscr.hline('-', 5)
stdscr.hline('-', 5, curses.A_BOLD)
stdscr.hline(1,1,'-', 5)
stdscr.hline(1,1,'-', 5, curses.A_BOLD)
stdscr.idcok(1)
stdscr.idlok(1)
stdscr.immedok(1)
stdscr.insch('c')
stdscr.insdelln(1)
stdscr.insnstr('abc', 3)
stdscr.insnstr('abc', 3, curses.A_BOLD)
stdscr.insnstr(5, 5, 'abc', 3)
stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
stdscr.insstr('def')
stdscr.insstr('def', curses.A_BOLD)
stdscr.insstr(5, 5, 'def')
stdscr.insstr(5, 5, 'def', curses.A_BOLD)
stdscr.is_linetouched(0)
stdscr.keypad(1)
stdscr.leaveok(1)
stdscr.move(3,3)
win.mvwin(2,2)
stdscr.nodelay(1)
stdscr.notimeout(1)
win2.overlay(win)
win2.overwrite(win)
win2.overlay(win, 1, 2, 3, 3, 2, 1)
win2.overwrite(win, 1, 2, 3, 3, 2, 1)
stdscr.redrawln(1,2)
stdscr.scrollok(1)
stdscr.scroll()
stdscr.scroll(2)
stdscr.scroll(-3)
stdscr.move(12, 2)
stdscr.setscrreg(10,15)
win3 = stdscr.subwin(10,10)
win3 = stdscr.subwin(10,10, 5,5)
stdscr.syncok(1)
stdscr.timeout(5)
stdscr.touchline(5,5)
stdscr.touchline(5,5,0)
stdscr.vline('a', 3)
stdscr.vline('a', 3, curses.A_STANDOUT)
stdscr.chgat(5, 2, 3, curses.A_BLINK)
stdscr.chgat(3, curses.A_BOLD)
stdscr.chgat(5, 8, curses.A_UNDERLINE)
stdscr.chgat(curses.A_BLINK)
stdscr.refresh()
stdscr.vline(1,1, 'a', 3)
stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
if hasattr(curses, 'resize'):
stdscr.resize()
if hasattr(curses, 'enclose'):
stdscr.enclose()
def module_funcs(stdscr):
"Test module-level functions"
for func in [curses.baudrate, curses.beep, curses.can_change_color,
curses.cbreak, curses.def_prog_mode, curses.doupdate,
curses.filter, curses.flash, curses.flushinp,
curses.has_colors, curses.has_ic, curses.has_il,
curses.isendwin, curses.killchar, curses.longname,
curses.nocbreak, curses.noecho, curses.nonl,
curses.noqiflush, curses.noraw,
curses.reset_prog_mode, curses.termattrs,
curses.termname, curses.erasechar, curses.getsyx]:
func()
# Functions that actually need arguments
if curses.tigetstr("cnorm"):
curses.curs_set(1)
curses.delay_output(1)
curses.echo() ; curses.echo(1)
f = tempfile.TemporaryFile()
stdscr.putwin(f)
f.seek(0)
curses.getwin(f)
f.close()
curses.halfdelay(1)
curses.intrflush(1)
curses.meta(1)
curses.napms(100)
curses.newpad(50,50)
win = curses.newwin(5,5)
win = curses.newwin(5,5, 1,1)
curses.nl() ; curses.nl(1)
curses.putp('abc')
curses.qiflush()
curses.raw() ; curses.raw(1)
curses.setsyx(5,5)
curses.tigetflag('hc')
curses.tigetnum('co')
curses.tigetstr('cr')
curses.tparm('cr')
curses.typeahead(sys.__stdin__.fileno())
curses.unctrl('a')
curses.ungetch('a')
curses.use_env(1)
# Functions only available on a few platforms
if curses.has_colors():
curses.start_color()
curses.init_pair(2, 1,1)
curses.color_content(1)
curses.color_pair(2)
curses.pair_content(curses.COLOR_PAIRS - 1)
curses.pair_number(0)
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
if hasattr(curses, 'keyname'):
curses.keyname(13)
if hasattr(curses, 'has_key'):
curses.has_key(13)
if hasattr(curses, 'getmouse'):
(availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
# availmask indicates that mouse stuff not available.
if availmask != 0:
curses.mouseinterval(10)
# just verify these don't cause errors
curses.ungetmouse(0, 0, 0, 0, curses.BUTTON1_PRESSED)
m = curses.getmouse()
if hasattr(curses, 'is_term_resized'):
curses.is_term_resized(*stdscr.getmaxyx())
if hasattr(curses, 'resizeterm'):
curses.resizeterm(*stdscr.getmaxyx())
if hasattr(curses, 'resize_term'):
curses.resize_term(*stdscr.getmaxyx())
def unit_tests():
from curses import ascii
for ch, expected in [('a', 'a'), ('A', 'A'),
(';', ';'), (' ', ' '),
('\x7f', '^?'), ('\n', '^J'), ('\0', '^@'),
# Meta-bit characters
('\x8a', '!^J'), ('\xc1', '!A'),
]:
if ascii.unctrl(ch) != expected:
print 'curses.unctrl fails on character', repr(ch)
def test_userptr_without_set(stdscr):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
# try to access userptr() before calling set_userptr() -- segfaults
try:
p.userptr()
raise RuntimeError, 'userptr should fail since not set'
except curses.panel.error:
pass
def test_userptr_memory_leak(stdscr):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
obj = object()
nrefs = sys.getrefcount(obj)
for i in range(100):
p.set_userptr(obj)
p.set_userptr(None)
if sys.getrefcount(obj) != nrefs:
raise RuntimeError, "set_userptr leaked references"
def test_userptr_segfault(stdscr):
panel = curses.panel.new_panel(stdscr)
class A:
def __del__(self):
panel.set_userptr(None)
panel.set_userptr(A())
panel.set_userptr(None)
def test_resize_term(stdscr):
if hasattr(curses, 'resizeterm'):
lines, cols = curses.LINES, curses.COLS
curses.resizeterm(lines - 1, cols + 1)
if curses.LINES != lines - 1 or curses.COLS != cols + 1:
raise RuntimeError, "Expected resizeterm to update LINES and COLS"
def test_issue6243(stdscr):
curses.ungetch(1025)
stdscr.getkey()
def main(stdscr):
curses.savetty()
try:
module_funcs(stdscr)
window_funcs(stdscr)
test_userptr_without_set(stdscr)
test_userptr_memory_leak(stdscr)
test_userptr_segfault(stdscr)
test_resize_term(stdscr)
test_issue6243(stdscr)
finally:
curses.resetty()
if __name__ == '__main__':
curses.wrapper(main)
unit_tests()
else:
if not sys.__stdout__.isatty():
raise unittest.SkipTest("sys.__stdout__ is not a tty")
# testing setupterm() inside initscr/endwin
# causes terminal breakage
curses.setupterm(fd=sys.__stdout__.fileno())
try:
stdscr = curses.initscr()
main(stdscr)
finally:
curses.endwin()
unit_tests()
# ============================================================================
# repo: pvtodorov/indra  (license marker truncated in source)
# file: indra/assemblers/graph/assembler.py
# ============================================================================
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import logging
import itertools
from indra.statements import *
logger = logging.getLogger(__name__)
try:
import pygraphviz
except ImportError:
logger.warning('Cannot use graph assembler because '
'pygraphviz could not be imported.')
default_graph_properties = {
'directed': True,
'fixedsize': True,
'fontname': 'arial',
'splines': 'spline',
'rankdir': 'LR'
}
default_node_properties = {
'color': '#FBAF3F',
'shape': 'Mrecord',
'fontsize': 8
}
default_edge_properties = {
'arrowsize': 0.5
}
class GraphAssembler():
"""The Graph assembler assembles INDRA Statements into a
Graphviz node-edge graph.
Parameters
----------
stmts : Optional[list[indra.statements.Statement]]
A list of INDRA Statements to be added to the assembler's list
of Statements.
graph_properties : Optional[dict[str: str]]
A dictionary of graphviz graph properties overriding the default ones.
node_properties : Optional[dict[str: str]]
A dictionary of graphviz node properties overriding the default ones.
edge_properties : Optional[dict[str: str]]
A dictionary of graphviz edge properties overriding the default ones.
Attributes
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to be assembled.
graph : pygraphviz.AGraph
A pygraphviz graph that is assembled by this assembler.
existing_nodes : list[tuple]
The list of nodes (identified by node key tuples) that are
already in the graph.
existing_edges : list[tuple]
The list of edges (identified by edge key tuples) that are
already in the graph.
graph_properties : dict[str: str]
A dictionary of graphviz graph properties used for assembly.
node_properties : dict[str: str]
A dictionary of graphviz node properties used for assembly.
edge_properties : dict[str: str]
A dictionary of graphviz edge properties used for assembly.
Note that most edge properties are determined based on the type of
the edge by the assembler (e.g. color, arrowhead).
These settings cannot be directly controlled through the API.
"""
def __init__(self, stmts=None, graph_properties=None,
node_properties=None, edge_properties=None):
if stmts is None:
self.statements = []
else:
self.statements = stmts
self.graph_properties = default_graph_properties
self.node_properties = default_node_properties
self.edge_properties = default_edge_properties
if graph_properties:
for k, v in graph_properties.items():
self.graph_properties[k] = v
if node_properties:
for k, v in node_properties.items():
self.node_properties[k] = v
if edge_properties:
for k, v in edge_properties.items():
self.edge_properties[k] = v
self.graph = pygraphviz.AGraph(**self.graph_properties)
self.existing_nodes = []
self.existing_edges = []
self._complex_nodes = []
def add_statements(self, stmts):
"""Add a list of statements to be assembled.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of INDRA Statements to be appended to the assembler's list.
"""
for stmt in stmts:
self.statements.append(stmt)
def make_model(self):
"""Assemble the graph from the assembler's list of INDRA Statements."""
# Assemble in two stages.
# First, create the nodes of the graph
for stmt in self.statements:
# Skip SelfModification (self loops) -- has one node
if isinstance(stmt, SelfModification) or \
isinstance(stmt, Translocation) or \
isinstance(stmt, ActiveForm):
continue
# Special handling for Complexes -- more than 1 node
elif isinstance(stmt, Complex):
for m in stmt.members:
self._add_node(m)
# All else should have exactly 2 nodes
elif all([ag is not None for ag in stmt.agent_list()]):
if not len(stmt.agent_list()) == 2:
                    logger.warning(
                        '%s does not have the expected 2 agents.' % stmt)
continue
for ag in stmt.agent_list():
self._add_node(ag)
# Second, create the edges of the graph
for stmt in self.statements:
# Skip SelfModification (self loops) -- has one node
if isinstance(stmt, SelfModification) or \
isinstance(stmt, Translocation) or \
isinstance(stmt, ActiveForm):
continue
elif isinstance(stmt, Complex):
self._add_complex(stmt.members)
elif all([ag is not None for ag in stmt.agent_list()]):
self._add_stmt_edge(stmt)
def get_string(self):
"""Return the assembled graph as a string.
Returns
-------
graph_string : str
The assembled graph as a string.
"""
graph_string = self.graph.to_string()
graph_string = graph_string.replace('\\N', '\\n')
return graph_string
def save_dot(self, file_name='graph.dot'):
"""Save the graph in a graphviz dot file.
Parameters
----------
file_name : Optional[str]
The name of the file to save the graph dot string to.
"""
s = self.get_string()
with open(file_name, 'wt') as fh:
fh.write(s)
def save_pdf(self, file_name='graph.pdf', prog='dot'):
"""Draw the graph and save as an image or pdf file.
Parameters
----------
file_name : Optional[str]
The name of the file to save the graph as. Default: graph.pdf
prog : Optional[str]
The graphviz program to use for graph layout. Default: dot
"""
self.graph.draw(file_name, prog=prog)
def _add_edge(self, source, target, **kwargs):
"""Add an edge to the graph."""
        # Start from a copy of the default edge properties so that explicit
        # overrides don't mutate the shared defaults
        edge_properties = self.edge_properties.copy()
        # Overwrite ones that are given in the function call explicitly
        for k, v in kwargs.items():
            edge_properties[k] = v
self.graph.add_edge(source, target, **edge_properties)
def _add_node(self, agent):
"""Add an Agent as a node to the graph."""
if agent is None:
return
node_label = _get_node_label(agent)
if isinstance(agent, Agent) and agent.bound_conditions:
bound_agents = [bc.agent for bc in agent.bound_conditions if
bc.is_bound]
if bound_agents:
bound_names = [_get_node_label(a) for a in bound_agents]
node_label = _get_node_label(agent) + '/' + \
'/'.join(bound_names)
self._complex_nodes.append([agent] + bound_agents)
else:
node_label = _get_node_label(agent)
node_key = _get_node_key(agent)
if node_key in self.existing_nodes:
return
self.existing_nodes.append(node_key)
self.graph.add_node(node_key,
label=node_label,
**self.node_properties)
    def _add_stmt_edge(self, stmt):
        """Assemble an edge for a generic two-agent statement."""
        # Statements with a None agent were already filtered out in make_model
source = _get_node_key(stmt.agent_list()[0])
target = _get_node_key(stmt.agent_list()[1])
edge_key = (source, target, stmt.__class__.__name__)
if edge_key in self.existing_edges:
return
self.existing_edges.append(edge_key)
if isinstance(stmt, RemoveModification) or \
isinstance(stmt, Inhibition) or \
isinstance(stmt, DecreaseAmount) or \
isinstance(stmt, Gap) or \
(isinstance(stmt, Influence) and stmt.overall_polarity() == -1):
color = '#ff0000'
else:
color = '#000000'
params = {'color': color,
'arrowhead': 'normal',
'dir': 'forward'}
self._add_edge(source, target, **params)
def _add_complex(self, members):
"""Assemble a Complex statement."""
params = {'color': '#0000ff',
'arrowhead': 'dot',
'arrowtail': 'dot',
'dir': 'both'}
for m1, m2 in itertools.combinations(members, 2):
if self._has_complex_node(m1, m2):
continue
m1_key = _get_node_key(m1)
m2_key = _get_node_key(m2)
edge_key = (set([m1_key, m2_key]), 'complex')
            if edge_key in self.existing_edges:
                continue
self.existing_edges.append(edge_key)
self._add_edge(m1_key, m2_key, **params)
    def _has_complex_node(self, m1, m2):
        # Check all assembled complex nodes, not just the first one
        for cplx in self._complex_nodes:
            names = [m.name for m in cplx]
            if m1.name in names and m2.name in names:
                return True
        return False
def _get_node_label(agent):
def sanitize_name(name):
name = name.replace('\n', ' ')
name = name.replace('<', '')
name = name.replace('>', '')
return name
    # If the agent doesn't have grounding in a known
    # database, fall back on a family/complex ID or the original text
    # as the node name; otherwise use the agent's canonical name.
if ('UP' not in agent.db_refs and
'HGNC' not in agent.db_refs and
'CHEBI' not in agent.db_refs and
'UN' not in agent.db_refs):
if 'FPLX' in agent.db_refs:
name_for_node = agent.db_refs['FPLX']
return sanitize_name(name_for_node)
elif 'BE' in agent.db_refs:
name_for_node = agent.db_refs['BE']
return sanitize_name(name_for_node)
elif 'TEXT' in agent.db_refs:
name_for_node = agent.db_refs['TEXT']
return sanitize_name(name_for_node)
name_for_node = agent.name
return sanitize_name(name_for_node)
def _get_node_key(agent):
#return agent.matches_key()
return _get_node_label(agent)
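if __name__ == '__main__':
    # A minimal usage sketch, not part of the original module: assemble a
    # couple of illustrative INDRA Statements into a graph and save it.
    # Assumes the assembler class defined above is INDRA's GraphAssembler;
    # the agents and statement types are arbitrary examples.
    from indra.statements import Agent, Phosphorylation, Complex
    map2k1 = Agent('MAP2K1')
    mapk1 = Agent('MAPK1')
    stmts = [Phosphorylation(map2k1, mapk1), Complex([map2k1, mapk1])]
    ga = GraphAssembler(stmts)
    ga.make_model()
    ga.save_dot('example_graph.dot')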
| bsd-2-clause |
Jandersolutions/jander777-ghost | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/formatters/rtf.py | 364 | 4536 | # -*- coding: utf-8 -*-
"""
pygments.formatters.rtf
~~~~~~~~~~~~~~~~~~~~~~~
A formatter that generates RTF files.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
__all__ = ['RtfFormatter']
class RtfFormatter(Formatter):
"""
Format tokens as RTF markup. This formatter automatically outputs full RTF
documents with color information and other useful stuff. Perfect for Copy and
Paste into Microsoft® Word® documents.
*New in Pygments 0.6.*
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
    `fontface`
        The font family to use, for example ``Bitstream Vera Sans``. Defaults
        to some generic font which is supposed to have fixed width.
"""
name = 'RTF'
aliases = ['rtf']
filenames = ['*.rtf']
unicodeoutput = False
def __init__(self, **options):
"""
Additional options accepted:
``fontface``
Name of the font used. Could for example be ``'Courier New'``
            to further specify the default which is ``'\fmodern'``. The RTF
            specification describes ``\fmodern`` as "Fixed-pitch serif
            and sans serif fonts". Hope every RTF implementation thinks
            the same about modern...
"""
Formatter.__init__(self, **options)
self.fontface = options.get('fontface') or ''
def _escape(self, text):
return text.replace('\\', '\\\\') \
.replace('{', '\\{') \
.replace('}', '\\}')
def _escape_text(self, text):
        # Empty strings: returning early gives a small performance improvement
if not text:
return ''
# escape text
text = self._escape(text)
if self.encoding in ('utf-8', 'utf-16', 'utf-32'):
encoding = 'iso-8859-15'
else:
encoding = self.encoding or 'iso-8859-15'
buf = []
for c in text:
if ord(c) > 128:
ansic = c.encode(encoding, 'ignore') or '?'
if ord(ansic) > 128:
ansic = '\\\'%x' % ord(ansic)
else:
ansic = c
buf.append(r'\ud{\u%d%s}' % (ord(c), ansic))
else:
buf.append(str(c))
return ''.join(buf).replace('\n', '\\par\n')
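    # Worked example of the escaping above (an illustrative note, not part
    # of the original module): for the copyright sign u'\xa9' (ord 169),
    # the iso-8859-15 fallback byte also has ord > 128, so the ANSI form
    # becomes \'a9 and the emitted RTF is \ud{\u169\'a9}; plain ASCII
    # passes through unchanged, and '\n' becomes '\par\n'.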
def format_unencoded(self, tokensource, outfile):
# rtf 1.8 header
outfile.write(r'{\rtf1\ansi\deff0'
r'{\fonttbl{\f0\fmodern\fprq1\fcharset0%s;}}'
r'{\colortbl;' % (self.fontface and
' ' + self._escape(self.fontface) or
''))
# convert colors and save them in a mapping to access them later.
color_mapping = {}
offset = 1
for _, style in self.style:
for color in style['color'], style['bgcolor'], style['border']:
if color and color not in color_mapping:
color_mapping[color] = offset
outfile.write(r'\red%d\green%d\blue%d;' % (
int(color[0:2], 16),
int(color[2:4], 16),
int(color[4:6], 16)
))
offset += 1
outfile.write(r'}\f0')
# highlight stream
for ttype, value in tokensource:
while not self.style.styles_token(ttype) and ttype.parent:
ttype = ttype.parent
style = self.style.style_for_token(ttype)
buf = []
if style['bgcolor']:
buf.append(r'\cb%d' % color_mapping[style['bgcolor']])
if style['color']:
buf.append(r'\cf%d' % color_mapping[style['color']])
if style['bold']:
buf.append(r'\b')
if style['italic']:
buf.append(r'\i')
if style['underline']:
buf.append(r'\ul')
if style['border']:
buf.append(r'\chbrdr\chcfpat%d' %
color_mapping[style['border']])
start = ''.join(buf)
if start:
outfile.write('{%s ' % start)
outfile.write(self._escape_text(value))
if start:
outfile.write('}')
outfile.write('}')
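if __name__ == '__main__':
    # A minimal usage sketch, not part of the original module: render a
    # snippet of Python source as RTF. Assumes the rest of Pygments is
    # importable; the lexer choice, font and file name are illustrative.
    from pygments import highlight
    from pygments.lexers import PythonLexer
    code = 'print "hello RTF"\n'
    rtf = highlight(code, PythonLexer(), RtfFormatter(fontface='Courier New'))
    with open('example.rtf', 'w') as fh:
        fh.write(rtf)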
| mit |
anoopcs9/samba | source4/dsdb/tests/python/linked_attributes.py | 2 | 26228 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Originally based on ./sam.py
import optparse
import sys
import os
import itertools
sys.path.insert(0, "bin/python")
import samba
from samba.tests.subunitrun import SubunitOptions, TestProgram
import samba.getopt as options
from samba.auth import system_session
import ldb
from samba.samdb import SamDB
from samba.dcerpc import misc
parser = optparse.OptionParser("linked_attributes.py [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)
parser.add_option('--delete-in-setup', action='store_true',
help="cleanup in setup")
parser.add_option('--no-cleanup', action='store_true',
help="don't cleanup in teardown")
parser.add_option('--no-reveal-internals', action='store_true',
help="Only use windows compatible ldap controls")
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_usage()
sys.exit(1)
host = args[0]
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
class LATestException(Exception):
pass
class LATests(samba.tests.TestCase):
def setUp(self):
super(LATests, self).setUp()
self.samdb = SamDB(host, credentials=creds,
session_info=system_session(lp), lp=lp)
self.base_dn = self.samdb.domain_dn()
self.ou = "OU=la,%s" % self.base_dn
if opts.delete_in_setup:
try:
self.samdb.delete(self.ou, ['tree_delete:1'])
except ldb.LdbError, e:
print "tried deleting %s, got error %s" % (self.ou, e)
self.samdb.add({'objectclass': 'organizationalUnit',
'dn': self.ou})
def tearDown(self):
super(LATests, self).tearDown()
if not opts.no_cleanup:
self.samdb.delete(self.ou, ['tree_delete:1'])
def add_object(self, cn, objectclass, more_attrs={}):
dn = "CN=%s,%s" % (cn, self.ou)
attrs = {'cn': cn,
'objectclass': objectclass,
'dn': dn}
attrs.update(more_attrs)
self.samdb.add(attrs)
return dn
def add_objects(self, n, objectclass, prefix=None, more_attrs={}):
if prefix is None:
prefix = objectclass
dns = []
for i in range(n):
dns.append(self.add_object("%s%d" % (prefix, i + 1),
objectclass,
more_attrs=more_attrs))
return dns
def add_linked_attribute(self, src, dest, attr='member',
controls=None):
m = ldb.Message()
m.dn = ldb.Dn(self.samdb, src)
m[attr] = ldb.MessageElement(dest, ldb.FLAG_MOD_ADD, attr)
self.samdb.modify(m, controls=controls)
def remove_linked_attribute(self, src, dest, attr='member',
controls=None):
m = ldb.Message()
m.dn = ldb.Dn(self.samdb, src)
m[attr] = ldb.MessageElement(dest, ldb.FLAG_MOD_DELETE, attr)
self.samdb.modify(m, controls=controls)
def replace_linked_attribute(self, src, dest, attr='member',
controls=None):
m = ldb.Message()
m.dn = ldb.Dn(self.samdb, src)
m[attr] = ldb.MessageElement(dest, ldb.FLAG_MOD_REPLACE, attr)
self.samdb.modify(m, controls=controls)
def attr_search(self, obj, attr, scope=ldb.SCOPE_BASE, **controls):
if opts.no_reveal_internals:
if 'reveal_internals' in controls:
del controls['reveal_internals']
controls = ['%s:%d' % (k, int(v)) for k, v in controls.items()]
res = self.samdb.search(obj,
scope=scope,
attrs=[attr],
controls=controls)
return res
def assert_links(self, obj, expected, attr, msg='', **kwargs):
res = self.attr_search(obj, attr, **kwargs)
if len(expected) == 0:
if attr in res[0]:
self.fail("found attr '%s' in %s" % (attr, res[0]))
return
try:
results = list([x[attr] for x in res][0])
except KeyError:
self.fail("missing attr '%s' on %s" % (attr, obj))
expected = sorted(expected)
results = sorted(results)
if expected != results:
print msg
print "expected %s" % expected
print "received %s" % results
self.assertEqual(results, expected)
def assert_back_links(self, obj, expected, attr='memberOf', **kwargs):
self.assert_links(obj, expected, attr=attr,
msg='back links do not match', **kwargs)
def assert_forward_links(self, obj, expected, attr='member', **kwargs):
self.assert_links(obj, expected, attr=attr,
msg='forward links do not match', **kwargs)
def get_object_guid(self, dn):
res = self.samdb.search(dn,
scope=ldb.SCOPE_BASE,
attrs=['objectGUID'])
return str(misc.GUID(res[0]['objectGUID'][0]))
def _test_la_backlinks(self, reveal=False):
tag = 'backlinks'
kwargs = {}
if reveal:
tag += '_reveal'
kwargs = {'reveal_internals': 0}
u1, u2 = self.add_objects(2, 'user', 'u_%s' % tag)
g1, g2 = self.add_objects(2, 'group', 'g_%s' % tag)
self.add_linked_attribute(g1, u1)
self.add_linked_attribute(g2, u1)
self.add_linked_attribute(g2, u2)
self.assert_back_links(u1, [g1, g2], **kwargs)
self.assert_back_links(u2, [g2], **kwargs)
def test_la_backlinks(self):
self._test_la_backlinks()
def test_la_backlinks_reveal(self):
if opts.no_reveal_internals:
print 'skipping because --no-reveal-internals'
return
self._test_la_backlinks(True)
def _test_la_backlinks_delete_group(self, reveal=False):
tag = 'del_group'
kwargs = {}
if reveal:
tag += '_reveal'
kwargs = {'reveal_internals': 0}
u1, u2 = self.add_objects(2, 'user', 'u_' + tag)
g1, g2 = self.add_objects(2, 'group', 'g_' + tag)
self.add_linked_attribute(g1, u1)
self.add_linked_attribute(g2, u1)
self.add_linked_attribute(g2, u2)
self.samdb.delete(g2, ['tree_delete:1'])
self.assert_back_links(u1, [g1], **kwargs)
self.assert_back_links(u2, set(), **kwargs)
def test_la_backlinks_delete_group(self):
self._test_la_backlinks_delete_group()
def test_la_backlinks_delete_group_reveal(self):
if opts.no_reveal_internals:
print 'skipping because --no-reveal-internals'
return
self._test_la_backlinks_delete_group(True)
def test_links_all_delete_group(self):
u1, u2 = self.add_objects(2, 'user', 'u_all_del_group')
g1, g2 = self.add_objects(2, 'group', 'g_all_del_group')
g2guid = self.get_object_guid(g2)
self.add_linked_attribute(g1, u1)
self.add_linked_attribute(g2, u1)
self.add_linked_attribute(g2, u2)
self.samdb.delete(g2)
self.assert_back_links(u1, [g1], show_deleted=1, show_recycled=1,
show_deactivated_link=0)
self.assert_back_links(u2, set(), show_deleted=1, show_recycled=1,
show_deactivated_link=0)
self.assert_forward_links(g1, [u1], show_deleted=1, show_recycled=1,
show_deactivated_link=0)
self.assert_forward_links('<GUID=%s>' % g2guid,
[], show_deleted=1, show_recycled=1,
show_deactivated_link=0)
def test_links_all_delete_group_reveal(self):
u1, u2 = self.add_objects(2, 'user', 'u_all_del_group_reveal')
g1, g2 = self.add_objects(2, 'group', 'g_all_del_group_reveal')
g2guid = self.get_object_guid(g2)
self.add_linked_attribute(g1, u1)
self.add_linked_attribute(g2, u1)
self.add_linked_attribute(g2, u2)
self.samdb.delete(g2)
self.assert_back_links(u1, [g1], show_deleted=1, show_recycled=1,
show_deactivated_link=0,
reveal_internals=0)
self.assert_back_links(u2, set(), show_deleted=1, show_recycled=1,
show_deactivated_link=0,
reveal_internals=0)
self.assert_forward_links(g1, [u1], show_deleted=1, show_recycled=1,
show_deactivated_link=0,
reveal_internals=0)
self.assert_forward_links('<GUID=%s>' % g2guid,
[], show_deleted=1, show_recycled=1,
show_deactivated_link=0,
reveal_internals=0)
def test_la_links_delete_link(self):
u1, u2 = self.add_objects(2, 'user', 'u_del_link')
g1, g2 = self.add_objects(2, 'group', 'g_del_link')
res = self.samdb.search(g1, scope=ldb.SCOPE_BASE,
attrs=['uSNChanged'])
old_usn1 = int(res[0]['uSNChanged'][0])
self.add_linked_attribute(g1, u1)
res = self.samdb.search(g1, scope=ldb.SCOPE_BASE,
attrs=['uSNChanged'])
new_usn1 = int(res[0]['uSNChanged'][0])
self.assertNotEqual(old_usn1, new_usn1, "USN should have incremented")
self.add_linked_attribute(g2, u1)
self.add_linked_attribute(g2, u2)
res = self.samdb.search(g2, scope=ldb.SCOPE_BASE,
attrs=['uSNChanged'])
old_usn2 = int(res[0]['uSNChanged'][0])
self.remove_linked_attribute(g2, u1)
res = self.samdb.search(g2, scope=ldb.SCOPE_BASE,
attrs=['uSNChanged'])
new_usn2 = int(res[0]['uSNChanged'][0])
self.assertNotEqual(old_usn2, new_usn2, "USN should have incremented")
self.assert_forward_links(g1, [u1])
self.assert_forward_links(g2, [u2])
self.add_linked_attribute(g2, u1)
self.assert_forward_links(g2, [u1, u2])
self.remove_linked_attribute(g2, u2)
self.assert_forward_links(g2, [u1])
self.remove_linked_attribute(g2, u1)
self.assert_forward_links(g2, [])
        # An empty value list removes the whole attribute
        self.remove_linked_attribute(g1, [])
self.assert_forward_links(g1, [])
def _test_la_links_delete_link_reveal(self):
u1, u2 = self.add_objects(2, 'user', 'u_del_link_reveal')
g1, g2 = self.add_objects(2, 'group', 'g_del_link_reveal')
self.add_linked_attribute(g1, u1)
self.add_linked_attribute(g2, u1)
self.add_linked_attribute(g2, u2)
self.remove_linked_attribute(g2, u1)
self.assert_forward_links(g2, [u1, u2], show_deleted=1,
show_recycled=1,
show_deactivated_link=0,
reveal_internals=0
)
def test_la_links_delete_link_reveal(self):
if opts.no_reveal_internals:
print 'skipping because --no-reveal-internals'
return
self._test_la_links_delete_link_reveal()
def test_la_links_delete_user(self):
u1, u2 = self.add_objects(2, 'user', 'u_del_user')
g1, g2 = self.add_objects(2, 'group', 'g_del_user')
self.add_linked_attribute(g1, u1)
self.add_linked_attribute(g2, u1)
self.add_linked_attribute(g2, u2)
res = self.samdb.search(g1, scope=ldb.SCOPE_BASE,
attrs=['uSNChanged'])
old_usn1 = int(res[0]['uSNChanged'][0])
res = self.samdb.search(g2, scope=ldb.SCOPE_BASE,
attrs=['uSNChanged'])
old_usn2 = int(res[0]['uSNChanged'][0])
self.samdb.delete(u1)
self.assert_forward_links(g1, [])
self.assert_forward_links(g2, [u2])
res = self.samdb.search(g1, scope=ldb.SCOPE_BASE,
attrs=['uSNChanged'])
new_usn1 = int(res[0]['uSNChanged'][0])
res = self.samdb.search(g2, scope=ldb.SCOPE_BASE,
attrs=['uSNChanged'])
new_usn2 = int(res[0]['uSNChanged'][0])
# Assert the USN on the alternate object is unchanged
self.assertEqual(old_usn1, new_usn1)
self.assertEqual(old_usn2, new_usn2)
def test_la_links_delete_user_reveal(self):
u1, u2 = self.add_objects(2, 'user', 'u_del_user_reveal')
g1, g2 = self.add_objects(2, 'group', 'g_del_user_reveal')
self.add_linked_attribute(g1, u1)
self.add_linked_attribute(g2, u1)
self.add_linked_attribute(g2, u2)
self.samdb.delete(u1)
self.assert_forward_links(g2, [u2],
show_deleted=1, show_recycled=1,
show_deactivated_link=0,
reveal_internals=0)
self.assert_forward_links(g1, [],
show_deleted=1, show_recycled=1,
show_deactivated_link=0,
reveal_internals=0)
def test_multiple_links(self):
u1, u2, u3, u4 = self.add_objects(4, 'user', 'u_multiple_links')
g1, g2, g3, g4 = self.add_objects(4, 'group', 'g_multiple_links')
self.add_linked_attribute(g1, [u1, u2, u3, u4])
self.add_linked_attribute(g2, [u3, u1])
self.add_linked_attribute(g3, u2)
try:
# adding u2 twice should be an error
self.add_linked_attribute(g2, [u1, u2, u3, u2])
except ldb.LdbError as (num, msg):
if num != ldb.ERR_ENTRY_ALREADY_EXISTS:
self.fail("adding duplicate values, expected "
"ERR_ENTRY_ALREADY_EXISTS, (%d) "
"got %d" % (ldb.ERR_ENTRY_ALREADY_EXISTS, num))
self.assert_forward_links(g1, [u1, u2, u3, u4])
self.assert_forward_links(g2, [u3, u1])
self.assert_forward_links(g3, [u2])
self.assert_back_links(u1, [g2, g1])
self.assert_back_links(u2, [g3, g1])
self.assert_back_links(u3, [g2, g1])
self.assert_back_links(u4, [g1])
self.remove_linked_attribute(g2, [u1, u3])
self.remove_linked_attribute(g1, [u1, u3])
self.assert_forward_links(g1, [u2, u4])
self.assert_forward_links(g2, [])
self.assert_forward_links(g3, [u2])
self.assert_back_links(u1, [])
self.assert_back_links(u2, [g3, g1])
self.assert_back_links(u3, [])
self.assert_back_links(u4, [g1])
self.add_linked_attribute(g1, [u1, u3])
self.add_linked_attribute(g2, [u3, u1])
self.add_linked_attribute(g3, [u1, u3])
self.assert_forward_links(g1, [u1, u2, u3, u4])
self.assert_forward_links(g2, [u1, u3])
self.assert_forward_links(g3, [u1, u2, u3])
self.assert_back_links(u1, [g1, g2, g3])
self.assert_back_links(u2, [g3, g1])
self.assert_back_links(u3, [g3, g2, g1])
self.assert_back_links(u4, [g1])
def test_la_links_replace(self):
u1, u2, u3, u4 = self.add_objects(4, 'user', 'u_replace')
g1, g2, g3, g4 = self.add_objects(4, 'group', 'g_replace')
self.add_linked_attribute(g1, [u1, u2])
self.add_linked_attribute(g2, [u1, u3])
self.add_linked_attribute(g3, u1)
self.replace_linked_attribute(g1, [u2])
self.replace_linked_attribute(g2, [u2, u3])
self.replace_linked_attribute(g3, [u1, u3])
self.replace_linked_attribute(g4, [u4])
self.assert_forward_links(g1, [u2])
self.assert_forward_links(g2, [u3, u2])
self.assert_forward_links(g3, [u3, u1])
self.assert_forward_links(g4, [u4])
self.assert_back_links(u1, [g3])
self.assert_back_links(u2, [g1, g2])
self.assert_back_links(u3, [g2, g3])
self.assert_back_links(u4, [g4])
self.replace_linked_attribute(g1, [u1, u2, u3])
self.replace_linked_attribute(g2, [u1])
self.replace_linked_attribute(g3, [u2])
self.replace_linked_attribute(g4, [])
self.assert_forward_links(g1, [u1, u2, u3])
self.assert_forward_links(g2, [u1])
self.assert_forward_links(g3, [u2])
self.assert_forward_links(g4, [])
self.assert_back_links(u1, [g1, g2])
self.assert_back_links(u2, [g1, g3])
self.assert_back_links(u3, [g1])
self.assert_back_links(u4, [])
def test_la_links_replace2(self):
users = self.add_objects(12, 'user', 'u_replace2')
g1, = self.add_objects(1, 'group', 'g_replace2')
self.add_linked_attribute(g1, users[:6])
self.assert_forward_links(g1, users[:6])
self.replace_linked_attribute(g1, users)
self.assert_forward_links(g1, users)
self.replace_linked_attribute(g1, users[6:])
self.assert_forward_links(g1, users[6:])
self.remove_linked_attribute(g1, users[6:9])
self.assert_forward_links(g1, users[9:])
self.remove_linked_attribute(g1, users[9:])
self.assert_forward_links(g1, [])
def test_la_links_permutations(self):
"""Make sure the order in which we add links doesn't matter."""
users = self.add_objects(3, 'user', 'u_permutations')
groups = self.add_objects(6, 'group', 'g_permutations')
for g, p in zip(groups, itertools.permutations(users)):
self.add_linked_attribute(g, p)
# everyone should be in every group
for g in groups:
self.assert_forward_links(g, users)
for u in users:
self.assert_back_links(u, groups)
for g, p in zip(groups[::-1], itertools.permutations(users)):
self.replace_linked_attribute(g, p)
for g in groups:
self.assert_forward_links(g, users)
for u in users:
self.assert_back_links(u, groups)
for g, p in zip(groups, itertools.permutations(users)):
self.remove_linked_attribute(g, p)
for g in groups:
self.assert_forward_links(g, [])
for u in users:
self.assert_back_links(u, [])
def test_la_links_relaxed(self):
"""Check that the relax control doesn't mess with linked attributes."""
relax_control = ['relax:0']
users = self.add_objects(10, 'user', 'u_relax')
groups = self.add_objects(3, 'group', 'g_relax',
more_attrs={'member': users[:2]})
g_relax1, g_relax2, g_uptight = groups
# g_relax1 has all users added at once
# g_relax2 gets them one at a time in reverse order
# g_uptight never relaxes
self.add_linked_attribute(g_relax1, users[2:5], controls=relax_control)
for u in reversed(users[2:5]):
self.add_linked_attribute(g_relax2, u, controls=relax_control)
self.add_linked_attribute(g_uptight, u)
for g in groups:
self.assert_forward_links(g, users[:5])
self.add_linked_attribute(g, users[5:7])
self.assert_forward_links(g, users[:7])
for u in users[7:]:
self.add_linked_attribute(g, u)
self.assert_forward_links(g, users)
for u in users:
self.assert_back_links(u, groups)
# try some replacement permutations
import random
random.seed(1)
users2 = users[:]
for i in range(5):
random.shuffle(users2)
self.replace_linked_attribute(g_relax1, users2,
controls=relax_control)
self.assert_forward_links(g_relax1, users)
for i in range(5):
random.shuffle(users2)
self.remove_linked_attribute(g_relax2, users2,
controls=relax_control)
self.remove_linked_attribute(g_uptight, users2)
self.replace_linked_attribute(g_relax1, [], controls=relax_control)
random.shuffle(users2)
self.add_linked_attribute(g_relax2, users2,
controls=relax_control)
self.add_linked_attribute(g_uptight, users2)
self.replace_linked_attribute(g_relax1, users2,
controls=relax_control)
self.assert_forward_links(g_relax1, users)
self.assert_forward_links(g_relax2, users)
self.assert_forward_links(g_uptight, users)
for u in users:
self.assert_back_links(u, groups)
def test_add_all_at_once(self):
"""All these other tests are creating linked attributes after the
objects are there. We want to test creating them all at once
using LDIF.
"""
users = self.add_objects(7, 'user', 'u_all_at_once')
g1, g3 = self.add_objects(2, 'group', 'g_all_at_once',
more_attrs={'member': users})
(g2,) = self.add_objects(1, 'group', 'g_all_at_once2',
more_attrs={'member': users[:5]})
try:
self.add_objects(1, 'group', 'g_with_duplicate_links',
more_attrs={'member': users[:5] + users[1:2]})
except ldb.LdbError as (num, msg):
if num != ldb.ERR_ENTRY_ALREADY_EXISTS:
self.fail("adding duplicate values, expected "
"ERR_ENTRY_ALREADY_EXISTS, (%d) "
"got %d" % (ldb.ERR_ENTRY_ALREADY_EXISTS, num))
self.assert_forward_links(g1, users)
self.assert_forward_links(g2, users[:5])
self.assert_forward_links(g3, users)
for u in users[:5]:
self.assert_back_links(u, [g1, g2, g3])
for u in users[5:]:
self.assert_back_links(u, [g1, g3])
self.remove_linked_attribute(g2, users[0])
self.remove_linked_attribute(g2, users[1])
self.add_linked_attribute(g2, users[1])
self.add_linked_attribute(g2, users[5])
self.add_linked_attribute(g2, users[6])
self.assert_forward_links(g1, users)
self.assert_forward_links(g2, users[1:])
for u in users[1:]:
self.remove_linked_attribute(g2, u)
self.remove_linked_attribute(g1, users)
for u in users:
self.samdb.delete(u)
self.assert_forward_links(g1, [])
self.assert_forward_links(g2, [])
self.assert_forward_links(g3, [])
def test_one_way_attributes(self):
e1, e2 = self.add_objects(2, 'msExchConfigurationContainer',
'e_one_way')
guid = self.get_object_guid(e2)
self.add_linked_attribute(e1, e2, attr="addressBookRoots")
self.assert_forward_links(e1, [e2], attr='addressBookRoots')
self.samdb.delete(e2)
res = self.samdb.search("<GUID=%s>" % guid,
scope=ldb.SCOPE_BASE,
controls=['show_deleted:1',
'show_recycled:1'])
new_dn = str(res[0].dn)
self.assert_forward_links(e1, [new_dn], attr='addressBookRoots')
self.assert_forward_links(e1, [new_dn],
attr='addressBookRoots',
show_deactivated_link=0)
def test_one_way_attributes_delete_link(self):
e1, e2 = self.add_objects(2, 'msExchConfigurationContainer',
'e_one_way')
guid = self.get_object_guid(e2)
self.add_linked_attribute(e1, e2, attr="addressBookRoots")
self.assert_forward_links(e1, [e2], attr='addressBookRoots')
self.remove_linked_attribute(e1, e2, attr="addressBookRoots")
self.assert_forward_links(e1, [], attr='addressBookRoots')
self.assert_forward_links(e1, [], attr='addressBookRoots',
show_deactivated_link=0)
def test_pretend_one_way_attributes(self):
e1, e2 = self.add_objects(2, 'msExchConfigurationContainer',
'e_one_way')
guid = self.get_object_guid(e2)
self.add_linked_attribute(e1, e2, attr="addressBookRoots2")
self.assert_forward_links(e1, [e2], attr='addressBookRoots2')
self.samdb.delete(e2)
res = self.samdb.search("<GUID=%s>" % guid,
scope=ldb.SCOPE_BASE,
controls=['show_deleted:1',
'show_recycled:1'])
new_dn = str(res[0].dn)
self.assert_forward_links(e1, [], attr='addressBookRoots2')
self.assert_forward_links(e1, [], attr='addressBookRoots2',
show_deactivated_link=0)
def test_pretend_one_way_attributes_delete_link(self):
e1, e2 = self.add_objects(2, 'msExchConfigurationContainer',
'e_one_way')
guid = self.get_object_guid(e2)
self.add_linked_attribute(e1, e2, attr="addressBookRoots2")
self.assert_forward_links(e1, [e2], attr='addressBookRoots2')
self.remove_linked_attribute(e1, e2, attr="addressBookRoots2")
self.assert_forward_links(e1, [], attr='addressBookRoots2')
self.assert_forward_links(e1, [], attr='addressBookRoots2',
show_deactivated_link=0)
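def _example_add_member(samdb, group_dn, user_dn):
    # A standalone sketch, not part of the original tests, of the raw ldb
    # modify that the add_linked_attribute() helper above performs: adding
    # user_dn to the group's 'member' forward link; the DC then maintains
    # the matching 'memberOf' back link automatically.
    m = ldb.Message()
    m.dn = ldb.Dn(samdb, group_dn)
    m['member'] = ldb.MessageElement(user_dn, ldb.FLAG_MOD_ADD, 'member')
    samdb.modify(m)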
if "://" not in host:
if os.path.isfile(host):
host = "tdb://%s" % host
else:
host = "ldap://%s" % host
TestProgram(module=__name__, opts=subunitopts)
| gpl-3.0 |
luckylavish/zamboni | mkt/api/tests/test_fields.py | 9 | 15385 | # -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.test.client import RequestFactory
from mock import Mock
from nose.tools import eq_, ok_
from rest_framework.request import Request
from rest_framework.serializers import CharField, Serializer
from rest_framework.test import APIRequestFactory
from mkt.api.fields import (ESTranslationSerializerField,
GuessLanguageTranslationField, IntegerRangeField,
SlugChoiceField, SlugOrPrimaryKeyRelatedField,
SplitField, TranslationSerializerField)
from mkt.carriers import CARRIER_MAP
from mkt.site.fixtures import fixture
from mkt.site.tests import TestCase
from mkt.site.utils import app_factory
from mkt.translations.models import Translation
from mkt.webapps.models import Webapp
class _TestTranslationSerializerField(object):
field_class = TranslationSerializerField
def setUp(self):
super(_TestTranslationSerializerField, self).setUp()
self.factory = APIRequestFactory()
self.app = Webapp.objects.get(pk=337141)
def _test_expected_dict(self, field):
result = field.field_to_native(self.app, 'name')
expected = {
'en-US': unicode(Translation.objects.get(id=self.app.name.id,
locale='en-US')),
'es': unicode(Translation.objects.get(id=self.app.name.id,
locale='es')),
}
eq_(result, expected)
result = field.field_to_native(self.app, 'description')
expected = {
'en-US': Translation.objects.get(id=self.app.description.id,
locale='en-US'),
}
eq_(result, expected)
def _test_expected_single_string(self, field):
result = field.field_to_native(self.app, 'name')
expected = unicode(self.app.name)
eq_(result, expected)
result = field.field_to_native(self.app, 'description')
expected = unicode(self.app.description)
eq_(result, expected)
def test_from_native(self):
data = u'Translatiön'
field = self.field_class()
result = field.from_native(data)
eq_(result, data)
data = {
'fr': u'Non mais Allô quoi !',
'en-US': u'No But Hello what!'
}
field = self.field_class()
result = field.from_native(data)
eq_(result, data)
data = ['Bad Data']
field = self.field_class()
result = field.from_native(data)
eq_(result, unicode(data))
def test_field_from_native_strip(self):
data = {
'fr': u' Non mais Allô quoi ! ',
'en-US': u''
}
field = self.field_class()
result = field.from_native(data)
eq_(result, {'fr': u'Non mais Allô quoi !', 'en-US': u''})
def test_wrong_locale_code(self):
data = {
'unknown-locale': 'some name',
}
field = self.field_class()
result = field.from_native(data)
with self.assertRaises(ValidationError) as exc:
field.validate(result)
eq_(exc.exception.message,
"The language code 'unknown-locale' is invalid.")
def test_none_type_locale_is_allowed(self):
# None values are valid because they are used to nullify existing
# translations in something like a PATCH.
data = {
'en-US': None,
}
field = self.field_class()
result = field.from_native(data)
field.validate(result)
eq_(result, data)
def test_field_to_native(self):
field = self.field_class()
self._test_expected_dict(field)
def test_field_to_native_source(self):
self.app.mymock = Mock()
self.app.mymock.mymocked_field = self.app.name
field = self.field_class(source='mymock.mymocked_field')
result = field.field_to_native(self.app, 'shouldbeignored')
expected = {
'en-US': unicode(Translation.objects.get(id=self.app.name.id,
locale='en-US')),
'es': unicode(Translation.objects.get(id=self.app.name.id,
locale='es')),
}
eq_(result, expected)
def test_field_to_native_empty_context(self):
mock_serializer = Serializer()
mock_serializer.context = {}
field = self.field_class()
field.initialize(mock_serializer, 'name')
self._test_expected_dict(field)
def test_field_to_native_request_POST(self):
request = Request(self.factory.post('/'))
mock_serializer = Serializer()
mock_serializer.context = {'request': request}
field = self.field_class()
field.initialize(mock_serializer, 'name')
self._test_expected_dict(field)
def test_field_to_native_request_GET(self):
request = Request(self.factory.get('/'))
mock_serializer = Serializer()
mock_serializer.context = {'request': request}
field = self.field_class()
field.initialize(mock_serializer, 'name')
self._test_expected_dict(field)
def test_field_to_native_request_GET_lang(self):
"""
Pass a lang in the query string, expect to have a single string
returned instead of an object.
"""
        # Note that we don't go through the middlewares etc., so the active
        # language for the process isn't actually changed. That's fine:
        # _test_expected_single_string() simply tests with the current
        # language, whatever it is.
request = Request(self.factory.get('/', {'lang': 'lol'}))
eq_(request.GET['lang'], 'lol')
mock_serializer = Serializer()
mock_serializer.context = {'request': request}
field = self.field_class()
field.initialize(mock_serializer, 'name')
self._test_expected_single_string(field)
def test_field_null(self):
field = self.field_class()
self.app = Webapp()
result = field.field_to_native(self.app, 'name')
eq_(result, None)
result = field.field_to_native(self.app, 'description')
eq_(result, None)
class TestGuessLanguageTranslationField(TestCase):
def guessed_value(self, input, expected_output):
field = GuessLanguageTranslationField()
FIELD_NAME = 'testfield'
into = {}
DATA = {FIELD_NAME: input}
field.field_from_native(DATA, None, FIELD_NAME, into)
eq_(expected_output, into[FIELD_NAME])
def test_english(self):
data = u'This is in English.'
self.guessed_value(data, {'en-us': data})
def test_french(self):
data = u'Ceci est écrit en français.'
self.guessed_value(data, {'fr': data})
class TestESTranslationSerializerField(_TestTranslationSerializerField,
TestCase):
field_class = ESTranslationSerializerField
def setUp(self):
self.factory = APIRequestFactory()
self.app = Webapp()
self.app.default_locale = 'en-US'
self.app.name_translations = {
'en-US': u'English Name',
'es': u'Spànish Name'
}
self.app.description_translations = {
'en-US': u'English Description',
'fr': u'Frençh Description'
}
def test_attach_translations(self):
data = {
'foo_translations': [{
'lang': 'testlang',
'string': 'teststring'
}, {
'lang': 'testlang2',
'string': 'teststring2'
}]
}
self.app = Webapp()
self.field_class().attach_translations(self.app, data, 'foo')
eq_(self.app.foo_translations, {'testlang': 'teststring',
'testlang2': 'teststring2'})
def test_attach_translations_target_name(self):
data = {
'foo_translations': [{
'lang': 'testlang',
'string': 'teststring'
}, {
'lang': 'testlang2',
'string': 'teststring2'
}]
}
self.app = Webapp()
self.field_class().attach_translations(
self.app, data, 'foo', target_name='bar')
eq_(self.app.bar_translations, {'testlang': 'teststring',
'testlang2': 'teststring2'})
def test_attach_translations_missing_key(self):
data = {
'foo_translations': None
}
self.app = Webapp()
self.field_class().attach_translations(self.app, data, 'foo')
eq_(self.app.foo_translations, {})
def _test_expected_dict(self, field):
result = field.field_to_native(self.app, 'name')
expected = self.app.name_translations
eq_(result, expected)
result = field.field_to_native(self.app, 'description')
expected = self.app.description_translations
eq_(result, expected)
def _test_expected_single_string(self, field):
result = field.field_to_native(self.app, 'name')
expected = unicode(self.app.name_translations['en-US'])
eq_(result, expected)
result = field.field_to_native(self.app, 'description')
expected = unicode(self.app.description_translations['en-US'])
eq_(result, expected)
def test_field_to_native_source(self):
self.app.mymock = Mock()
self.app.mymock.mymockedfield_translations = self.app.name_translations
field = self.field_class(source='mymock.mymockedfield')
result = field.field_to_native(self.app, 'shouldbeignored')
expected = self.app.name_translations
eq_(result, expected)
def test_field_null(self):
field = self.field_class()
self.app.name_translations = {}
result = field.field_to_native(self.app, 'name')
eq_(result, None)
self.app.description_translations = None
result = field.field_to_native(self.app, 'description')
eq_(result, None)
class SlugOrPrimaryKeyRelatedFieldTests(TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.app = Webapp.objects.get(pk=337141)
def test_render_as_pk(self):
obj = Mock()
obj.attached = self.app
field = SlugOrPrimaryKeyRelatedField()
eq_(field.field_to_native(obj, 'attached'), self.app.pk)
def test_render_as_pks_many(self):
obj = Mock()
obj.attached = [self.app]
field = SlugOrPrimaryKeyRelatedField(many=True)
eq_(field.field_to_native(obj, 'attached'), [self.app.pk])
def test_render_as_slug(self):
obj = Mock()
obj.attached = self.app
field = SlugOrPrimaryKeyRelatedField(render_as='slug',
slug_field='app_slug')
eq_(field.field_to_native(obj, 'attached'), self.app.app_slug)
def test_render_as_slugs_many(self):
obj = Mock()
obj.attached = [self.app]
field = SlugOrPrimaryKeyRelatedField(render_as='slug',
slug_field='app_slug', many=True)
eq_(field.field_to_native(obj, 'attached'), [self.app.app_slug])
def test_parse_as_pk(self):
into = {}
field = SlugOrPrimaryKeyRelatedField(queryset=Webapp.objects.all())
field.field_from_native({'addon': self.app.pk}, None, 'addon', into)
eq_(into, {'addon': self.app})
def test_parse_as_pks_many(self):
app2 = app_factory()
into = {}
field = SlugOrPrimaryKeyRelatedField(queryset=Webapp.objects.all(),
many=True)
field.field_from_native({'apps': [self.app.pk, app2.pk]}, None,
'apps', into)
eq_(into, {'apps': [self.app, app2]})
def test_parse_as_slug(self):
into = {}
field = SlugOrPrimaryKeyRelatedField(queryset=Webapp.objects.all(),
slug_field='app_slug')
field.field_from_native({'app': self.app.app_slug}, None, 'app', into)
eq_(into, {'app': self.app})
def test_parse_as_slugs_many(self):
app2 = app_factory(app_slug='foo')
into = {}
field = SlugOrPrimaryKeyRelatedField(queryset=Webapp.objects.all(),
slug_field='app_slug', many=True)
field.field_from_native({'apps': [self.app.app_slug, app2.app_slug]},
None, 'apps', into)
eq_(into, {'apps': [self.app, app2]})
class TestSlugChoiceField(TestCase):
field_class = SlugChoiceField
def setUp(self):
super(TestSlugChoiceField, self).setUp()
self.factory = APIRequestFactory()
    def field(self, **kwargs):
        # Note: this rebinds self.field from the method to the field
        # instance, so it can only be called once per test
        self.field = self.field_class(**kwargs)
        return self.field
def test_to_native(self):
field = self.field(choices_dict=CARRIER_MAP)
eq_(field.to_native(1), 'telefonica')
def test_to_native_none(self):
field = self.field(choices_dict=CARRIER_MAP)
eq_(field.to_native(None), None)
def test_to_native_zero(self):
field = self.field(choices_dict=CARRIER_MAP)
eq_(field.to_native(0), 'carrierless')
class Spud(object):
pass
class Potato(object):
def __init__(self, spud):
self.spud = spud
class SpudSerializer(Serializer):
pass
class PotatoSerializer(Serializer):
spud = SplitField(CharField(), SpudSerializer())
class TestSplitField(TestCase):
def setUp(self):
self.request = RequestFactory().get('/')
self.spud = Spud()
self.potato = Potato(self.spud)
self.serializer = PotatoSerializer(self.potato,
context={'request': self.request})
def test_initialize(self):
"""
Test that the request context is passed from PotatoSerializer's context
to the context of `PotatoSerializer.spud.output`.
"""
field = self.serializer.fields['spud']
eq_(self.request, field.output.context['request'],
self.serializer.context['request'])
ok_(not hasattr(field.input, 'context'))
class TestIntegerRangeField(TestCase):
field_class = IntegerRangeField
def setUp(self):
self.field = None
def set_field(self, min_value=None, max_value=None):
self.field = self.field_class(min_value=min_value, max_value=max_value)
def is_invalid(self, value):
with self.assertRaises(ValidationError):
self.field.to_python(value)
def is_valid(self, value):
eq_(value, self.field.to_python(value))
def test_min_value(self):
self.set_field(min_value=2)
self.is_invalid(1)
self.is_valid(2)
self.is_valid(3)
def test_max_value(self):
self.set_field(max_value=2)
self.is_valid(1)
self.is_valid(2)
self.is_invalid(3)
def test_min_max_value(self):
self.set_field(min_value=2, max_value=4)
self.is_invalid(1)
self.is_valid(2)
self.is_valid(3)
self.is_valid(4)
self.is_invalid(5)
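class _ExampleIntegerRangeField(object):
    # A minimal sketch, not part of the original module, of the behaviour
    # the tests above assert: to_python() raises ValidationError for values
    # outside [min_value, max_value]. The real IntegerRangeField lives in
    # mkt.api.fields; this standalone class is illustrative only.
    def __init__(self, min_value=None, max_value=None):
        self.min_value = min_value
        self.max_value = max_value
    def to_python(self, value):
        if self.min_value is not None and value < self.min_value:
            raise ValidationError('%d is below min_value %d'
                                  % (value, self.min_value))
        if self.max_value is not None and value > self.max_value:
            raise ValidationError('%d is above max_value %d'
                                  % (value, self.max_value))
        return value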
| bsd-3-clause |
osstech-jp/samba | python/samba/tests/docs.py | 15 | 15827 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2007-2012
#
# Tests for documentation.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for presence of documentation."""
import samba
import samba.tests
import os
import re
import subprocess
import xml.etree.ElementTree as ET
class TestCase(samba.tests.TestCaseInTempDir):
def _format_message(self, parameters, message):
parameters = list(parameters)
parameters = map(str, parameters)
parameters.sort()
return message + '\n\n %s' % ('\n '.join(parameters))
def get_documented_parameters(sourcedir):
path = os.path.join(sourcedir, "bin", "default", "docs-xml", "smbdotconf")
if not os.path.exists(os.path.join(path, "parameters.all.xml")):
raise Exception("Unable to find parameters.all.xml")
try:
p = open(os.path.join(path, "parameters.all.xml"), 'r')
except IOError, e:
raise Exception("Error opening parameters file")
out = p.read()
root = ET.fromstring(out)
for parameter in root:
name = parameter.attrib.get('name')
if parameter.attrib.get('removed') == "1":
continue
yield name
syn = parameter.findall('synonym')
if syn is not None:
for sy in syn:
yield sy.text
p.close()
def get_param_table_full(sourcedir, filename="lib/param/param_table_static.c"):
# Reading entries from source code
f = open(os.path.join(sourcedir, filename), "r")
try:
# burn through the preceding lines
while True:
l = f.readline()
if l.startswith("struct parm_struct parm_table"):
break
for l in f.readlines():
if re.match("^\s*\}\;\s*$", l):
# end of the table reached
break
if re.match("^\s*\{\s*$", l):
# start a new entry
_label = ""
_type = ""
_class = ""
_offset = ""
_special = ""
_enum_list = ""
_flags = ""
continue
if re.match("^\s*\},\s*$", l):
# finish the entry
yield _label, _type, _class, _offset, _special, _enum_list, _flags
continue
m = re.match("^\s*\.([^\s]+)\s*=\s*(.*),.*", l)
if not m:
continue
attrib = m.group(1)
value = m.group(2)
if attrib == "label":
_label = value
elif attrib == "type":
_type = value
elif attrib == "p_class":
_class = value
elif attrib == "offset":
_offset = value
elif attrib == "special":
_special = value
elif attrib == "enum_list":
_special = value
elif attrib == "flags":
_flags = value
finally:
f.close()
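def _example_parse_param_table_line():
    # A self-contained sketch, not part of the original module, of the kind
    # of C initializer line that get_param_table_full() parses; the sample
    # parameter name is illustrative.
    sample = '\t.label\t\t= "allow dns updates",'
    m = re.match("^\s*\.([^\s]+)\s*=\s*(.*),.*", sample)
    assert m.group(1) == 'label'
    assert m.group(2) == '"allow dns updates"'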
def get_documented_tuples(sourcedir, omit_no_default=True):
path = os.path.join(sourcedir, "bin", "default", "docs-xml", "smbdotconf")
if not os.path.exists(os.path.join(path, "parameters.all.xml")):
raise Exception("Unable to find parameters.all.xml")
try:
p = open(os.path.join(path, "parameters.all.xml"), 'r')
except IOError, e:
raise Exception("Error opening parameters file")
out = p.read()
root = ET.fromstring(out)
for parameter in root:
name = parameter.attrib.get("name")
param_type = parameter.attrib.get("type")
if parameter.attrib.get('removed') == "1":
continue
values = parameter.findall("value")
defaults = []
for value in values:
if value.attrib.get("type") == "default":
defaults.append(value)
default_text = None
if len(defaults) == 0:
if omit_no_default:
continue
elif len(defaults) > 1:
raise Exception("More than one default found for parameter %s" % name)
else:
default_text = defaults[0].text
if default_text is None:
default_text = ""
context = parameter.attrib.get("context")
yield name, default_text, context, param_type
p.close()
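def _example_documented_tuple_xml():
    # A self-contained sketch, not part of the original module, of the XML
    # shape get_documented_tuples() consumes: each <parameter> may carry a
    # <value type="default"> child. The sample parameter is illustrative.
    xml = ('<parameters><parameter name="workgroup" type="string" '
           'context="G"><value type="default">WORKGROUP</value>'
           '</parameter></parameters>')
    root = ET.fromstring(xml)
    param = root[0]
    assert param.attrib.get('name') == 'workgroup'
    assert param.find('value').attrib.get('type') == 'default'
    assert param.find('value').text == 'WORKGROUP'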
class SmbDotConfTests(TestCase):
# defines the cases where the defaults may differ from the documentation
special_cases = set(['log level', 'path', 'ldapsam:trusted', 'spoolss: architecture',
'share:fake_fscaps', 'ldapsam:editposix', 'rpc_daemon:DAEMON',
'rpc_server:SERVER', 'panic action', 'homedir map', 'NIS homedir',
'server string', 'netbios name', 'socket options', 'use mmap',
'ctdbd socket', 'printing', 'printcap name', 'queueresume command',
'queuepause command','lpresume command', 'lppause command',
'lprm command', 'lpq command', 'print command', 'template homedir',
'spoolss: os_major', 'spoolss: os_minor', 'spoolss: os_build',
'max open files', 'fss: prune stale', 'fss: sequence timeout'])
def setUp(self):
super(SmbDotConfTests, self).setUp()
# create a minimal smb.conf file for testparm
self.smbconf = os.path.join(self.tempdir, "paramtestsmb.conf")
f = open(self.smbconf, 'w')
try:
f.write("""
[test]
path = /
""")
finally:
f.close()
self.blankconf = os.path.join(self.tempdir, "emptytestsmb.conf")
f = open(self.blankconf, 'w')
try:
f.write("")
finally:
f.close()
self.topdir = os.path.abspath(samba.source_tree_topdir())
try:
self.documented = set(get_documented_parameters(self.topdir))
except:
self.fail("Unable to load documented parameters")
try:
self.table_gen = set(get_param_table_full(self.topdir,
"bin/default/lib/param/param_table_gen.c"))
except:
self.fail("Unable to load generated parameter table")
try:
self.defaults = set(get_documented_tuples(self.topdir))
except:
self.fail("Unable to load parameters")
try:
self.defaults_all = set(get_documented_tuples(self.topdir, False))
except:
self.fail("Unable to load parameters")
def tearDown(self):
super(SmbDotConfTests, self).tearDown()
os.unlink(self.smbconf)
os.unlink(self.blankconf)
def test_default_s3(self):
self._test_default(['bin/testparm'])
self._set_defaults(['bin/testparm'])
# registry shares appears to need sudo
self._set_arbitrary(['bin/testparm'],
exceptions = ['client lanman auth',
'client plaintext auth',
'registry shares',
'smb ports'])
self._test_empty(['bin/testparm'])
def test_default_s4(self):
self._test_default(['bin/samba-tool', 'testparm'])
self._set_defaults(['bin/samba-tool', 'testparm'])
self._set_arbitrary(['bin/samba-tool', 'testparm'],
exceptions = ['smb ports'])
self._test_empty(['bin/samba-tool', 'testparm'])
def _test_default(self, program):
failset = set()
count = 0
for tuples in self.defaults:
param, default, context, param_type = tuples
if param in self.special_cases:
continue
section = None
if context == "G":
section = "global"
elif context == "S":
section = "test"
else:
self.fail("%s has no valid context" % param)
p = subprocess.Popen(program + ["-s", self.smbconf,
"--section-name", section, "--parameter-name", param],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.topdir).communicate()
if p[0].upper().strip() != default.upper():
if not (p[0].upper().strip() == "" and default == '""'):
doc_triple = "%s\n Expected: %s" % (param, default)
failset.add("%s\n Got: %s" % (doc_triple, p[0].upper().strip()))
if len(failset) > 0:
self.fail(self._format_message(failset,
"Parameters that do not have matching defaults:"))
def _set_defaults(self, program):
failset = set()
count = 0
for tuples in self.defaults:
param, default, context, param_type = tuples
if param in ['printing']:
continue
section = None
if context == "G":
section = "global"
elif context == "S":
section = "test"
else:
self.fail("%s has no valid context" % param)
p = subprocess.Popen(program + ["-s", self.smbconf,
"--section-name", section, "--parameter-name", param,
"--option", "%s = %s" % (param, default)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.topdir).communicate()
if p[0].upper().strip() != default.upper():
if not (p[0].upper().strip() == "" and default == '""'):
doc_triple = "%s\n Expected: %s" % (param, default)
failset.add("%s\n Got: %s" % (doc_triple, p[0].upper().strip()))
if len(failset) > 0:
self.fail(self._format_message(failset,
"Parameters that do not have matching defaults:"))
def _set_arbitrary(self, program, exceptions=None):
arbitrary = {'string': 'string', 'boolean': 'yes', 'integer': '5',
'boolean-rev': 'yes',
'cmdlist': 'a b c',
'bytes': '10',
'octal': '0123',
'ustring': 'ustring',
'enum':'', 'boolean-auto': '', 'char': 'a', 'list': 'a, b, c'}
opposite_arbitrary = {'string': 'string2', 'boolean': 'no', 'integer': '6',
'boolean-rev': 'no',
'cmdlist': 'd e f',
'bytes': '11',
'octal': '0567',
'ustring': 'ustring2',
'enum':'', 'boolean-auto': '', 'char': 'b', 'list': 'd, e, f'}
failset = set()
count = 0
for tuples in self.defaults_all:
param, default, context, param_type = tuples
if param in ['printing', 'copy', 'include', 'log level']:
continue
# currently no easy way to set an arbitrary value for these
if param_type in ['enum', 'boolean-auto']:
continue
if exceptions is not None:
if param in exceptions:
continue
section = None
if context == "G":
section = "global"
elif context == "S":
section = "test"
else:
self.fail("%s has no valid context" % param)
value_to_use = arbitrary.get(param_type)
if value_to_use is None:
self.fail("%s has an invalid type" % param)
p = subprocess.Popen(program + ["-s", self.smbconf,
"--section-name", section, "--parameter-name", param,
"--option", "%s = %s" % (param, value_to_use)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.topdir).communicate()
if p[0].upper().strip() != value_to_use.upper():
# currently no way to distinguish command lists
if param_type == 'list':
if ", ".join(p[0].upper().strip().split()) == value_to_use.upper():
continue
# currently no way to identify octal
if param_type == 'integer':
try:
if int(value_to_use, 8) == int(p[0].strip(), 8):
continue
except:
pass
doc_triple = "%s\n Expected: %s" % (param, value_to_use)
failset.add("%s\n Got: %s" % (doc_triple, p[0].upper().strip()))
opposite_value = opposite_arbitrary.get(param_type)
tempconf = os.path.join(self.tempdir, "tempsmb.conf")
g = open(tempconf, 'w')
try:
towrite = section + "\n"
towrite += param + " = " + opposite_value
g.write(towrite)
finally:
g.close()
p = subprocess.Popen(program + ["-s", tempconf, "--suppress-prompt",
"--option", "%s = %s" % (param, value_to_use)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.topdir).communicate()
os.unlink(tempconf)
            # testparm doesn't display a value if the two are equivalent
if (value_to_use.lower() != opposite_value.lower()):
for line in p[0].splitlines():
if not line.strip().startswith(param):
continue
value_found = line.split("=")[1].upper().strip()
if value_found != value_to_use.upper():
# currently no way to distinguish command lists
if param_type == 'list':
if ", ".join(value_found.split()) == value_to_use.upper():
continue
# currently no way to identify octal
if param_type == 'integer':
try:
if int(value_to_use, 8) == int(value_found, 8):
continue
except:
pass
doc_triple = "%s\n Expected: %s" % (param, value_to_use)
failset.add("%s\n Got: %s" % (doc_triple, value_found))
if len(failset) > 0:
self.fail(self._format_message(failset,
"Parameters that were unexpectedly not set:"))
def _test_empty(self, program):
p = subprocess.Popen(program + ["-s", self.blankconf, "--suppress-prompt"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.topdir).communicate()
output = ""
for line in p[0].splitlines():
if line.strip().startswith('#'):
continue
if line.strip().startswith("idmap config *"):
continue
output += line.strip().lower() + '\n'
if output.strip() != '[global]' and output.strip() != '[globals]':
self.fail("Testparm returned unexpected output on an empty smb.conf.")
| gpl-3.0 |
Edraak/circleci-edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_split_modulestore.py | 23 | 108271 | """
Test split modulestore w/o using any django stuff.
"""
from mock import patch
import datetime
from importlib import import_module
from path import Path as path
import random
import re
import unittest
import uuid
import ddt
from contracts import contract
from nose.plugins.attrib import attr
from django.core.cache import caches, InvalidCacheBackendError
from openedx.core.lib import tempdir
from xblock.fields import Reference, ReferenceList, ReferenceValueDict
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import (
ItemNotFoundError, VersionConflictError,
DuplicateItemError, DuplicateCourseError,
InsufficientSpecificationError
)
from opaque_keys.edx.locator import CourseKey, CourseLocator, BlockUsageLocator, VersionTree, LocalId
from ccx_keys.locator import CCXBlockUsageLocator
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.x_module import XModuleMixin
from xmodule.fields import Date, Timedelta
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.modulestore.tests.test_modulestore import check_has_course_method
from xmodule.modulestore.split_mongo import BlockKey
from xmodule.modulestore.tests.factories import check_mongo_calls
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.modulestore.tests.utils import mock_tab_from_json
from xmodule.modulestore.edit_info import EditInfoMixin
BRANCH_NAME_DRAFT = ModuleStoreEnum.BranchName.draft
BRANCH_NAME_PUBLISHED = ModuleStoreEnum.BranchName.published
@attr('mongo')
class SplitModuleTest(unittest.TestCase):
'''
The base set of tests manually populates a db w/ courses which have
versions. It creates unique collection names and removes them after all
tests finish.
'''
# Snippets of what would be in the django settings envs file
DOC_STORE_CONFIG = {
'host': MONGO_HOST,
'db': 'test_xmodule',
'port': MONGO_PORT_NUM,
'collection': 'modulestore{0}'.format(uuid.uuid4().hex[:5]),
}
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': tempdir.mkdtemp_clean(),
'xblock_mixins': (InheritanceMixin, XModuleMixin, EditInfoMixin)
}
MODULESTORE = {
'ENGINE': 'xmodule.modulestore.split_mongo.split.SplitMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
}
    # avoid creating a django dependency; so this duplicates common.py in envs
match = re.search(r'(.*?/common)(?:$|/)', path(__file__))
COMMON_ROOT = match.group(1)
modulestore = None
_date_field = Date()
_time_delta_field = Timedelta()
COURSE_CONTENT = {
"testx.GreekHero": {
"org": "testx",
"course": "GreekHero",
"run": "run",
"root_block_id": "head12345",
"user_id": "[email protected]",
"fields": {
"tabs": [
{
"type": "courseware"
},
{
"type": "course_info",
"name": "Course Info"
},
{
"type": "discussion",
"name": "Discussion"
},
{
"type": "wiki",
"name": "Wiki"
}
],
"start": _date_field.from_json("2013-02-14T05:00"),
"display_name": "The Ancient Greek Hero",
"grading_policy": {
"GRADER": [
{
"min_count": 5,
"weight": 0.15,
"type": "Homework",
"drop_count": 1,
"short_label": "HWa"
},
{
"short_label": "",
"min_count": 2,
"type": "Lab",
"drop_count": 0,
"weight": 0.15
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.3
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.4
}
],
"GRADE_CUTOFFS": {
"Pass": 0.75
},
},
},
"revisions": [
{
"user_id": "[email protected]",
"update": {
("course", "head12345"): {
"end": _date_field.from_json("2013-04-13T04:30"),
"tabs": [
{
"type": "courseware"
},
{
"type": "course_info",
"name": "Course Info"
},
{
"type": "discussion",
"name": "Discussion"
},
{
"type": "wiki",
"name": "Wiki"
},
{
"type": "static_tab",
"name": "Syllabus",
"url_slug": "01356a17b5924b17a04b7fc2426a3798"
},
{
"type": "static_tab",
"name": "Advice for Students",
"url_slug": "57e9991c0d794ff58f7defae3e042e39"
}
],
"graceperiod": _time_delta_field.from_json("2 hours 0 minutes 0 seconds"),
"grading_policy": {
"GRADER": [
{
"min_count": 5,
"weight": 0.15,
"type": "Homework",
"drop_count": 1,
"short_label": "HWa"
},
{
"short_label": "",
"min_count": 12,
"type": "Lab",
"drop_count": 2,
"weight": 0.15
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.3
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.4
}
],
"GRADE_CUTOFFS": {
"Pass": 0.55
}
},
}
}
},
{
"user_id": "[email protected]",
"update": {
("course", "head12345"): {
"end": _date_field.from_json("2013-06-13T04:30"),
"grading_policy": {
"GRADER": [
{
"min_count": 4,
"weight": 0.15,
"type": "Homework",
"drop_count": 2,
"short_label": "HWa"
},
{
"short_label": "",
"min_count": 12,
"type": "Lab",
"drop_count": 2,
"weight": 0.15
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.3
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.4
}
],
"GRADE_CUTOFFS": {
"Pass": 0.45
}
},
"enrollment_start": _date_field.from_json("2013-01-01T05:00"),
"enrollment_end": _date_field.from_json("2013-03-02T05:00"),
"advertised_start": "Fall 2013",
}
},
"create": [
{
"id": "chapter1",
"parent": "head12345",
"parent_type": "course",
"category": "chapter",
"fields": {
"display_name": "Hercules"
},
},
{
"id": "chapter2",
"parent": "head12345",
"parent_type": "course",
"category": "chapter",
"fields": {
"display_name": "Hera heckles Hercules"
},
},
{
"id": "chapter3",
"parent": "head12345",
"parent_type": "course",
"category": "chapter",
"fields": {
"display_name": "Hera cuckolds Zeus"
},
},
{
"id": "problem1",
"parent": "chapter3",
"parent_type": "chapter",
"category": "problem",
"fields": {
"display_name": "Problem 3.1",
"graceperiod": _time_delta_field.from_json("4 hours 0 minutes 0 seconds"),
},
},
{
"id": "problem3_2",
"parent": "chapter3",
"parent_type": "chapter",
"category": "problem",
"fields": {
"display_name": "Problem 3.2"
},
},
{
"id": "problem32",
"parent": "chapter3",
"parent_type": "chapter",
"category": "problem",
"fields": {
"display_name": "Problem 3.3",
"group_access": {"3": ["33"]},
},
}
]
},
]
},
"testx.wonderful": {
"org": "testx",
"course": "wonderful",
"run": "run",
"root_block_id": "head23456",
"user_id": "[email protected]",
"fields": {
"tabs": [
{
"type": "courseware"
},
{
"type": "course_info",
"name": "Course Info"
},
{
"type": "discussion",
"name": "Discussion"
},
{
"type": "wiki",
"name": "Wiki"
}
],
"start": _date_field.from_json("2013-02-14T05:00"),
"display_name": "A wonderful course",
"grading_policy": {
"GRADER": [
{
"min_count": 14,
"weight": 0.25,
"type": "Homework",
"drop_count": 1,
"short_label": "HWa"
},
{
"short_label": "",
"min_count": 12,
"type": "Lab",
"drop_count": 2,
"weight": 0.25
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.2
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.3
}
],
"GRADE_CUTOFFS": {
"Pass": 0.95
}
},
},
"revisions": [
{
"user_id": "[email protected]",
"update": {
("course", "head23456"): {
"display_name": "The most wonderful course",
"grading_policy": {
"GRADER": [
{
"min_count": 14,
"weight": 0.25,
"type": "Homework",
"drop_count": 1,
"short_label": "HWa"
},
{
"short_label": "",
"min_count": 12,
"type": "Lab",
"drop_count": 2,
"weight": 0.25
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.2
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.3
}
],
"GRADE_CUTOFFS": {
"Pass": 0.45
}
},
}
}
}
]
},
"guestx.contender": {
"org": "guestx",
"course": "contender",
"run": "run",
"root_block_id": "head345679",
"user_id": "[email protected]",
"fields": {
"tabs": [
{
"type": "courseware"
},
{
"type": "course_info",
"name": "Course Info"
},
{
"type": "discussion",
"name": "Discussion"
},
{
"type": "wiki",
"name": "Wiki"
}
],
"start": _date_field.from_json("2013-03-14T05:00"),
"display_name": "Yet another contender",
"grading_policy": {
"GRADER": [
{
"min_count": 4,
"weight": 0.25,
"type": "Homework",
"drop_count": 0,
"short_label": "HW"
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.4
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.35
}
],
"GRADE_CUTOFFS": {
"Pass": 0.25
}
},
}
},
}
@staticmethod
def bootstrapDB(split_store): # pylint: disable=invalid-name
        '''
        Set up the initial data in the db.
        '''
for _course_id, course_spec in SplitModuleTest.COURSE_CONTENT.iteritems():
course = split_store.create_course(
course_spec['org'],
course_spec['course'],
course_spec['run'],
course_spec['user_id'],
master_branch=BRANCH_NAME_DRAFT,
fields=course_spec['fields'],
root_block_id=course_spec['root_block_id']
)
for revision in course_spec.get('revisions', []):
for (block_type, block_id), fields in revision.get('update', {}).iteritems():
# cheat since course is most frequent
if course.location.block_id == block_id:
block = course
else:
# not easy to figure out the category but get_item won't care
block_usage = BlockUsageLocator.make_relative(course.location, block_type, block_id)
block = split_store.get_item(block_usage)
for key, value in fields.iteritems():
setattr(block, key, value)
                # add new blocks to the dag: each parent must already exist, so creation order matters
new_ele_dict = {}
for spec in revision.get('create', []):
if spec['parent'] in new_ele_dict:
parent = new_ele_dict.get(spec['parent'])
elif spec['parent'] == course.location.block_id:
parent = course
else:
block_usage = BlockUsageLocator.make_relative(course.location, spec['parent_type'], spec['parent'])
parent = split_store.get_item(block_usage)
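                    # LocalId is a placeholder for a block that isn't persisted yet;
                    # persist_xblock_dag below turns it into a definite block id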
block_id = LocalId(spec['id'])
child = split_store.create_xblock(
course.runtime, course.id, spec['category'], block_id, spec['fields'], parent_xblock=parent
)
new_ele_dict[spec['id']] = child
course = split_store.persist_xblock_dag(course, revision['user_id'])
# publish "testx.wonderful"
source_course = CourseLocator(org="testx", course="wonderful", run="run", branch=BRANCH_NAME_DRAFT)
to_publish = BlockUsageLocator(
source_course,
block_type='course',
block_id="head23456"
)
destination = CourseLocator(org="testx", course="wonderful", run="run", branch=BRANCH_NAME_PUBLISHED)
split_store.copy("[email protected]", source_course, destination, [to_publish], None)
def setUp(self):
super(SplitModuleTest, self).setUp()
self.user_id = random.getrandbits(32)
def tearDown(self):
"""
Clear persistence between each test.
"""
collection_prefix = SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG']['collection'] + '.'
if SplitModuleTest.modulestore:
for collection in ('active_versions', 'structures', 'definitions'):
modulestore().db.drop_collection(collection_prefix + collection)
        # drop the modulestore to force re-init
SplitModuleTest.modulestore = None
super(SplitModuleTest, self).tearDown()
def findByIdInResult(self, collection, _id): # pylint: disable=invalid-name
"""
Result is a collection of descriptors. Find the one whose block id
matches the _id.
"""
for element in collection:
if element.location.block_id == _id:
return element
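        # implicitly returns None when nothing matches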
class TestHasChildrenAtDepth(SplitModuleTest):
"""Test the has_children_at_depth method of XModuleMixin. """
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_has_children_at_depth(self, _from_json):
course_locator = CourseLocator(
org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT
)
block_locator = BlockUsageLocator(
course_locator, 'course', 'head12345'
)
block = modulestore().get_item(block_locator)
self.assertRaises(
ValueError, block.has_children_at_depth, -1,
)
self.assertTrue(block.has_children_at_depth(0))
self.assertTrue(block.has_children_at_depth(1))
self.assertFalse(block.has_children_at_depth(2))
ch1 = modulestore().get_item(
BlockUsageLocator(course_locator, 'chapter', block_id='chapter1')
)
self.assertFalse(ch1.has_children_at_depth(0))
ch2 = modulestore().get_item(
BlockUsageLocator(course_locator, 'chapter', block_id='chapter2')
)
self.assertFalse(ch2.has_children_at_depth(0))
ch3 = modulestore().get_item(
BlockUsageLocator(course_locator, 'chapter', block_id='chapter3')
)
self.assertTrue(ch3.has_children_at_depth(0))
self.assertFalse(ch3.has_children_at_depth(1))
@ddt.ddt
class SplitModuleCourseTests(SplitModuleTest):
'''
Course CRUD operation tests
'''
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_courses(self, _from_json):
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT)
# should have gotten 3 draft courses
self.assertEqual(len(courses), 3, "Wrong number of courses")
# check metadata -- NOTE no promised order
course = self.findByIdInResult(courses, "head12345")
self.assertEqual(course.location.org, "testx")
self.assertEqual(course.category, 'course', 'wrong category')
self.assertEqual(len(course.tabs), 6, "wrong number of tabs")
self.assertEqual(
course.display_name, "The Ancient Greek Hero",
"wrong display name"
)
self.assertEqual(
course.advertised_start, "Fall 2013",
"advertised_start"
)
self.assertEqual(len(course.children), 3, "children")
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "[email protected]")
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.45})
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_courses_with_same_course_index(self, _from_json):
"""
        Test that if two courses point to the same course index,
        get_courses returns both.
"""
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT)
# Should have gotten 3 draft courses.
self.assertEqual(len(courses), 3)
course_index = modulestore().get_course_index_info(courses[0].id)
        # Create a new course with the same course index as another course.
new_draft_course = modulestore().create_course(
'testX', 'rerun_2.0', 'run_q2', 1, BRANCH_NAME_DRAFT, versions_dict=course_index['versions']
)
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT)
# Should have gotten 4 draft courses.
self.assertEqual(len(courses), 4)
self.assertIn(new_draft_course.id.version_agnostic(), [c.id for c in courses])
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_org_courses(self, _from_json):
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='guestx')
        # should have gotten 1 draft course
self.assertEqual(len(courses), 1)
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='testx')
# should have gotten 2 draft courses
self.assertEqual(len(courses), 2)
        # although this is already covered in other tests, also call
        # without the org= parameter to make sure we get back
        # all 3 courses
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT)
self.assertEqual(len(courses), 3)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_branch_requests(self, _from_json):
# query w/ branch qualifier (both draft and published)
def _verify_published_course(courses_published):
""" Helper function for verifying published course. """
self.assertEqual(len(courses_published), 1, len(courses_published))
course = self.findByIdInResult(courses_published, "head23456")
self.assertIsNotNone(course, "published courses")
self.assertEqual(course.location.course_key.org, "testx")
self.assertEqual(course.location.course_key.course, "wonderful")
self.assertEqual(course.category, 'course', 'wrong category')
self.assertEqual(len(course.tabs), 4, "wrong number of tabs")
self.assertEqual(course.display_name, "The most wonderful course",
course.display_name)
self.assertIsNone(course.advertised_start)
self.assertEqual(len(course.children), 0,
"children")
_verify_published_course(modulestore().get_courses(branch=BRANCH_NAME_PUBLISHED))
def test_has_course(self):
'''
Test the various calling forms for has_course
'''
check_has_course_method(
modulestore(),
CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT),
locator_key_fields=['org', 'course', 'run']
)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_course(self, _from_json):
'''
Test the various calling forms for get_course
'''
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
head_course = modulestore().get_course(locator)
self.assertNotEqual(head_course.location.version_guid, head_course.previous_version)
locator = CourseLocator(version_guid=head_course.previous_version)
course = modulestore().get_course(locator)
self.assertIsNone(course.location.course_key.org)
self.assertEqual(course.location.version_guid, head_course.previous_version)
self.assertEqual(course.category, 'course')
self.assertEqual(len(course.tabs), 6)
self.assertEqual(course.display_name, "The Ancient Greek Hero")
self.assertEqual(course.graceperiod, datetime.timedelta(hours=2))
self.assertIsNone(course.advertised_start)
self.assertEqual(len(course.children), 0)
self.assertNotEqual(course.definition_locator.definition_id, head_course.definition_locator.definition_id)
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "[email protected]")
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.55})
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
self.assertEqual(course.location.course_key.org, "testx")
self.assertEqual(course.location.course_key.course, "GreekHero")
self.assertEqual(course.location.course_key.run, "run")
self.assertEqual(course.category, 'course')
self.assertEqual(len(course.tabs), 6)
self.assertEqual(course.display_name, "The Ancient Greek Hero")
self.assertEqual(course.advertised_start, "Fall 2013")
self.assertEqual(len(course.children), 3)
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "[email protected]")
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.45})
locator = CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_PUBLISHED)
course = modulestore().get_course(locator)
published_version = course.location.version_guid
locator = CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
self.assertNotEqual(course.location.version_guid, published_version)
def test_get_course_negative(self):
# Now negative testing
with self.assertRaises(InsufficientSpecificationError):
modulestore().get_course(CourseLocator(org='edu', course='meh', run='blah'))
with self.assertRaises(ItemNotFoundError):
modulestore().get_course(CourseLocator(org='edu', course='nosuchthing', run="run", branch=BRANCH_NAME_DRAFT))
with self.assertRaises(ItemNotFoundError):
modulestore().get_course(CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_PUBLISHED))
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_cache(self, _from_json):
"""
Test that the mechanics of caching work.
"""
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
block_map = modulestore().cache_items(
course.system, [BlockKey.from_usage_key(child) for child in course.children], course.id, depth=3
)
self.assertIn(BlockKey('chapter', 'chapter1'), block_map)
self.assertIn(BlockKey('problem', 'problem3_2'), block_map)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_course_successors(self, _from_json):
"""
get_course_successors(course_locator, version_history_depth=1)
"""
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
versions = [course.location.version_guid, course.previous_version]
locator = CourseLocator(version_guid=course.previous_version)
course = modulestore().get_course(locator)
versions.append(course.previous_version)
locator = CourseLocator(version_guid=course.previous_version)
result = modulestore().get_course_successors(locator)
self.assertIsInstance(result, VersionTree)
self.assertIsNone(result.locator.org)
self.assertEqual(result.locator.version_guid, versions[-1])
self.assertEqual(len(result.children), 1)
self.assertEqual(result.children[0].locator.version_guid, versions[-2])
self.assertEqual(len(result.children[0].children), 0, "descended more than one level")
result = modulestore().get_course_successors(locator, version_history_depth=2)
self.assertEqual(len(result.children), 1)
self.assertEqual(result.children[0].locator.version_guid, versions[-2])
self.assertEqual(len(result.children[0].children), 1)
result = modulestore().get_course_successors(locator, version_history_depth=99)
self.assertEqual(len(result.children), 1)
self.assertEqual(result.children[0].locator.version_guid, versions[-2])
self.assertEqual(len(result.children[0].children), 1)
self.assertEqual(result.children[0].children[0].locator.version_guid, versions[0])
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_persist_dag(self, _from_json):
"""
try saving temporary xblocks
"""
test_course = modulestore().create_course(
course='course', run='2014', org='testx',
display_name='fun test course', user_id='testbot',
master_branch=ModuleStoreEnum.BranchName.draft
)
test_chapter = modulestore().create_xblock(
test_course.system, test_course.id, 'chapter', fields={'display_name': 'chapter n'},
parent_xblock=test_course
)
self.assertEqual(test_chapter.display_name, 'chapter n')
test_def_content = '<problem>boo</problem>'
# create child
new_block = modulestore().create_xblock(
test_course.system, test_course.id,
'problem',
fields={
'data': test_def_content,
'display_name': 'problem'
},
parent_xblock=test_chapter
)
self.assertIsNotNone(new_block.definition_locator)
self.assertTrue(isinstance(new_block.definition_locator.definition_id, LocalId))
        # better to pass the persisted parent in over the subdag so the
        # subdag gets the parent pointer (otherwise it takes 2 ops: persist the dag,
        # then update the parent's children and persist the parent)
persisted_course = modulestore().persist_xblock_dag(test_course, 'testbot')
self.assertEqual(len(persisted_course.children), 1)
persisted_chapter = persisted_course.get_children()[0]
self.assertEqual(persisted_chapter.category, 'chapter')
self.assertEqual(persisted_chapter.display_name, 'chapter n')
self.assertEqual(len(persisted_chapter.children), 1)
persisted_problem = persisted_chapter.get_children()[0]
self.assertEqual(persisted_problem.category, 'problem')
self.assertEqual(persisted_problem.data, test_def_content)
# update it
persisted_problem.display_name = 'altered problem'
persisted_problem = modulestore().update_item(persisted_problem, 'testbot')
self.assertEqual(persisted_problem.display_name, 'altered problem')
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_block_generations(self, _from_json):
"""
Test get_block_generations
"""
test_course = modulestore().create_course(
org='edu.harvard',
course='history',
run='hist101',
display_name='history test course',
user_id='testbot',
master_branch=ModuleStoreEnum.BranchName.draft
)
chapter = modulestore().create_child(
None, test_course.location,
block_type='chapter',
block_id='chapter1',
fields={'display_name': 'chapter 1'}
)
sub = modulestore().create_child(
None, chapter.location,
block_type='vertical',
block_id='subsection1',
fields={'display_name': 'subsection 1'}
)
first_problem = modulestore().create_child(
None, sub.location,
block_type='problem',
block_id='problem1',
fields={'display_name': 'problem 1', 'data': '<problem></problem>'}
)
first_problem.max_attempts = 3
first_problem.save() # decache the above into the kvs
updated_problem = modulestore().update_item(first_problem, 'testbot')
self.assertIsNotNone(updated_problem.previous_version)
self.assertEqual(updated_problem.previous_version, first_problem.update_version)
self.assertNotEqual(updated_problem.update_version, first_problem.update_version)
modulestore().delete_item(updated_problem.location, 'testbot')
second_problem = modulestore().create_child(
None, sub.location.version_agnostic(),
block_type='problem',
block_id='problem2',
fields={'display_name': 'problem 2', 'data': '<problem></problem>'}
)
# The draft course root has 2 revisions: the published revision, and then the subsequent
# changes to the draft revision
version_history = modulestore().get_block_generations(test_course.location)
self.assertIsNotNone(version_history)
self.assertEqual(version_history.locator.version_guid, test_course.location.version_guid)
self.assertEqual(len(version_history.children), 1)
self.assertEqual(version_history.children[0].children, [])
self.assertEqual(version_history.children[0].locator.version_guid, chapter.location.version_guid)
# sub changed on add, add problem, delete problem, add problem in strict linear seq
version_history = modulestore().get_block_generations(sub.location)
self.assertEqual(len(version_history.children), 1)
self.assertEqual(len(version_history.children[0].children), 1)
self.assertEqual(len(version_history.children[0].children[0].children), 1)
self.assertEqual(len(version_history.children[0].children[0].children[0].children), 0)
        # the first and second problem may show up with the same usage_id; so, we need to ensure their histories are right
version_history = modulestore().get_block_generations(updated_problem.location)
self.assertEqual(version_history.locator.version_guid, first_problem.location.version_guid)
self.assertEqual(len(version_history.children), 1) # updated max_attempts
self.assertEqual(len(version_history.children[0].children), 0)
version_history = modulestore().get_block_generations(second_problem.location)
self.assertNotEqual(version_history.locator.version_guid, first_problem.location.version_guid)
@ddt.data(
("course-v1:edx+test_course+test_run", BlockUsageLocator),
("ccx-v1:edX+test_course+test_run+ccx@1", CCXBlockUsageLocator),
)
@ddt.unpack
def test_make_course_usage_key(self, course_id, root_block_cls):
"""Test that we get back the appropriate usage key for the root of a course key.
In particular, we want to make sure that it properly handles CCX courses.
"""
course_key = CourseKey.from_string(course_id)
root_block_key = modulestore().make_course_usage_key(course_key)
self.assertIsInstance(root_block_key, root_block_cls)
self.assertEqual(root_block_key.block_type, "course")
self.assertEqual(root_block_key.name, "course")
class TestCourseStructureCache(SplitModuleTest):
"""Tests for the CourseStructureCache"""
def setUp(self):
# use the default cache, since the `course_structure_cache`
# is a dummy cache during testing
self.cache = caches['default']
# make sure we clear the cache before every test...
self.cache.clear()
# ... and after
self.addCleanup(self.cache.clear)
# make a new course:
self.user = random.getrandbits(32)
self.new_course = modulestore().create_course(
'org', 'course', 'test_run', self.user, BRANCH_NAME_DRAFT,
)
super(TestCourseStructureCache, self).setUp()
@patch('xmodule.modulestore.split_mongo.mongo_connection.get_cache')
def test_course_structure_cache(self, mock_get_cache):
# force get_cache to return the default cache so we can test
# its caching behavior
mock_get_cache.return_value = self.cache
with check_mongo_calls(1):
not_cached_structure = self._get_structure(self.new_course)
# when cache is warmed, we should have one fewer mongo call
with check_mongo_calls(0):
cached_structure = self._get_structure(self.new_course)
# now make sure that you get the same structure
self.assertEqual(cached_structure, not_cached_structure)
@patch('xmodule.modulestore.split_mongo.mongo_connection.get_cache')
def test_course_structure_cache_no_cache_configured(self, mock_get_cache):
mock_get_cache.side_effect = InvalidCacheBackendError
with check_mongo_calls(1):
not_cached_structure = self._get_structure(self.new_course)
# if the cache isn't configured, we expect to have to make
# another mongo call here if we want the same course structure
with check_mongo_calls(1):
cached_structure = self._get_structure(self.new_course)
# now make sure that you get the same structure
self.assertEqual(cached_structure, not_cached_structure)
def test_dummy_cache(self):
with check_mongo_calls(1):
not_cached_structure = self._get_structure(self.new_course)
# Since the test is using the dummy cache, it's not actually caching
# anything
with check_mongo_calls(1):
cached_structure = self._get_structure(self.new_course)
# now make sure that you get the same structure
self.assertEqual(cached_structure, not_cached_structure)
def _get_structure(self, course):
"""
Helper function to get a structure from a course.
"""
return modulestore().db_connection.get_structure(
course.location.as_object_id(course.location.version_guid)
)
class SplitModuleItemTests(SplitModuleTest):
'''
Item read tests including inheritance
'''
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_has_item(self, _from_json):
'''
has_item(BlockUsageLocator)
'''
org = 'testx'
course = 'GreekHero'
run = 'run'
course_locator = CourseLocator(org=org, course=course, run=run, branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(course_locator)
previous_version = course.previous_version
# positive tests of various forms
locator = course.location.map_into_course(CourseLocator(version_guid=previous_version))
self.assertTrue(
modulestore().has_item(locator), "couldn't find in %s" % previous_version
)
locator = course.location.version_agnostic()
self.assertTrue(
modulestore().has_item(locator),
)
self.assertFalse(
modulestore().has_item(
BlockUsageLocator(
locator.course_key.for_branch(BRANCH_NAME_PUBLISHED),
block_type=locator.block_type,
block_id=locator.block_id
)
),
"found in published head"
)
# not a course obj
locator = BlockUsageLocator(course_locator, block_type='chapter', block_id='chapter1')
self.assertTrue(
modulestore().has_item(locator),
"couldn't find chapter1"
)
# in published course
locator = BlockUsageLocator(
CourseLocator(org="testx", course="wonderful", run="run", branch=BRANCH_NAME_DRAFT),
block_type="course",
block_id="head23456"
)
self.assertTrue(
modulestore().has_item(locator.for_branch(BRANCH_NAME_PUBLISHED))
)
def test_negative_has_item(self):
# negative tests--not found
# no such course or block
locator = BlockUsageLocator(
CourseLocator(org="foo", course="doesnotexist", run="run", branch=BRANCH_NAME_DRAFT),
block_type="course",
block_id="head23456"
)
self.assertFalse(modulestore().has_item(locator))
locator = BlockUsageLocator(
CourseLocator(org="testx", course="wonderful", run="run", branch=BRANCH_NAME_DRAFT),
block_type="vertical",
block_id="doesnotexist"
)
self.assertFalse(modulestore().has_item(locator))
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_item(self, _from_json):
'''
get_item(blocklocator)
'''
hero_locator = CourseLocator(org="testx", course="GreekHero", run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(hero_locator)
previous_version = course.previous_version
# positive tests of various forms
locator = course.location.map_into_course(CourseLocator(version_guid=previous_version))
block = modulestore().get_item(locator)
self.assertIsInstance(block, CourseDescriptor)
self.assertIsInstance(modulestore().get_item(locator), CourseDescriptor)
def verify_greek_hero(block):
"""
Check contents of block
"""
self.assertEqual(block.location.org, "testx")
self.assertEqual(block.location.course, "GreekHero")
self.assertEqual(block.location.run, "run")
self.assertEqual(len(block.tabs), 6, "wrong number of tabs")
self.assertEqual(block.display_name, "The Ancient Greek Hero")
self.assertEqual(block.advertised_start, "Fall 2013")
self.assertEqual(len(block.children), 3)
# check dates and graders--forces loading of descriptor
self.assertEqual(block.edited_by, "[email protected]")
self.assertDictEqual(
block.grade_cutoffs, {"Pass": 0.45},
)
verify_greek_hero(modulestore().get_item(course.location))
# try to look up other branches
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(course.location.for_branch(BRANCH_NAME_PUBLISHED))
def test_get_non_root(self):
# not a course obj
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'chapter', 'chapter1'
)
block = modulestore().get_item(locator)
self.assertEqual(block.location.org, "testx")
self.assertEqual(block.location.course, "GreekHero")
self.assertEqual(block.category, 'chapter')
self.assertEqual(block.display_name, "Hercules")
self.assertEqual(block.edited_by, "[email protected]")
# in published course
locator = BlockUsageLocator(
CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_PUBLISHED), 'course', 'head23456'
)
self.assertIsInstance(
modulestore().get_item(locator),
CourseDescriptor
)
# negative tests--not found
# no such course or block
locator = BlockUsageLocator(
CourseLocator(org='doesnotexist', course='doesnotexist', run="run", branch=BRANCH_NAME_DRAFT), 'course', 'head23456'
)
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(locator)
locator = BlockUsageLocator(
CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT), 'html', 'doesnotexist'
)
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(locator)
# pylint: disable=protected-access
def test_matching(self):
'''
test the block and value matches help functions
'''
self.assertTrue(modulestore()._value_matches('help', 'help'))
self.assertFalse(modulestore()._value_matches('help', 'Help'))
self.assertTrue(modulestore()._value_matches(['distract', 'help', 'notme'], 'help'))
self.assertFalse(modulestore()._value_matches(['distract', 'Help', 'notme'], 'help'))
self.assertFalse(modulestore()._block_matches({'field': ['distract', 'Help', 'notme']}, {'field': 'help'}))
self.assertTrue(modulestore()._block_matches(
{'field': ['distract', 'help', 'notme'],
'irrelevant': 2},
{'field': 'help'}))
self.assertTrue(modulestore()._value_matches('I need some help', re.compile(r'help')))
self.assertTrue(modulestore()._value_matches(['I need some help', 'today'], re.compile(r'help')))
self.assertFalse(modulestore()._value_matches('I need some help', re.compile(r'Help')))
self.assertTrue(modulestore()._value_matches(['I need some help', 'today'], re.compile(r'Help', re.IGNORECASE)))
self.assertTrue(modulestore()._value_matches('gotcha', {'$in': ['a', 'bunch', 'of', 'gotcha']}))
self.assertFalse(modulestore()._value_matches('gotcha', {'$in': ['a', 'bunch', 'of', 'gotchas']}))
self.assertFalse(modulestore()._value_matches('gotcha', {'$nin': ['a', 'bunch', 'of', 'gotcha']}))
self.assertTrue(modulestore()._value_matches('gotcha', {'$nin': ['a', 'bunch', 'of', 'gotchas']}))
self.assertTrue(modulestore()._block_matches({'group_access': {'1': [1]}}, {'group_access': {'$exists': True}}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'group_access': {'$exists': False}}))
self.assertTrue(modulestore()._block_matches(
{'a': 1, 'group_access': {'1': [1]}},
{'a': 1, 'group_access': {'$exists': True}}))
self.assertFalse(modulestore()._block_matches(
{'a': 1, 'group_access': {'1': [1]}},
{'a': 111, 'group_access': {'$exists': True}}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1, 'group_access': {'$exists': False}}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 9, 'group_access': {'$exists': False}}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 2}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'c': 1}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1, 'c': 1}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': lambda i: 0 < i < 2}))
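        # matcher mini-language exercised above: plain values compare by equality (a list-valued
        # field matches if any member matches), compiled regexes match via search, {'$in'}/{'$nin'}
        # check membership, {'$exists': bool} checks field presence, and callables act as predicates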
def test_get_items(self):
'''
get_items(locator, qualifiers, [branch])
'''
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
# get all modules
matches = modulestore().get_items(locator)
self.assertEqual(len(matches), 7)
        # an empty qualifiers dict should likewise match every module
        matches = modulestore().get_items(locator, qualifiers={})
self.assertEqual(len(matches), 7)
matches = modulestore().get_items(locator, qualifiers={'category': 'chapter'})
self.assertEqual(len(matches), 3)
matches = modulestore().get_items(locator, qualifiers={'category': 'garbage'})
self.assertEqual(len(matches), 0)
matches = modulestore().get_items(
locator,
qualifiers={'category': 'chapter'},
settings={'display_name': re.compile(r'Hera')},
)
self.assertEqual(len(matches), 2)
matches = modulestore().get_items(locator, settings={'group_access': {'$exists': True}})
self.assertEqual(len(matches), 1)
matches = modulestore().get_items(locator, settings={'group_access': {'$exists': False}})
self.assertEqual(len(matches), 6)
def test_get_parents(self):
'''
get_parent_location(locator): BlockUsageLocator
'''
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT),
'chapter', block_id='chapter1'
)
parent = modulestore().get_parent_location(locator)
self.assertIsNotNone(parent)
self.assertEqual(parent.block_id, 'head12345')
self.assertEqual(parent.org, "testx")
self.assertEqual(parent.course, "GreekHero")
locator = locator.course_key.make_usage_key('chapter', 'chapter2')
parent = modulestore().get_parent_location(locator)
self.assertIsNotNone(parent)
self.assertEqual(parent.block_id, 'head12345')
locator = locator.course_key.make_usage_key('garbage', 'nosuchblock')
parent = modulestore().get_parent_location(locator)
self.assertIsNone(parent)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_children(self, _from_json):
"""
Test the existing get_children method on xdescriptors
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'course', 'head12345'
)
block = modulestore().get_item(locator)
children = block.get_children()
expected_ids = [
"chapter1", "chapter2", "chapter3"
]
for child in children:
self.assertEqual(child.category, "chapter")
self.assertIn(child.location.block_id, expected_ids)
expected_ids.remove(child.location.block_id)
self.assertEqual(len(expected_ids), 0)
def version_agnostic(children):
"""
children: list of descriptors
Returns the `children` list with each member version-agnostic
"""
return [child.version_agnostic() for child in children]
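# e.g. assertIn(child.location.version_agnostic(), version_agnostic(parent.children)) checks
# membership without regard to which version_guid each locator happens to carry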
class TestItemCrud(SplitModuleTest):
"""
Test create update and delete of items
"""
# DHM do I need to test this case which I believe won't work:
# 1) fetch a course and some of its blocks
# 2) do a series of CRUD operations on those previously fetched elements
# The problem here will be that the version_guid of the items will be the version at time of fetch.
# Each separate save will change the head version; so, the 2nd piecemeal change will flag the version
# conflict. That is, if versions are v0..vn and start as v0 in initial fetch, the first CRUD op will
# say it's changing an object from v0, splitMongo will process it and make the current head v1, the next
# crud op will pass in its v0 element and splitMongo will flag the version conflict.
# What I don't know is how realistic this test is and whether to wrap the modulestore with a higher level
# transactional operation which manages the version change or make the threading cache reason out whether or
# not the changes are independent and additive and thus non-conflicting.
# A use case I expect is
# (client) change this metadata
# (server) done, here's the new info which, btw, updates the course version to v1
# (client) add these children to this other node (which says it came from v0 or
# will the client have refreshed the version before doing the op?)
    # In this case, having a server-side transactional model won't help b/c the bug is a long transaction
    # on the client, where it would be a mistake for the server to assume anything about client consistency. The best
# the server could do would be to see if the parent's children changed at all since v0.
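    # A minimal sketch of that piecemeal conflict, using the get/update calls exercised in this
    # file (usage_a/usage_b are hypothetical locators; kept as comments, not executed):
    #
    #   course = modulestore().get_course(course_locator)   # head is v0
    #   block_a = modulestore().get_item(usage_a)           # fetched at v0
    #   block_b = modulestore().get_item(usage_b)           # fetched at v0
    #   modulestore().update_item(block_a, user_id)         # head advances to v1
    #   modulestore().update_item(block_b, user_id)         # still claims v0 -> version conflict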
def test_create_minimal_item(self):
"""
        create_item(user, location, category, definition_locator=None, fields): new_descriptor
"""
# grab link to course to ensure new versioning works
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
premod_course = modulestore().get_course(locator)
premod_history = modulestore().get_course_history_info(locator)
# add minimal one w/o a parent
category = 'sequential'
new_module = modulestore().create_item(
'user123', locator, category,
fields={'display_name': 'new sequential'}
)
# check that course version changed and course's previous is the other one
self.assertEqual(new_module.location.course, "GreekHero")
self.assertNotEqual(new_module.location.version_guid, premod_course.location.version_guid)
self.assertIsNone(locator.version_guid, "Version inadvertently filled in")
current_course = modulestore().get_course(locator)
self.assertEqual(new_module.location.version_guid, current_course.location.version_guid)
history_info = modulestore().get_course_history_info(current_course.location.course_key)
self.assertEqual(history_info['previous_version'], premod_course.location.version_guid)
self.assertEqual(history_info['original_version'], premod_history['original_version'])
self.assertEqual(history_info['edited_by'], "user123")
# check block's info: category, definition_locator, and display_name
self.assertEqual(new_module.category, 'sequential')
self.assertIsNotNone(new_module.definition_locator)
self.assertEqual(new_module.display_name, 'new sequential')
# check that block does not exist in previous version
locator = new_module.location.map_into_course(
CourseLocator(version_guid=premod_course.location.version_guid)
)
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(locator)
def test_create_parented_item(self):
"""
Test create_item w/ specifying the parent of the new item
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT),
'chapter', block_id='chapter2'
)
original = modulestore().get_item(locator)
locator = BlockUsageLocator(
CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT), 'course', 'head23456'
)
premod_course = modulestore().get_course(locator.course_key)
category = 'chapter'
new_module = modulestore().create_child(
'user123', locator, category,
fields={'display_name': 'new chapter'},
definition_locator=original.definition_locator
)
# check that course version changed and course's previous is the other one
self.assertNotEqual(new_module.location.version_guid, premod_course.location.version_guid)
parent = modulestore().get_item(locator)
self.assertIn(new_module.location.version_agnostic(), version_agnostic(parent.children))
self.assertEqual(new_module.definition_locator.definition_id, original.definition_locator.definition_id)
def test_unique_naming(self):
"""
        Check that 2 modules of the same type get unique block_ids. Also check that if creation provides
        a definition id and new def data, it branches the definition in the db.
        Actually, this tries to test all create_item features not tested above.
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT),
'problem', block_id='problem1'
)
original = modulestore().get_item(locator)
locator = BlockUsageLocator(
CourseLocator(org='guestx', course='contender', run="run", branch=BRANCH_NAME_DRAFT), 'course', 'head345679'
)
category = 'problem'
new_payload = "<problem>empty</problem>"
new_module = modulestore().create_child(
'anotheruser', locator, category,
fields={'display_name': 'problem 1', 'data': new_payload},
)
another_payload = "<problem>not empty</problem>"
another_module = modulestore().create_child(
'anotheruser', locator, category,
fields={'display_name': 'problem 2', 'data': another_payload},
definition_locator=original.definition_locator,
)
        # check that the two children got unique block_ids and both ended up under the parent
parent = modulestore().get_item(locator)
self.assertNotEqual(new_module.location.block_id, another_module.location.block_id)
self.assertIn(new_module.location.version_agnostic(), version_agnostic(parent.children))
self.assertIn(another_module.location.version_agnostic(), version_agnostic(parent.children))
self.assertEqual(new_module.data, new_payload)
self.assertEqual(another_module.data, another_payload)
# check definition histories
new_history = modulestore().get_definition_history_info(new_module.definition_locator)
self.assertIsNone(new_history['previous_version'])
self.assertEqual(new_history['original_version'], new_module.definition_locator.definition_id)
self.assertEqual(new_history['edited_by'], "anotheruser")
another_history = modulestore().get_definition_history_info(another_module.definition_locator)
self.assertEqual(another_history['previous_version'], original.definition_locator.definition_id)
def test_encoded_naming(self):
"""
        Check that using odd characters in a block id doesn't break the ability to add and retrieve the block.
"""
course_key = CourseLocator(org='guestx', course='contender', run="run", branch=BRANCH_NAME_DRAFT)
parent_locator = BlockUsageLocator(course_key, 'course', block_id="head345679")
chapter_locator = BlockUsageLocator(course_key, 'chapter', block_id="foo.bar_-~:0")
modulestore().create_child(
'anotheruser', parent_locator, 'chapter',
block_id=chapter_locator.block_id,
fields={'display_name': 'chapter 99'},
)
        # fetch the block back to verify the odd block_id round-trips
new_module = modulestore().get_item(chapter_locator)
self.assertEqual(new_module.location.block_id, "foo.bar_-~:0") # hardcode to ensure BUL init didn't change
# now try making that a parent of something
new_payload = "<problem>empty</problem>"
problem_locator = BlockUsageLocator(course_key, 'problem', block_id="prob.bar_-~:99a")
modulestore().create_child(
'anotheruser', chapter_locator, 'problem',
block_id=problem_locator.block_id,
fields={'display_name': 'chapter 99', 'data': new_payload},
)
        # fetch the problem back to verify its block_id round-trips
new_module = modulestore().get_item(problem_locator)
self.assertEqual(new_module.location.block_id, problem_locator.block_id)
chapter = modulestore().get_item(chapter_locator)
self.assertIn(problem_locator, version_agnostic(chapter.children))
def test_create_bulk_operations(self):
"""
Test create_item using bulk_operations
"""
# start transaction w/ simple creation
user = random.getrandbits(32)
course_key = CourseLocator('test_org', 'test_transaction', 'test_run')
with modulestore().bulk_operations(course_key):
new_course = modulestore().create_course('test_org', 'test_transaction', 'test_run', user, BRANCH_NAME_DRAFT)
new_course_locator = new_course.id
index_history_info = modulestore().get_course_history_info(new_course.location.course_key)
course_block_prev_version = new_course.previous_version
course_block_update_version = new_course.update_version
self.assertIsNotNone(new_course_locator.version_guid, "Want to test a definite version")
versionless_course_locator = new_course_locator.version_agnostic()
# positive simple case: no force, add chapter
new_ele = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 1'},
)
# version info shouldn't change
self.assertEqual(new_ele.update_version, course_block_update_version)
self.assertEqual(new_ele.update_version, new_ele.location.version_guid)
refetch_course = modulestore().get_course(versionless_course_locator)
self.assertEqual(refetch_course.location.version_guid, new_course.location.version_guid)
self.assertEqual(refetch_course.previous_version, course_block_prev_version)
self.assertEqual(refetch_course.update_version, course_block_update_version)
refetch_index_history_info = modulestore().get_course_history_info(refetch_course.location.course_key)
self.assertEqual(refetch_index_history_info, index_history_info)
self.assertIn(new_ele.location.version_agnostic(), version_agnostic(refetch_course.children))
# try to create existing item
with self.assertRaises(DuplicateItemError):
_fail = modulestore().create_child(
user, new_course.location, 'chapter',
block_id=new_ele.location.block_id,
fields={'display_name': 'chapter 2'},
)
# start a new transaction
with modulestore().bulk_operations(course_key):
new_ele = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 2'},
)
transaction_guid = new_ele.location.version_guid
# ensure force w/ continue gives exception
with self.assertRaises(VersionConflictError):
_fail = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 2'},
force=True
)
# ensure trying to continue the old one gives exception
with self.assertRaises(VersionConflictError):
_fail = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 3'},
)
            # add a new child to the old parent within the continued transaction (leave off version_guid)
course_module_locator = new_course.location.version_agnostic()
new_ele = modulestore().create_child(
user, course_module_locator, 'chapter',
fields={'display_name': 'chapter 4'},
)
self.assertNotEqual(new_ele.update_version, course_block_update_version)
self.assertEqual(new_ele.location.version_guid, transaction_guid)
# check children, previous_version
refetch_course = modulestore().get_course(versionless_course_locator)
self.assertIn(new_ele.location.version_agnostic(), version_agnostic(refetch_course.children))
self.assertEqual(refetch_course.previous_version, course_block_update_version)
self.assertEqual(refetch_course.update_version, transaction_guid)
def test_bulk_ops_org_filtering(self):
"""
Make sure of proper filtering when using bulk operations and
calling get_courses with an 'org' filter
"""
# start transaction w/ simple creation
user = random.getrandbits(32)
course_key = CourseLocator('test_org', 'test_transaction', 'test_run')
with modulestore().bulk_operations(course_key):
modulestore().create_course('test_org', 'test_transaction', 'test_run', user, BRANCH_NAME_DRAFT)
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='test_org')
self.assertEqual(len(courses), 1)
self.assertEqual(courses[0].id.org, course_key.org)
self.assertEqual(courses[0].id.course, course_key.course)
self.assertEqual(courses[0].id.run, course_key.run)
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='other_org')
self.assertEqual(len(courses), 0)
# re-assert after the end of the with scope
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='test_org')
self.assertEqual(len(courses), 1)
self.assertEqual(courses[0].id.org, course_key.org)
self.assertEqual(courses[0].id.course, course_key.course)
self.assertEqual(courses[0].id.run, course_key.run)
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='other_org')
self.assertEqual(len(courses), 0)
def test_update_metadata(self):
"""
        test updating an item's metadata, ensuring the definition doesn't version but the course does when it should
"""
locator = BlockUsageLocator(
CourseLocator(org="testx", course="GreekHero", run="run", branch=BRANCH_NAME_DRAFT),
'problem', block_id="problem3_2"
)
problem = modulestore().get_item(locator)
pre_def_id = problem.definition_locator.definition_id
pre_version_guid = problem.location.version_guid
self.assertIsNotNone(pre_def_id)
self.assertIsNotNone(pre_version_guid)
self.assertNotEqual(problem.max_attempts, 4, "Invalidates rest of test")
problem.max_attempts = 4
problem.save() # decache above setting into the kvs
updated_problem = modulestore().update_item(problem, self.user_id)
# check that course version changed and course's previous is the other one
self.assertEqual(updated_problem.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_problem.location.version_guid, pre_version_guid)
self.assertEqual(updated_problem.max_attempts, 4)
# refetch to ensure original didn't change
original_location = problem.location.map_into_course(CourseLocator(version_guid=pre_version_guid))
problem = modulestore().get_item(original_location)
self.assertNotEqual(problem.max_attempts, 4, "original changed")
current_course = modulestore().get_course(locator.course_key)
self.assertEqual(updated_problem.location.version_guid, current_course.location.version_guid)
history_info = modulestore().get_course_history_info(current_course.location.course_key)
self.assertEqual(history_info['previous_version'], pre_version_guid)
self.assertEqual(history_info['edited_by'], self.user_id)
def test_update_children(self):
"""
test updating an item's children ensuring the definition doesn't version but the course does if it should
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'chapter', 'chapter3'
)
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
# reorder children
self.assertGreater(len(block.children), 0, "meaningless test")
moved_child = block.children.pop()
block.save() # decache model changes
updated_problem = modulestore().update_item(block, self.user_id)
# check that course version changed and course's previous is the other one
self.assertEqual(updated_problem.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_problem.location.version_guid, pre_version_guid)
self.assertEqual(version_agnostic(updated_problem.children), version_agnostic(block.children))
self.assertNotIn(moved_child, version_agnostic(updated_problem.children))
locator = locator.course_key.make_usage_key('chapter', "chapter1")
other_block = modulestore().get_item(locator)
other_block.children.append(moved_child)
other_updated = modulestore().update_item(other_block, self.user_id)
self.assertIn(moved_child.version_agnostic(), version_agnostic(other_updated.children))
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_update_definition(self, _from_json):
"""
test updating an item's definition: ensure it gets versioned as well as the course getting versioned
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'course', 'head12345'
)
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
block.grading_policy['GRADER'][0]['min_count'] = 13
block.save() # decache model changes
updated_block = modulestore().update_item(block, self.user_id)
self.assertNotEqual(updated_block.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_block.location.version_guid, pre_version_guid)
self.assertEqual(updated_block.grading_policy['GRADER'][0]['min_count'], 13)
def test_update_manifold(self):
"""
Test updating metadata, children, and definition in a single call ensuring all the versioning occurs
"""
locator = BlockUsageLocator(
CourseLocator('testx', 'GreekHero', 'run', branch=BRANCH_NAME_DRAFT),
'problem', block_id='problem1'
)
original = modulestore().get_item(locator)
# first add 2 children to the course for the update to manipulate
locator = BlockUsageLocator(
CourseLocator('guestx', 'contender', 'run', branch=BRANCH_NAME_DRAFT),
'course', block_id="head345679"
)
category = 'problem'
new_payload = "<problem>empty</problem>"
modulestore().create_child(
'test_update_manifold', locator, category,
fields={'display_name': 'problem 1', 'data': new_payload},
)
another_payload = "<problem>not empty</problem>"
modulestore().create_child(
'test_update_manifold', locator, category,
fields={'display_name': 'problem 2', 'data': another_payload},
definition_locator=original.definition_locator,
)
# pylint: disable=protected-access
modulestore()._clear_cache()
# now begin the test
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
self.assertNotEqual(block.grading_policy['GRADER'][0]['min_count'], 13)
block.grading_policy['GRADER'][0]['min_count'] = 13
block.children = block.children[1:] + [block.children[0]]
block.advertised_start = "Soon"
block.save() # decache model changes
updated_block = modulestore().update_item(block, self.user_id)
self.assertNotEqual(updated_block.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_block.location.version_guid, pre_version_guid)
self.assertEqual(updated_block.grading_policy['GRADER'][0]['min_count'], 13)
self.assertEqual(updated_block.children[0].version_agnostic(), block.children[0].version_agnostic())
self.assertEqual(updated_block.advertised_start, "Soon")
def test_delete_item(self):
course = self.create_course_for_deletion()
with self.assertRaises(ValueError):
modulestore().delete_item(course.location, self.user_id)
reusable_location = course.id.version_agnostic().for_branch(BRANCH_NAME_DRAFT)
# delete a leaf
problems = modulestore().get_items(reusable_location, qualifiers={'category': 'problem'})
locn_to_del = problems[0].location
new_course_loc = modulestore().delete_item(locn_to_del, self.user_id)
deleted = locn_to_del.version_agnostic()
self.assertFalse(modulestore().has_item(deleted))
with self.assertRaises(VersionConflictError):
modulestore().has_item(locn_to_del)
with self.assertRaises(ValueError):
modulestore().delete_item(deleted, self.user_id)
self.assertTrue(modulestore().has_item(locn_to_del.course_agnostic()))
self.assertNotEqual(new_course_loc.version_guid, course.location.version_guid)
# delete a subtree
nodes = modulestore().get_items(reusable_location, qualifiers={'category': 'chapter'})
new_course_loc = modulestore().delete_item(nodes[0].location, self.user_id)
# check subtree
def check_subtree(node):
"""
Check contents of subtree recursively
"""
if node:
node_loc = node.location
self.assertFalse(
modulestore().has_item(node_loc.version_agnostic())
)
self.assertTrue(modulestore().has_item(node_loc.course_agnostic()))
if node.has_children:
for sub in node.get_children():
check_subtree(sub)
check_subtree(nodes[0])
def create_course_for_deletion(self):
"""
Create a course we can delete
"""
course = modulestore().create_course('nihilx', 'deletion', 'run', 'deleting_user', BRANCH_NAME_DRAFT)
root = course.location.version_agnostic().for_branch(BRANCH_NAME_DRAFT)
for _ in range(4):
self.create_subtree_for_deletion(root, ['chapter', 'vertical', 'problem'])
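        # net effect: 4 chapters, each with 4 verticals, each with 4 problems (84 blocks)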
return modulestore().get_item(root)
def create_subtree_for_deletion(self, parent, category_queue):
"""
        Create a subtree in the to-be-deleted course
"""
if not category_queue:
return
node = modulestore().create_child(
'deleting_user', parent.version_agnostic(), category_queue[0]
)
node_loc = node.location.map_into_course(parent.course_key)
for _ in range(4):
self.create_subtree_for_deletion(node_loc, category_queue[1:])
def test_split_modulestore_create_child_with_position(self):
"""
This test is designed to hit a specific set of use cases having to do with
the child positioning logic found in split_mongo/split.py:create_child()
"""
# Set up the split module store
store = modulestore()
user = random.getrandbits(32)
course_key = CourseLocator('test_org', 'test_transaction', 'test_run')
with store.bulk_operations(course_key):
new_course = store.create_course('test_org', 'test_transaction', 'test_run', user, BRANCH_NAME_DRAFT)
new_course_locator = new_course.id
versionless_course_locator = new_course_locator.version_agnostic()
first_child = store.create_child(
self.user_id,
new_course.location,
"chapter"
)
refetch_course = store.get_course(versionless_course_locator)
second_child = store.create_child(
self.user_id,
refetch_course.location,
"chapter",
position=0
)
            # First child should have been moved to the second position, and the newly inserted child takes the lead
refetch_course = store.get_course(versionless_course_locator)
children = refetch_course.get_children()
self.assertEqual(unicode(children[1].location), unicode(first_child.location))
self.assertEqual(unicode(children[0].location), unicode(second_child.location))
# Clean up the data so we don't break other tests which apparently expect a particular state
store.delete_course(refetch_course.id, user)
class TestCourseCreation(SplitModuleTest):
"""
Test create_course
"""
def test_simple_creation(self):
"""
The simplest case but probing all expected results from it.
"""
# Oddly getting differences of 200nsec
new_course = modulestore().create_course(
'test_org', 'test_course', 'test_run', 'create_user', BRANCH_NAME_DRAFT
)
new_locator = new_course.location
# check index entry
index_info = modulestore().get_course_index_info(new_locator.course_key)
self.assertEqual(index_info['org'], 'test_org')
self.assertEqual(index_info['edited_by'], 'create_user')
# check structure info
structure_info = modulestore().get_course_history_info(new_locator.course_key)
self.assertEqual(structure_info['original_version'], index_info['versions'][BRANCH_NAME_DRAFT])
self.assertIsNone(structure_info['previous_version'])
self.assertEqual(structure_info['edited_by'], 'create_user')
# check the returned course object
self.assertIsInstance(new_course, CourseDescriptor)
self.assertEqual(new_course.category, 'course')
self.assertFalse(new_course.show_calculator)
self.assertTrue(new_course.allow_anonymous)
self.assertEqual(len(new_course.children), 0)
self.assertEqual(new_course.edited_by, "create_user")
self.assertEqual(len(new_course.grading_policy['GRADER']), 4)
self.assertDictEqual(new_course.grade_cutoffs, {"Pass": 0.5})
def test_cloned_course(self):
"""
Test making a course which points to an existing draft and published but not making any changes to either.
"""
original_locator = CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT)
original_index = modulestore().get_course_index_info(original_locator)
new_draft = modulestore().create_course(
'best', 'leech', 'leech_run', 'leech_master', BRANCH_NAME_DRAFT,
versions_dict=original_index['versions'])
new_draft_locator = new_draft.location
self.assertRegexpMatches(new_draft_locator.org, 'best')
# the edited_by and other meta fields on the new course will be the original author not this one
self.assertEqual(new_draft.edited_by, '[email protected]')
self.assertEqual(new_draft_locator.version_guid, original_index['versions'][BRANCH_NAME_DRAFT])
# however the edited_by and other meta fields on course_index will be this one
new_index = modulestore().get_course_index_info(new_draft_locator.course_key)
self.assertEqual(new_index['edited_by'], 'leech_master')
new_published_locator = new_draft_locator.course_key.for_branch(BRANCH_NAME_PUBLISHED)
new_published = modulestore().get_course(new_published_locator)
self.assertEqual(new_published.edited_by, '[email protected]')
self.assertEqual(new_published.location.version_guid, original_index['versions'][BRANCH_NAME_PUBLISHED])
# changing this course will not change the original course
# using new_draft.location will insert the chapter under the course root
new_item = modulestore().create_child(
'leech_master', new_draft.location, 'chapter',
fields={'display_name': 'new chapter'}
)
new_draft_locator = new_draft_locator.course_key.version_agnostic()
new_index = modulestore().get_course_index_info(new_draft_locator)
self.assertNotEqual(new_index['versions'][BRANCH_NAME_DRAFT], original_index['versions'][BRANCH_NAME_DRAFT])
new_draft = modulestore().get_course(new_draft_locator)
self.assertEqual(new_item.edited_by, 'leech_master')
self.assertNotEqual(new_item.location.version_guid, original_index['versions'][BRANCH_NAME_DRAFT])
self.assertNotEqual(new_draft.location.version_guid, original_index['versions'][BRANCH_NAME_DRAFT])
structure_info = modulestore().get_course_history_info(new_draft_locator)
self.assertEqual(structure_info['edited_by'], 'leech_master')
original_course = modulestore().get_course(original_locator)
self.assertEqual(original_course.location.version_guid, original_index['versions'][BRANCH_NAME_DRAFT])
def test_derived_course(self):
"""
Create a new course which overrides metadata and course_data
"""
original_locator = CourseLocator(org='guestx', course='contender', run="run", branch=BRANCH_NAME_DRAFT)
original = modulestore().get_course(original_locator)
original_index = modulestore().get_course_index_info(original_locator)
fields = {
'grading_policy': original.grading_policy,
'display_name': 'Derivative',
}
fields['grading_policy']['GRADE_CUTOFFS'] = {'A': .9, 'B': .8, 'C': .65}
new_draft = modulestore().create_course(
'counter', 'leech', 'leech_run', 'leech_master', BRANCH_NAME_DRAFT,
versions_dict={BRANCH_NAME_DRAFT: original_index['versions'][BRANCH_NAME_DRAFT]},
fields=fields
)
new_draft_locator = new_draft.location
self.assertRegexpMatches(new_draft_locator.org, 'counter')
# the edited_by and other meta fields on the new course will be the original author not this one
self.assertEqual(new_draft.edited_by, 'leech_master')
self.assertNotEqual(new_draft_locator.version_guid, original_index['versions'][BRANCH_NAME_DRAFT])
# however the edited_by and other meta fields on course_index will be this one
new_index = modulestore().get_course_index_info(new_draft_locator.course_key)
self.assertEqual(new_index['edited_by'], 'leech_master')
self.assertEqual(new_draft.display_name, fields['display_name'])
self.assertDictEqual(
new_draft.grading_policy['GRADE_CUTOFFS'],
fields['grading_policy']['GRADE_CUTOFFS']
)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_update_course_index(self, _from_json):
"""
Test the versions pointers. NOTE: you can change the org, course, or other things, but
it's not clear how you'd find them again or associate them w/ existing student history since
we use course_key so many places as immutable.
"""
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
course_info = modulestore().get_course_index_info(locator)
# an allowed but not necessarily recommended way to revert the draft version
head_course = modulestore().get_course(locator)
versions = course_info['versions']
versions[BRANCH_NAME_DRAFT] = head_course.previous_version
modulestore().update_course_index(None, course_info)
course = modulestore().get_course(locator)
self.assertEqual(course.location.version_guid, versions[BRANCH_NAME_DRAFT])
# an allowed but not recommended way to publish a course
versions[BRANCH_NAME_PUBLISHED] = versions[BRANCH_NAME_DRAFT]
modulestore().update_course_index(None, course_info)
course = modulestore().get_course(locator.for_branch(BRANCH_NAME_PUBLISHED))
self.assertEqual(course.location.version_guid, versions[BRANCH_NAME_DRAFT])
def test_create_with_root(self):
"""
Test create_course with a specified root id and category
"""
user = random.getrandbits(32)
new_course = modulestore().create_course(
'test_org', 'test_transaction', 'test_run', user, BRANCH_NAME_DRAFT,
root_block_id='top', root_category='chapter'
)
self.assertEqual(new_course.location.block_id, 'top')
self.assertEqual(new_course.category, 'chapter')
# look at db to verify
db_structure = modulestore().db_connection.get_structure(
new_course.location.as_object_id(new_course.location.version_guid)
)
self.assertIsNotNone(db_structure, "Didn't find course")
self.assertNotIn(BlockKey('course', 'course'), db_structure['blocks'])
self.assertIn(BlockKey('chapter', 'top'), db_structure['blocks'])
self.assertEqual(db_structure['blocks'][BlockKey('chapter', 'top')].block_type, 'chapter')
def test_create_id_dupe(self):
"""
Test create_course rejects duplicate id
"""
user = random.getrandbits(32)
courses = modulestore().get_courses(BRANCH_NAME_DRAFT)
with self.assertRaises(DuplicateCourseError):
dupe_course_key = courses[0].location.course_key
modulestore().create_course(
dupe_course_key.org, dupe_course_key.course, dupe_course_key.run, user, BRANCH_NAME_DRAFT
)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_bulk_ops_get_courses(self, _from_json):
"""
Test get_courses when some are created, updated, and deleted w/in a bulk operation
"""
# create 3 courses before bulk operation
split_store = modulestore()
user = random.getrandbits(32)
to_be_created = split_store.make_course_key('new', 'created', 'course')
with split_store.bulk_operations(to_be_created):
split_store.create_course(
to_be_created.org, to_be_created.course, to_be_created.run, user, master_branch=BRANCH_NAME_DRAFT,
)
modified_course_loc = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
with split_store.bulk_operations(modified_course_loc):
modified_course = modulestore().get_course(modified_course_loc)
modified_course.advertised_start = 'coming soon to a theater near you'
split_store.update_item(modified_course, user)
to_be_deleted = split_store.make_course_key("guestx", "contender", "run")
with split_store.bulk_operations(to_be_deleted):
split_store.delete_course(to_be_deleted, user)
# now get_courses
courses = split_store.get_courses(BRANCH_NAME_DRAFT)
self.assertEqual(len(courses), 3)
course_ids = [course.id.for_branch(None) for course in courses]
self.assertNotIn(to_be_deleted, course_ids)
self.assertIn(to_be_created, course_ids)
fetched_modified = [course for course in courses if course.id == modified_course_loc][0]
self.assertEqual(fetched_modified.advertised_start, modified_course.advertised_start)
class TestInheritance(SplitModuleTest):
"""
Test the metadata inheritance mechanism.
"""
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_inheritance(self, _from_json):
"""
The actual test
"""
# Note, not testing value where defined (course) b/c there's no
# defined accessor for it on CourseDescriptor.
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'problem', 'problem3_2'
)
node = modulestore().get_item(locator)
# inherited
self.assertEqual(node.graceperiod, datetime.timedelta(hours=2))
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'problem', 'problem1'
)
node = modulestore().get_item(locator)
# overridden
self.assertEqual(node.graceperiod, datetime.timedelta(hours=4))
def test_inheritance_not_saved(self):
"""
        Regression test: saving inherited settings with updated blocks was making inheritance sticky
"""
# set on parent, retrieve child, verify setting
chapter = modulestore().get_item(
BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'chapter', 'chapter3'
)
)
problem = modulestore().get_item(
BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'problem', 'problem3_2'
)
)
self.assertFalse(problem.visible_to_staff_only)
chapter.visible_to_staff_only = True
modulestore().update_item(chapter, self.user_id)
problem = modulestore().get_item(problem.location.version_agnostic())
self.assertTrue(problem.visible_to_staff_only)
# unset on parent, retrieve child, verify unset
chapter = modulestore().get_item(chapter.location.version_agnostic())
del chapter.visible_to_staff_only
modulestore().update_item(chapter, self.user_id)
problem = modulestore().get_item(problem.location.version_agnostic())
self.assertFalse(problem.visible_to_staff_only)
def test_dynamic_inheritance(self):
"""
Test inheritance for create_item with and without a parent pointer
"""
course_key = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
chapter = modulestore().get_item(BlockUsageLocator(course_key, 'chapter', 'chapter3'))
chapter.visible_to_staff_only = True
orphan_problem = modulestore().create_item(self.user_id, course_key, 'problem')
self.assertFalse(orphan_problem.visible_to_staff_only)
parented_problem = modulestore().create_child(self.user_id, chapter.location.version_agnostic(), 'problem')
# FIXME LMS-11376
# self.assertTrue(parented_problem.visible_to_staff_only)
orphan_problem = modulestore().create_xblock(chapter.runtime, course_key, 'problem')
self.assertFalse(orphan_problem.visible_to_staff_only)
parented_problem = modulestore().create_xblock(chapter.runtime, course_key, 'problem', parent_xblock=chapter)
# FIXME LMS-11376
# self.assertTrue(parented_problem.visible_to_staff_only)
class TestPublish(SplitModuleTest):
"""
Test the publishing api
"""
def setUp(self):
super(TestPublish, self).setUp()
def tearDown(self):
SplitModuleTest.tearDown(self)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_publish_safe(self, _from_json):
"""
Test the standard patterns: publish to new branch, revise and publish
"""
source_course = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
dest_course = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_PUBLISHED)
head = source_course.make_usage_key('course', "head12345")
chapter1 = source_course.make_usage_key('chapter', 'chapter1')
chapter2 = source_course.make_usage_key('chapter', 'chapter2')
chapter3 = source_course.make_usage_key('chapter', 'chapter3')
modulestore().copy(self.user_id, source_course, dest_course, [head], [chapter2, chapter3])
expected = [BlockKey.from_usage_key(head), BlockKey.from_usage_key(chapter1)]
unexpected = [
BlockKey.from_usage_key(chapter2),
BlockKey.from_usage_key(chapter3),
BlockKey("problem", "problem1"),
BlockKey("problem", "problem3_2")
]
self._check_course(source_course, dest_course, expected, unexpected)
# add a child under chapter1
new_module = modulestore().create_child(
self.user_id, chapter1, "sequential",
fields={'display_name': 'new sequential'},
)
# remove chapter1 from expected b/c its pub'd version != the source anymore since source changed
expected.remove(BlockKey.from_usage_key(chapter1))
# check that it's not in published course
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(new_module.location.map_into_course(dest_course))
# publish it
modulestore().copy(self.user_id, source_course, dest_course, [new_module.location], None)
expected.append(BlockKey.from_usage_key(new_module.location))
# check that it is in the published course and that its parent is the chapter
pub_module = modulestore().get_item(new_module.location.map_into_course(dest_course))
self.assertEqual(
modulestore().get_parent_location(pub_module.location).block_id, chapter1.block_id
)
# ensure intentionally orphaned blocks work (e.g., course_info)
new_module = modulestore().create_item(
self.user_id, source_course, "course_info", block_id="handouts"
)
# publish it
modulestore().copy(self.user_id, source_course, dest_course, [new_module.location], None)
expected.append(BlockKey.from_usage_key(new_module.location))
# check that it is in the published course (no error means it worked)
pub_module = modulestore().get_item(new_module.location.map_into_course(dest_course))
self._check_course(source_course, dest_course, expected, unexpected)
def test_exceptions(self):
"""
Test the exceptions which preclude successful publication
"""
source_course = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
# destination does not exist
destination_course = CourseLocator(org='fake', course='Unknown', run="run", branch=BRANCH_NAME_PUBLISHED)
head = source_course.make_usage_key('course', "head12345")
chapter3 = source_course.make_usage_key('chapter', 'chapter3')
problem1 = source_course.make_usage_key('problem', 'problem1')
with self.assertRaises(ItemNotFoundError):
modulestore().copy(self.user_id, source_course, destination_course, [chapter3], None)
# publishing into a new branch w/o publishing the root
destination_course = CourseLocator(org='testx', course='GreekHero', run='run', branch=BRANCH_NAME_PUBLISHED)
with self.assertRaises(ItemNotFoundError):
modulestore().copy(self.user_id, source_course, destination_course, [chapter3], None)
# publishing a subdag w/o the parent already in course
modulestore().copy(self.user_id, source_course, destination_course, [head], [chapter3])
with self.assertRaises(ItemNotFoundError):
modulestore().copy(self.user_id, source_course, destination_course, [problem1], [])
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_move_delete(self, _from_json):
"""
Test publishing moves and deletes.
"""
source_course = CourseLocator(org='testx', course='GreekHero', run='run', branch=BRANCH_NAME_DRAFT)
dest_course = CourseLocator(org='testx', course='GreekHero', run='run', branch=BRANCH_NAME_PUBLISHED)
head = source_course.make_usage_key('course', "head12345")
chapter2 = source_course.make_usage_key('chapter', 'chapter2')
problem1 = source_course.make_usage_key('problem', 'problem1')
modulestore().copy(self.user_id, source_course, dest_course, [head], [chapter2])
expected = [
BlockKey("course", "head12345"),
BlockKey("chapter", "chapter1"),
BlockKey("chapter", "chapter3"),
BlockKey("problem", "problem1"),
BlockKey("problem", "problem3_2"),
]
self._check_course(source_course, dest_course, expected, [BlockKey("chapter", "chapter2")])
# now move problem1 and delete problem3_2
chapter1 = modulestore().get_item(source_course.make_usage_key("chapter", "chapter1"))
chapter3 = modulestore().get_item(source_course.make_usage_key("chapter", "chapter3"))
chapter1.children.append(problem1)
chapter3.children.remove(problem1.map_into_course(chapter3.location.course_key))
modulestore().delete_item(source_course.make_usage_key("problem", "problem3_2"), self.user_id)
modulestore().copy(self.user_id, source_course, dest_course, [head], [chapter2])
expected = [
BlockKey("course", "head12345"),
BlockKey("chapter", "chapter1"),
BlockKey("chapter", "chapter3"),
BlockKey("problem", "problem1")
]
self._check_course(source_course, dest_course, expected, [BlockKey("chapter", "chapter2"), BlockKey("problem", "problem3_2")])
@contract(expected_blocks="list(BlockKey)", unexpected_blocks="list(BlockKey)")
def _check_course(self, source_course_loc, dest_course_loc, expected_blocks, unexpected_blocks):
"""
Check that the course has the expected blocks and does not have the unexpected blocks
"""
history_info = modulestore().get_course_history_info(dest_course_loc)
self.assertEqual(history_info['edited_by'], self.user_id)
for expected in expected_blocks:
source = modulestore().get_item(source_course_loc.make_usage_key(expected.type, expected.id))
pub_copy = modulestore().get_item(dest_course_loc.make_usage_key(expected.type, expected.id))
# everything except previous_version & children should be the same
self.assertEqual(source.category, pub_copy.category)
self.assertEqual(
source.update_version, pub_copy.source_version,
u"Versions don't match for {}: {} != {}".format(
                    expected, source.update_version, pub_copy.source_version
)
)
self.assertEqual(
self.user_id, pub_copy.edited_by,
"{} edited_by {} not {}".format(pub_copy.location, pub_copy.edited_by, self.user_id)
)
for field in source.fields.values():
if field.name == 'children':
self._compare_children(field.read_from(source), field.read_from(pub_copy), unexpected_blocks)
elif isinstance(field, (Reference, ReferenceList, ReferenceValueDict)):
self.assertReferenceEqual(field.read_from(source), field.read_from(pub_copy))
else:
self.assertEqual(field.read_from(source), field.read_from(pub_copy))
for unexp in unexpected_blocks:
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(dest_course_loc.make_usage_key(unexp.type, unexp.id))
def assertReferenceEqual(self, expected, actual):
if isinstance(expected, BlockUsageLocator):
expected = BlockKey.from_usage_key(expected)
actual = BlockKey.from_usage_key(actual)
elif isinstance(expected, list):
expected = [BlockKey.from_usage_key(key) for key in expected]
actual = [BlockKey.from_usage_key(key) for key in actual]
elif isinstance(expected, dict):
            expected = {key: BlockKey.from_usage_key(val) for (key, val) in expected.items()}
            actual = {key: BlockKey.from_usage_key(val) for (key, val) in actual.items()}
self.assertEqual(expected, actual)
@contract(
source_children="list(BlockUsageLocator)",
dest_children="list(BlockUsageLocator)",
unexpected="list(BlockKey)"
)
def _compare_children(self, source_children, dest_children, unexpected):
"""
Ensure dest_children == source_children minus unexpected
"""
source_block_keys = [
src_key
for src_key
in (BlockKey.from_usage_key(src) for src in source_children)
if src_key not in unexpected
]
dest_block_keys = [BlockKey.from_usage_key(dest) for dest in dest_children]
for unexp in unexpected:
self.assertNotIn(unexp, dest_block_keys)
self.assertEqual(source_block_keys, dest_block_keys)
class TestSchema(SplitModuleTest):
"""
Test the db schema (and possibly eventually migrations?)
"""
def test_schema(self):
"""
Test that the schema is set in each document
"""
db_connection = modulestore().db_connection
for collection in [db_connection.course_index, db_connection.structures, db_connection.definitions]:
self.assertEqual(
collection.find({'schema_version': {'$exists': False}}).count(),
0,
"{0.name} has records without schema_version".format(collection)
)
self.assertEqual(
collection.find({'schema_version': {'$ne': SplitMongoModuleStore.SCHEMA_VERSION}}).count(),
0,
"{0.name} has records with wrong schema_version".format(collection)
)
# ===========================================
def modulestore():
"""
Mock the django dependent global modulestore function to disentangle tests from django
"""
def load_function(engine_path):
"""
Load the given engine
"""
module_path, _, name = engine_path.rpartition('.')
return getattr(import_module(module_path), name)
if SplitModuleTest.modulestore is None:
class_ = load_function(SplitModuleTest.MODULESTORE['ENGINE'])
options = {}
options.update(SplitModuleTest.MODULESTORE['OPTIONS'])
options['render_template'] = render_to_template_mock
# pylint: disable=star-args
SplitModuleTest.modulestore = class_(
None, # contentstore
SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG'],
**options
)
SplitModuleTest.bootstrapDB(SplitModuleTest.modulestore)
return SplitModuleTest.modulestore
# pylint: disable=unused-argument, missing-docstring
def render_to_template_mock(*args):
pass
| agpl-3.0 |
1000Springs/web-app | hotspringsapp/models.py | 1 | 10858 | from hotspringsapp import app,db
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy.sql import text
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import sessionmaker
class Location(db.Model):
__tablename__ = 'public_location'
id = db.Column(db.Integer, primary_key=True)
feature_name = db.Column(db.String(50))
feature_system = db.Column(db.String(50))
district = db.Column(db.String(50))
location = db.Column(db.String(50))
lat = db.Column(db.Float)
lng = db.Column(db.Float)
description = db.Column(db.String(200))
private = db.Column(db.Boolean)
access = db.Column(db.String(15))
feature_type = db.Column(db.String(50))
def latestSample(self):
return Sample.query.filter(Location.id == self.id, Sample.location_id == Location.id ).order_by(Sample.location_id,Sample.date_gathered.desc()).first()
@staticmethod
def latestSampleIdsAllLocations():
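        # Correlated subquery: for every location, pick the id of the sample
        # whose date_gathered is the most recent for that location_id.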
query = text('select id from public_sample s where s.date_gathered = (select max(date_gathered) from public_sample where location_id = s.location_id limit 1 )')
rows = db.engine.execute(query).fetchall()
sample_ids = []
for row in rows:
sample_ids.append(row[0])
return sample_ids
def __init__(self,location_id,fName,latPos,lngPos,fSystem,dist,loc,desc,private,access):
self.id = location_id
self.feature_name = fName
self.feature_system = fSystem
self.district = dist
self.location = loc
self.lat = latPos
self.lng = lngPos
self.description = desc
self.private = private
self.access = access
def __repr__(self):
return '<Location {0} {1}>'.format(self.id,self.feature_name)
class Physical_data(db.Model):
__tablename__ = 'public_physical_data'
id = db.Column(db.Integer, primary_key=True)
initialTemp = db.Column(db.Float)
sampleTemp = db.Column(db.Float)
pH = db.Column(db.Float)
redox = db.Column(db.Float)
dO = db.Column(db.Float)
conductivity = db.Column(db.Float)
size = db.Column(db.String(20))
colour = db.Column(db.String(7))
ebullition = db.Column(db.String(50))
turbidity = db.Column(db.Float)
dnaVolume = db.Column(db.Float)
ferrousIronAbs = db.Column(db.Float)
def __init__(self, phys_id,iTemp,sTemp,ph,red,dis_ox,cond,date,size,colour,ebul,turb,dnaVol,ferIron):
self.id = phys_id
self.initialTemp = iTemp
self.pH = ph
self.redox = red
self.dO = dis_ox
self.conductivity = cond
self.size = size
self.colour = colour
self.ebullition = ebul
self.turbidity = turb
self.dnaVolume = dnaVol
self.ferrousIronAbs = ferIron
self.date_gathered = date
self.sampleTemp = sTemp
class Sample_Taxonomy(db.Model):
__tablename__ = 'public_sample_taxonomy'
id = db.Column(db.Integer, primary_key=True)
sample_id = db.Column( db.Integer, db.ForeignKey('public_sample.id'))
taxonomy_id = db.Column(db.Integer, db.ForeignKey('public_taxonomy.id'))
read_count = db.Column(db.Integer)
sample = db.relationship("Sample",backref="Sample_Taxonomy", lazy="select")
taxon = db.relationship("Taxonomy",backref="Sample_Taxonomy", lazy="joined")
class Taxonomy(db.Model):
__tablename__ = 'public_taxonomy'
id = db.Column(db.Integer, primary_key=True)
domain = db.Column(db.String(100), nullable=True)
phylum = db.Column(db.String(100), nullable=True)
Class = db.Column(db.String(100), nullable=True)
# read_count = db.Column(db.Integer, nullable=True)
def __getitem__(self,index):
return getattr(self,index)
class Sample(db.Model):
__tablename__ = 'public_sample'
id = db.Column(db.Integer, primary_key=True)
date_gathered = db.Column(db.DateTime, nullable=False)
sampler = db.Column(db.String(50), nullable=False)
sample_number = db.Column(db.String(50), nullable=False)
location_id = db.Column(db.Integer, db.ForeignKey("public_location.id"))
phys_id = db.Column(db.Integer, db.ForeignKey("public_physical_data.id"))
chem_id = db.Column(db.Integer, db.ForeignKey("public_chemical_data.id"))
location = db.relationship("Location",backref="Sample",lazy="select")
phys = db.relationship("Physical_data",backref="Sample",lazy="select")
image = db.relationship("Image",backref="Sample",lazy="select" , uselist=True)
chem = db.relationship("Chemical_data",backref="Sample",lazy="select")
samp_taxon = db.relationship("Sample_Taxonomy",backref="Sample",lazy="select")
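    # Convenience accessor: sample.taxon reads through the Sample_Taxonomy
    # association rows and yields the related Taxonomy objects directly.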
taxon = association_proxy('samp_taxon', 'taxon')
def __init__(self,id,date,location,physID,chemID,sampleNum):
self.id = id
self.date_gathered = date
self.location_id = location
self.phys_id = physID
self.chem_id = chemID
self.sample_number = sampleNum
def getTaxonomy(self):
# Queries of views without primary keys don't fit very well in the
# SQLAlchemy ORM, so query the DB with raw SQL
column_names = ["read_count", "domain", "phylum", "class", "order", "family", "genus", "species"]
query = text(
'select `' + ('`,`'.join(column_names)) + '` from public_confident_taxonomy where sample_id = :sample_id' +
' order by `'+ ('`,`'.join(column_names[1:])) +'`'
)
rows = db.engine.execute(query, sample_id=self.id).fetchall()
return [dict(zip(column_names,row)) for row in rows]
def hasTaxonomy(self):
query = text(
'select * from public_sample_taxonomy where sample_id = :sample_id limit 1'
)
rows = db.engine.execute(query, sample_id=self.id).fetchall()
return len(rows) > 0
def __repr__(self):
return '<Sample {0} {1} {2}>'.format(self.id,self.location_id,self.date_gathered)
class Image(db.Model):
__tablename__ = 'public_image'
id = db.Column(db.Integer, primary_key=True)
sample_id = db.Column(db.Integer, db.ForeignKey("public_sample.id"), nullable=False)
image_path = db.Column(db.String (150), nullable = False)
image_type = db.Column(db.String (150))
    def __init__(self,id,sid,iPath,iType):
        self.id = id
        self.sample_id = sid
        self.image_path = iPath
        self.image_type = iType
class User(db.Model):
username = db.Column(db.String(100), primary_key=True)
password = db.Column(db.String(100))
def __init__(self,username,password):
self.username = username
self.password = password
def check_password(self, password):
return check_password_hash(self.password,password)
class Chemical_data(db.Model):
__tablename__ = 'public_chemical_data'
id = db.Column(db.Integer, primary_key=True)
Li = db.Column(db.Float)
B = db.Column(db.Float)
N = db.Column(db.Float)
Na = db.Column(db.Float)
P = db.Column(db.Float)
Cl = db.Column(db.Float)
C = db.Column(db.Float)
Al = db.Column(db.Float)
Si = db.Column(db.Float)
K = db.Column(db.Float)
Ca = db.Column(db.Float)
V = db.Column(db.Float)
Cr = db.Column(db.Float)
Fe = db.Column(db.Float)
Mn = db.Column(db.Float)
cobalt = db.Column(db.Float)
Ni = db.Column(db.Float)
Cu = db.Column(db.Float)
Zn = db.Column(db.Float)
Mg = db.Column(db.Float)
As = db.Column(db.Float)
Se = db.Column(db.Float)
Br = db.Column(db.Float)
Sr = db.Column(db.Float)
Mo = db.Column(db.Float)
Ag = db.Column(db.Float)
Cd = db.Column(db.Float)
In = db.Column(db.Float)
Ba = db.Column(db.Float)
La = db.Column(db.Float)
Ti = db.Column(db.Float)
Pb = db.Column(db.Float)
Bi = db.Column(db.Float)
U = db.Column(db.Float)
CH4 = db.Column(db.Float)
H2S = db.Column(db.Float)
H2 = db.Column(db.Float)
CO = db.Column(db.Float)
nitrate = db.Column(db.Float)
nitrite = db.Column(db.Float)
ammonium = db.Column(db.Float)
sulfate = db.Column(db.Float)
chloride = db.Column(db.Float)
phosphate = db.Column(db.Float)
iron2 = db.Column(db.Float)
bicarbonate = db.Column(db.Float)
thallium = db.Column(db.Float)
    def returnElements(self):
        # Same [symbol, value] pairs in the same order as before, built from
        # one ordered list of column names via __getitem__.
        element_names = [
            "Li", "B", "N", "Na", "P", "Cl", "C", "Al", "Si", "K", "Ca",
            "V", "Cr", "Fe", "Mn", "cobalt", "Ni", "Cu", "Zn", "Mg", "As",
            "Se", "Br", "Sr", "Mo", "Ag", "Cd", "In", "Ba", "La", "Ti",
            "Pb", "Bi", "U",
        ]
        return [[name, self[name]] for name in element_names]
def returnGases(self):
gases = []
gases.append(["CH4",self.CH4])
gases.append(["H2S",self.H2S])
gases.append(["H2",self.H2])
gases.append(["CO",self.CO])
return gases
def returnCompounds(self):
compounds = []
compounds.append(["nitrate",self.nitrate])
compounds.append(["nitrite",self.nitrite])
compounds.append(["ammonium",self.ammonium])
compounds.append(["sulfate",self.sulfate])
compounds.append(["chloride",self.chloride])
compounds.append(["phosphate",self.phosphate])
compounds.append(["iron2",self.iron2])
compounds.append(["bicarbonate",self.bicarbonate])
return compounds
def __getitem__(self,index):
return getattr(self,index)
| mit |
choard1895/monolith_lamp | convert_image.py | 1 | 14461 | import logging
import math
import sys
import zlib
from PIL import Image, ImageStat
import scipy.stats
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Types of images this can handle
# * Alpha-encoded monochrome
# * RGB monochrome w/o cast
# * RGB monochrome w/ cast
# * Full RGBW
# * Full RGB
# * Greyscale
# Implemented compression methods:
# * IMAGE_UNCOMPRESSED_MONOCHROME
# * IMAGE_DEFLATE_MONOCHROME
# * IMAGE_UNCOMPRESSED_FULL
# * IMAGE_DEFLATE_FULL
# Known failures:
# * sample_images/greyscale_small_mask.png
# TODO: Override cast on monochrome images
def formatData(data, width=79, line_start=None):
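    # Renders a byte sequence as a C array literal, e.g. [1, 255] becomes
    # "{\n  0x01, 0xFF, \n}", wrapping lines at the given width.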
def maybeHex(x):
try:
return '0x{:02X}'.format(x)
except ValueError:
return str(x)
if line_start is None:
line_start = ' '
result = '{\n'
line = line_start
for datum in data:
str_datum = maybeHex(datum)
if len(line) + len(str_datum) >= width:
result += line + '\n'
line = line_start
line += str_datum + ', '
result += line + '\n'
#subset, data = data[:width], data[width:]
#result += '\t' + ''.join(maybeHex(datum) + ', ' for datum in subset) + '\n'
result += '}'
return result
def convertVideo(images, delay, allow_deflate):
assert delay <= 0xFFFF
delay_hi = (delay >> 8) & 0xFF
delay_lo = delay & 0xFF
assert len(images) <= 0xFFFF
images_hi = (len(images) >> 8) & 0xFF
images_lo = len(images) & 0xFF
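    # 6-byte video header: 16-bit inter-frame delay, two bytes that appear to
    # be reserved/padding, then a 16-bit frame count (big-endian).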
header = [delay_hi, delay_lo, 0x00, 0x00, images_hi, images_lo]
offset_table = []
frame_table = []
offset = 0
logger.info('Compressing %i frames...', len(images))
for image in images:
assert offset <= 0xFFFFFF
offset_table.extend((offset >> (i * 8)) & 0xFF for i in range(2, -1, -1))
frame_data = convertImage(image, allow_deflate)
frame_table.extend(frame_data)
offset += len(frame_data)
result = header + offset_table + frame_table
logger.info('Header is %i bytes.', len(header))
logger.info('Offset table is %i bytes.', len(offset_table))
logger.info('Frame table is %i bytes.', len(frame_table))
logger.info('Total video size is %i bytes.', len(result))
return result
def convertPaletteVideo(palette, images, delay, allow_deflate):
assert len(palette) == 16*4
assert delay <= 0xFFFF
delay_hi = (delay >> 8) & 0xFF
delay_lo = delay & 0xFF
assert len(images) <= 0xFFFF
images_hi = (len(images) >> 8) & 0xFF
images_lo = len(images) & 0xFF
header = [delay_hi, delay_lo, 0x00, 0x00, images_hi, images_lo]
offset_table = []
frame_table = []
offset = 0
logger.info('Compressing %i frames...', len(images))
for image in images:
assert offset <= 0xFFFFFF
offset_table.extend((offset >> (i * 8)) & 0xFF for i in range(2, -1, -1))
frame_data = convertPaletteImage(palette, image, allow_deflate)
frame_table.extend(frame_data)
offset += len(frame_data)
result = header + offset_table + frame_table
logger.info('Header is %i bytes.', len(header))
logger.info('Offset table is %i bytes.', len(offset_table))
logger.info('Frame table is %i bytes.', len(frame_table))
logger.info('Total video size is %i bytes.', len(result))
return result
def convertImage(image, allow_deflate=True, interpolate=False):
compression_method, data = smartCompressImage(image, allow_deflate, interpolate)
assert len(data) <= 0xFFFF
size_hi = (len(data) >> 8) & 0xFF
size_lo = len(data) & 0xFF
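    # 7-byte image header: width, height, compression method, two bytes that
    # appear to be reserved, then the 16-bit payload size (big-endian).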
header = [image.size[0], image.size[1], compression_method, 0x00, 0x00, size_hi, size_lo]
return header + data
def convertPaletteImage(palette, image, allow_deflate=True):
compression_method, data = compressPaletteImage(palette, image, allow_deflate)
assert len(data) <= 0xFFFF
size_hi = (len(data) >> 8) & 0xFF
size_lo = len(data) & 0xFF
header = [image.size[0], image.size[1], compression_method, 0x00, 0x00, size_hi, size_lo]
return header + data
def smartCompressImage(image, allow_deflate=True, interpolate=False):
# Convert unknown modes to RGB
if image.mode not in ('RGBA', 'RGB', 'L'):
logger.warning('Image mode \'%s\' not supported. Converting to RGB.', image.mode)
image = image.convert('RGB')
# Apply image heuristics to optimize compression
if image.mode == 'RGBA':
logger.info('Processing RGBA image.')
stats = ImageStat.Stat(image)
if all(upper == 0 for lower, upper in stats.extrema[0:3]):
# Image is alpha-encoded monochrome: (0, 0, 0, w) -> (0, 0, 0, 255) + (w)
logger.info('Alpha-encoded monochrome detected.')
mono_image = image.split()[3]
method, data = compressMonochromeImage(mono_image, (0, 0, 0, 255), allow_deflate)
elif stats.extrema[3][0] == 255:
# Image is RGB with unused alpha channel: (r, g, b, 255)
logger.info('RGB with unused alpha channel detected.')
# Detect RGB monochrome by removing color cast
result = decastMonochrome(image)
if result is None:
# Full RGB image: (r, g, b) -> (r, g, b, 0)
# Optionally performs RGB -> RGBW interpolation
rgbw_image = convertRGBtoRGBW(image, interpolate)
method, data = compressColorImage(rgbw_image, allow_deflate)
else:
# One of:
# * RGB monochrome w/o cast: (w, w, w) -> (0, 0, 0, 255) + (w)
# * RGB monochrome w/ cast: (xr, xg, xb) -> (r, g, b, 0) + (x)
mono_image, color = result
method, data = compressMonochromeImage(mono_image, color, allow_deflate)
else:
# Image is full RGBW: (r, g, b, w) -> (r, g, b, w)
logger.info('Full RGBW image detected.')
method, data = compressColorImage(image, allow_deflate)
elif image.mode == 'L':
# Image is monochrome W, ready to go
logger.info('Monochrome W-channel image detected.')
method, data = compressMonochromeImage(image, (0, 0, 0, 255), allow_deflate)
elif image.mode == 'RGB':
logger.info('RGB image detected.')
# Detect RGB monochrome by removing color cast
result = decastMonochrome(image)
if result is None:
# Full RGB image: (r, g, b) -> (r, g, b, 0)
# Optionally performs RGB -> RGBW interpolation
rgbw_image = convertRGBtoRGBW(image, interpolate)
method, data = compressColorImage(rgbw_image, allow_deflate)
else:
# One of:
# * RGB monochrome w/o cast: (w, w, w) -> (0, 0, 0, 255) + (w)
# * RGB monochrome w/ cast: (xr, xg, xb) -> (r, g, b, 0) + (x)
mono_image, color = result
method, data = compressMonochromeImage(mono_image, color, allow_deflate)
else:
raise RuntimeError('Could not convert to known type.')
return method, data
def compressPaletteImage(palette, image, allow_deflate=True):
if image.mode != 'L':
raise RuntimeError('Palette image compression requires mode \'L\'.')
if len(palette) != 16 and len(palette) != 16*4:
        raise RuntimeError('Palette image compression requires a 16-color RGBW palette (16 4-tuples or 64 flat values).')
if len(palette) == 16:
flat_palette = sum(palette, [])
else:
flat_palette = palette
assert len(flat_palette) == 16*4
logger.info('Compressing palette image...')
# Hold results
results = dict()
# First add the raw data
logger.info('Trying uncompressed...')
raw_data = list(flat_palette) + list(image.getdata())
assert len(raw_data) == (16*4 + image.size[0] * image.size[1])
results['IMAGE_UNCOMPRESSED_PALETTE'] = raw_data
logger.info('Size was %i.', len(raw_data))
# If permitted, try deflate compression
if allow_deflate:
logger.info('Trying deflate...')
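        # zlib output starts with a 2-byte zlib header (CMF/FLG); it is
        # stripped here, presumably so the decoder only sees the raw deflate
        # stream.  The other compress*Image functions below do the same.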
deflate_data = list(zlib.compress(bytes(raw_data)))[2:]
results['IMAGE_DEFLATE_PALETTE'] = deflate_data
logger.info('Size was %i.', len(deflate_data))
# That's all we support for now!
# Compare methods
method, data = min(results.items(), key=lambda x: len(x[1]))
logger.info('Best method was %s with size %i.', method, len(data))
return method, data
def compressMonochromeImage(image, color=None, allow_deflate=True):
if image.mode != 'L':
raise RuntimeError('Monochrome image compression requires mode \'L\'.')
    # Default color cast is full W
    if color is None:
        color = (0, 0, 0, 255)
    if len(color) != 4:
        raise RuntimeError('Monochrome image compression requires an RGBW color as a 4-tuple.')
logger.info('Compressing monochrome image...')
# Hold results
results = dict()
# First add the raw data
logger.info('Trying uncompressed...')
raw_data = list(color[0:4]) + list(image.getdata())
assert len(raw_data) == (4 + image.size[0] * image.size[1])
results['IMAGE_UNCOMPRESSED_MONOCHROME'] = raw_data
logger.info('Size was %i.', len(raw_data))
# If permitted, try deflate compression
if allow_deflate:
logger.info('Trying deflate...')
deflate_data = list(zlib.compress(bytes(raw_data)))[2:]
results['IMAGE_DEFLATE_MONOCHROME'] = deflate_data
logger.info('Size was %i.', len(deflate_data))
# That's all we support for now!
# Compare methods
method, data = min(results.items(), key=lambda x: len(x[1]))
logger.info('Best method was %s with size %i.', method, len(data))
return method, data
def compressColorImage(image, allow_deflate=True, allow_palette=True):
if image.mode != 'RGBA':
        raise RuntimeError('Color image compression requires mode \'RGBA\'.')
logger.info('Compressing color image...')
# Hold results
results = dict()
# First add the raw data
logger.info('Trying uncompressed...')
raw_data = list(sum(list(image.getdata()), ()))
assert len(raw_data) == (4 * image.size[0] * image.size[1])
results['IMAGE_UNCOMPRESSED_FULL'] = raw_data
logger.info('Size was %i.', len(raw_data))
# If permitted, try deflate compression
if allow_deflate:
logger.info('Trying deflate...')
deflate_data = list(zlib.compress(bytes(raw_data)))[2:]
results['IMAGE_DEFLATE_FULL'] = deflate_data
logger.info('Size was %i.', len(deflate_data))
# TODO: Need to make a custom solution for paletting
# image.colors(image.size[0] * image.size[1]) is helpful
# That's all we support for now!
# Compare methods
method, data = min(results.items(), key=lambda x: len(x[1]))
logger.info('Best method was %s with size %i.', method, len(data))
return method, data
def convertRGBtoRGBW(image, interpolate=False):
logger.info('Converting RGB to RGBW.')
if interpolate:
# TODO: Convert RGB to RGBW using http://blog.saikoled.com/post/44677718712/how-to-convert-from-hsi-to-rgb-white or something
raise NotImplementedError
else:
image.putalpha(0)
return image
# Analyze an RGB image to determine if it is color-cast monochrome
def decastMonochrome(image, correlation_thresh=0.95, white_thresh=0.99, rescale_values=False):
# TODO: Detect mask color and translate appropriately
# Ideas: remove mask pixels from analysis, then map 255 to mask afterwards
if image.mode != 'RGB':
raise RuntimeError('decastMonochrome requires RGB image.')
logger.info('Attempting to decast monochrome image...')
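    # Approach: linearly regress the two dimmer bands against the brightest
    # band; near-perfect correlation means every pixel has the same hue at
    # varying intensity, i.e. the image is color-cast monochrome.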
# Compute general stats (uses mean and extrema)
stats = ImageStat.Stat(image)
# Perform regression against highest mean
sorted_bands = sorted([0, 1, 2], key=lambda x: stats.mean[x]) # Band-indices sorted ascending by mean brightness
logger.info('Calculating correlation using \'%s\' band.', 'RGB'[sorted_bands[-1]])
min_band, med_band, max_band = (list(image.getdata(band)) for band in sorted_bands)
try:
max_min_regress = scipy.stats.linregress(max_band, min_band)
max_med_regress = scipy.stats.linregress(max_band, med_band)
except Exception:
logger.info('Regression failed.')
return None
# Check that the correlation is sufficient
if (max_min_regress.rvalue < correlation_thresh) or (max_med_regress.rvalue < correlation_thresh):
logger.info('Correlation insufficient: (%f, %f) < %f', max_min_regress.rvalue, max_med_regress.rvalue, correlation_thresh)
return None
else:
logger.info('Correlation above threshold: (%f, %f) >= %f', max_min_regress.rvalue, max_med_regress.rvalue, correlation_thresh)
# Build normalized color cast from regression slopes (max = 1.0)
norm_cast = [0.0, 0.0, 0.0]
norm_cast[sorted_bands[0]] = max_min_regress.slope
norm_cast[sorted_bands[1]] = max_med_regress.slope
norm_cast[sorted_bands[2]] = 1.0
if rescale_values:
        # Find a scale factor so the greatest brightness to encode becomes 255; this preserves maximum resolution
# This value will never be greater than 1.0
rescaler = min(bound[1] / cast_band for bound, cast_band in zip(stats.extrema, norm_cast)) / 255.0
else:
rescaler = 1.0
rescaled_cast = [cast_band * rescaler for cast_band in norm_cast]
logger.info('Using rescale value %f.', rescaler)
# Detect white-only images as a special case
if (max_min_regress.slope >= white_thresh) and (max_med_regress.slope >= white_thresh):
monochrome_color = [0, 0, 0, min(255, max(0, int(255 * rescaler)))]
logger.info('Using only W channel to encode, as (%f, %f) >= %f', max_min_regress.slope, max_med_regress.slope, white_thresh)
else:
monochrome_color = [min(255, max(0, int(255 * cast_band))) for cast_band in rescaled_cast]
monochrome_color.append(0) # Add in unused W channel
logger.info('Detected cast color: %s', str(monochrome_color))
# Use the brightest band to decast
encode_band = sorted_bands[-1]
greyscale_image_data = [min(255, max(0, int(pixel / rescaled_cast[encode_band]))) for pixel in image.getdata(encode_band)]
# Return the cast color and recovered greyscale image
return Image.frombytes('L', image.size, bytes(greyscale_image_data)), monochrome_color
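# A minimal usage sketch: convert a single image file to the firmware byte
# format and dump it as a C array.  The default file name is a placeholder;
# pass a real path on the command line.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    path = sys.argv[1] if len(sys.argv) > 1 else 'sample_images/example.png'
    data = convertImage(Image.open(path), allow_deflate=True)
    print(formatData(data))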
| gpl-3.0 |
jalexvig/tensorflow | tensorflow/python/kernel_tests/summary_tensor_op_test.py | 43 | 5972 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops
from tensorflow.python.platform import test
class SummaryOpsTest(test.TestCase):
def _SummarySingleValue(self, s):
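    # Deserializes a Summary proto string and checks it holds exactly one
    # value, which is returned for further assertions.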
summ = summary_pb2.Summary()
summ.ParseFromString(s)
self.assertEqual(len(summ.value), 1)
return summ.value[0]
def _AssertNumpyEq(self, actual, expected):
self.assertTrue(np.array_equal(actual, expected))
def testTags(self):
with self.test_session() as sess:
c = constant_op.constant(1)
s1 = summary_ops.tensor_summary("s1", c)
with ops.name_scope("foo"):
s2 = summary_ops.tensor_summary("s2", c)
with ops.name_scope("zod"):
s3 = summary_ops.tensor_summary("s3", c)
s4 = summary_ops.tensor_summary("TensorSummary", c)
summ1, summ2, summ3, summ4 = sess.run([s1, s2, s3, s4])
v1 = self._SummarySingleValue(summ1)
self.assertEqual(v1.tag, "s1")
v2 = self._SummarySingleValue(summ2)
self.assertEqual(v2.tag, "foo/s2")
v3 = self._SummarySingleValue(summ3)
self.assertEqual(v3.tag, "foo/zod/s3")
v4 = self._SummarySingleValue(summ4)
self.assertEqual(v4.tag, "foo/zod/TensorSummary")
def testScalarSummary(self):
with self.test_session() as sess:
const = constant_op.constant(10.0)
summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, 10)
def testStringSummary(self):
s = six.b("foobar")
with self.test_session() as sess:
const = constant_op.constant(s)
summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, s)
def testManyScalarSummary(self):
with self.test_session() as sess:
const = array_ops.ones([5, 5, 5])
summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, np.ones([5, 5, 5]))
def testManyStringSummary(self):
strings = [[six.b("foo bar"), six.b("baz")], [six.b("zoink"), six.b("zod")]]
with self.test_session() as sess:
const = constant_op.constant(strings)
summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, strings)
def testManyBools(self):
bools = [True, True, True, False, False, False]
with self.test_session() as sess:
const = constant_op.constant(bools)
summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, bools)
def testSummaryDescriptionAndDisplayName(self):
with self.test_session() as sess:
def get_description(summary_op):
summ_str = sess.run(summary_op)
summ = summary_pb2.Summary()
summ.ParseFromString(summ_str)
return summ.value[0].metadata
const = constant_op.constant(1)
# Default case; no description or display name
simple_summary = summary_ops.tensor_summary("simple", const)
descr = get_description(simple_summary)
self.assertEqual(descr.display_name, "")
self.assertEqual(descr.summary_description, "")
# Values are provided via function args
with_values = summary_ops.tensor_summary(
"simple",
const,
display_name="my name",
summary_description="my description")
descr = get_description(with_values)
self.assertEqual(descr.display_name, "my name")
self.assertEqual(descr.summary_description, "my description")
# Values are provided via the SummaryMetadata arg
metadata = summary_pb2.SummaryMetadata()
metadata.display_name = "my name"
metadata.summary_description = "my description"
with_metadata = summary_ops.tensor_summary(
"simple", const, summary_metadata=metadata)
descr = get_description(with_metadata)
self.assertEqual(descr.display_name, "my name")
self.assertEqual(descr.summary_description, "my description")
# If both SummaryMetadata and explicit args are provided, the args win
overwrite = summary_ops.tensor_summary(
"simple",
const,
summary_metadata=metadata,
display_name="overwritten",
summary_description="overwritten")
descr = get_description(overwrite)
self.assertEqual(descr.display_name, "overwritten")
self.assertEqual(descr.summary_description, "overwritten")
if __name__ == "__main__":
test.main()
| apache-2.0 |
Tejal011089/osmosis_erpnext | erpnext/utilities/doctype/contact/contact.py | 8 | 2420 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, extract_email_id
from erpnext.controllers.status_updater import StatusUpdater
class Contact(StatusUpdater):
def autoname(self):
# concat first and last name
self.name = " ".join(filter(None,
[cstr(self.get(f)).strip() for f in ["first_name", "last_name"]]))
# concat party name if reqd
for fieldname in ("customer", "supplier", "sales_partner"):
if self.get(fieldname):
self.name = self.name + "-" + cstr(self.get(fieldname)).strip()
break
def validate(self):
self.set_status()
self.validate_primary_contact()
def validate_primary_contact(self):
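		# Keep at most one primary contact per party: when this contact is
		# marked primary, demote any other primary contacts; otherwise promote
		# this contact automatically if the party has no primary contact yet.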
if self.is_primary_contact == 1:
if self.customer:
frappe.db.sql("update tabContact set is_primary_contact=0 where customer = %s",
(self.customer))
elif self.supplier:
frappe.db.sql("update tabContact set is_primary_contact=0 where supplier = %s",
(self.supplier))
elif self.sales_partner:
frappe.db.sql("""update tabContact set is_primary_contact=0
where sales_partner = %s""", (self.sales_partner))
else:
if self.customer:
if not frappe.db.sql("select name from tabContact \
where is_primary_contact=1 and customer = %s", (self.customer)):
self.is_primary_contact = 1
elif self.supplier:
if not frappe.db.sql("select name from tabContact \
where is_primary_contact=1 and supplier = %s", (self.supplier)):
self.is_primary_contact = 1
elif self.sales_partner:
if not frappe.db.sql("select name from tabContact \
where is_primary_contact=1 and sales_partner = %s",
self.sales_partner):
self.is_primary_contact = 1
def on_trash(self):
frappe.db.sql("""update `tabIssue` set contact='' where contact=%s""",
self.name)
@frappe.whitelist()
def get_contact_details(contact):
contact = frappe.get_doc("Contact", contact)
out = {
"contact_person": contact.get("name"),
"contact_display": " ".join(filter(None,
[contact.get("first_name"), contact.get("last_name")])),
"contact_email": contact.get("email_id"),
"contact_mobile": contact.get("mobile_no"),
"contact_phone": contact.get("phone"),
"contact_designation": contact.get("designation"),
"contact_department": contact.get("department")
}
return out
| agpl-3.0 |
siemens/django-mantis-authoring | mantis_authoring/cybox_object_transformers/emailmessage.py | 1 | 6163 | import json
from cybox.objects import email_message_object, address_object
from cybox.common import String
from .__object_base__ import transformer_object, ObjectFormTemplate
from django import forms
from django.utils.dateparse import parse_datetime
import mantis_authoring.EmailObjectFunctions as EOF
class Base(transformer_object):
# We factor out helper functions that might be used
# by serveral variants (at time of writing, there
# is only the 'Default' variant.
def create_cybox_email_header_part(self, properties):
cybox_email_header = email_message_object.EmailHeader()
""" Sender """
if properties['from_'].strip():
cybox_email_header.from_ = address_object.EmailAddress(properties['from_'])
""" Recipients """
if properties['to'].strip():
recipient_list = email_message_object.EmailRecipients()
for recipient in properties['to'].splitlines(False):
recipient_list.append(address_object.EmailAddress(recipient.strip()))
cybox_email_header.to = recipient_list
""" Subject """
if properties['subject'].strip():
cybox_email_header.subject = String(properties['subject'])
""" In-Reply-To """
if properties['in_reply_to'].strip():
cybox_email_header.in_reply_to = String(properties['in_reply_to'])
""" Reply-To """
if properties['reply_to'].strip():
cybox_email_header.reply_to = String(properties['reply_to'])
""" Send date """
if properties['send_date']:
cybox_email_header.date = parse_datetime(properties['send_date'])
""" X-Mailer """
if properties['x_mailer']:
cybox_email_header.x_mailer = String(properties['x_mailer'])
""" Received Lines """
if properties['received_lines']:
# TODO
rdb = EOF.ReceivedDB()
try:
rl = json.loads(properties['received_lines'])
for line in rl:
(helo, ident, rDNS, ip, by) = rdb.parse(line)
            except Exception:
pass
return cybox_email_header
class TEMPLATE_Default(Base):
display_name = "Email"
class ObjectForm(ObjectFormTemplate):
from_ = forms.CharField(max_length=256,
required=False,
help_text="Email address of the sender of the email message.")
to = forms.CharField(widget=forms.Textarea(attrs={'placeholder':"Recipients line by line"}),
required=False,
help_text="Email addresses of the recipients of the email message.")
subject = forms.CharField(max_length=1024,
required=False,
help_text="Subject of email message.")
in_reply_to = forms.CharField(max_length=1024,
required=False,
help_text = "Message ID of the message that this email is a reply to." )
reply_to = forms.CharField(max_length=1024,
required=False,
help_text = "Reply-To address set in header." )
send_date = forms.DateTimeField(required=False,
help_text = "Date/time that the email message was sent.")
links = forms.CharField(widget=forms.Textarea(attrs={'placeholder':'URLs line by line'}),
required=False,
help_text = "Paste here URLs contained in email message; for each URL, a"
" URI object will be generated and associated as 'Link' with the"
" created email message object. Alternatively, create a URI object"
" in the observable pool and relate it to this EmailMessage using"
" the 'contained_in' relation. The latter is preferable if you may"
" want to also relate the URI with other objects, as well.")
received_lines = forms.CharField(widget=forms.HiddenInput(), required=False)
x_mailer = forms.CharField(widget=forms.HiddenInput(), required=False)
def process_form(self, properties,id_base=None,namespace_tag=None):
# Create the object
cybox_email = email_message_object.EmailMessage()
# Fill in header information from user input
cybox_email.header = self.create_cybox_email_header_part(properties)
# See whether there are URI objects to be created
link_objects = []
links = properties['links'].splitlines(False)
if len(links)>0:
# We need to generate identifiers for the URI objects. We
# do this by using the 'create_derived_id' function that
# is contained in the 'transformer_object' class.
counter = 0
for link in links:
if link.strip():
obj_id_base = self.create_derived_id(id_base,
fact=link.strip(),
counter=counter
)
counter +=1
uri_obj = self.create_cybox_uri_object(link.strip())
link_objects.append((obj_id_base,uri_obj))
if link_objects:
email_links = email_message_object.Links()
for (id_base,obj) in link_objects:
id_ref = self.form_id_from_id_base(obj,namespace_tag,id_base)
email_links.append(email_message_object.LinkReference(id_ref))
cybox_email.links = email_links
return {'type': 'obj_with_subobjects',
'main_obj_properties_instance': cybox_email,
'obj_properties_instances' : link_objects }
| gpl-2.0 |
jayceyxc/hue | desktop/core/ext-py/lxml-3.3.6/benchmark/bench_etree.py | 17 | 12154 | import sys, copy
from itertools import *
import benchbase
from benchbase import (with_attributes, with_text, onlylib,
serialized, children, nochange, BytesIO)
TEXT = "some ASCII text"
UTEXT = u"some klingon: \uF8D2"
############################################################
# Benchmarks
############################################################
class BenchMark(benchbase.TreeBenchMark):
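    # Each bench_* method below is a single benchmark.  The benchbase
    # decorators select the tree variants (with/without attributes, ASCII or
    # unicode text) a benchmark runs against; @nochange marks benchmarks that
    # leave the input tree unmodified, so the harness need not rebuild it
    # between runs.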
@nochange
def bench_iter_children(self, root):
for child in root:
pass
@nochange
def bench_iter_children_reversed(self, root):
for child in reversed(root):
pass
@nochange
def bench_first_child(self, root):
for i in self.repeat1000:
child = root[0]
@nochange
def bench_last_child(self, root):
for i in self.repeat1000:
child = root[-1]
@nochange
def bench_middle_child(self, root):
pos = len(root) // 2
for i in self.repeat1000:
child = root[pos]
@nochange
@with_attributes(False)
@with_text(text=True)
def bench_tostring_text_ascii(self, root):
self.etree.tostring(root, method="text")
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
def bench_tostring_text_unicode(self, root):
self.etree.tostring(root, method="text", encoding='unicode')
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
def bench_tostring_text_utf16(self, root):
self.etree.tostring(root, method="text", encoding='UTF-16')
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
@onlylib('lxe')
@children
def bench_tostring_text_utf8_with_tail(self, children):
for child in children:
self.etree.tostring(child, method="text",
encoding='UTF-8', with_tail=True)
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf8(self, root):
self.etree.tostring(root, encoding='UTF-8')
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf16(self, root):
self.etree.tostring(root, encoding='UTF-16')
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf8_unicode_XML(self, root):
xml = self.etree.tostring(root, encoding='UTF-8').decode('UTF-8')
self.etree.XML(xml)
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_write_utf8_parse_bytesIO(self, root):
f = BytesIO()
self.etree.ElementTree(root).write(f, encoding='UTF-8')
f.seek(0)
self.etree.parse(f)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_parse_bytesIO(self, root_xml):
f = BytesIO(root_xml)
self.etree.parse(f)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_XML(self, root_xml):
self.etree.XML(root_xml)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_iterparse_bytesIO(self, root_xml):
f = BytesIO(root_xml)
for event, element in self.etree.iterparse(f):
pass
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_iterparse_bytesIO_clear(self, root_xml):
f = BytesIO(root_xml)
for event, element in self.etree.iterparse(f):
element.clear()
def bench_append_from_document(self, root1, root2):
# == "1,2 2,3 1,3 3,1 3,2 2,1" # trees 1 and 2, or 2 and 3, or ...
for el in root2:
root1.append(el)
def bench_insert_from_document(self, root1, root2):
pos = len(root1)//2
for el in root2:
root1.insert(pos, el)
pos = pos + 1
def bench_rotate_children(self, root):
# == "1 2 3" # runs on any single tree independently
for i in range(100):
el = root[0]
del root[0]
root.append(el)
def bench_reorder(self, root):
for i in range(1,len(root)//2):
el = root[0]
del root[0]
root[-i:-i] = [ el ]
def bench_reorder_slice(self, root):
for i in range(1,len(root)//2):
els = root[0:1]
del root[0]
root[-i:-i] = els
def bench_clear(self, root):
root.clear()
@nochange
@children
def bench_has_children(self, children):
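        # Illustrative note (not from the original file): the repeated
        # truth-tests below are intentional -- each `child and ...` triggers
        # another __bool__/__len__ call, which is the operation being timed.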
for child in children:
if child and child and child and child and child:
pass
@nochange
@children
def bench_len(self, children):
for child in children:
map(len, repeat(child, 20))
@children
def bench_create_subelements(self, children):
SubElement = self.etree.SubElement
for child in children:
SubElement(child, '{test}test')
def bench_append_elements(self, root):
Element = self.etree.Element
for child in root:
el = Element('{test}test')
child.append(el)
@nochange
@children
def bench_makeelement(self, children):
empty_attrib = {}
for child in children:
child.makeelement('{test}test', empty_attrib)
@nochange
@children
def bench_create_elements(self, children):
Element = self.etree.Element
for child in children:
Element('{test}test')
@children
def bench_replace_children_element(self, children):
Element = self.etree.Element
for child in children:
el = Element('{test}test')
child[:] = [el]
@children
def bench_replace_children(self, children):
els = [ self.etree.Element("newchild") ]
for child in children:
child[:] = els
def bench_remove_children(self, root):
for child in root:
root.remove(child)
def bench_remove_children_reversed(self, root):
for child in reversed(root):
root.remove(child)
@children
def bench_set_attributes(self, children):
for child in children:
child.set('a', 'bla')
@with_attributes(True)
@children
@nochange
def bench_get_attributes(self, children):
for child in children:
child.get('bla1')
child.get('{attr}test1')
@children
def bench_setget_attributes(self, children):
for child in children:
child.set('a', 'bla')
for child in children:
child.get('a')
@nochange
def bench_root_getchildren(self, root):
root.getchildren()
@nochange
def bench_root_list_children(self, root):
list(root)
@nochange
@children
def bench_getchildren(self, children):
for child in children:
child.getchildren()
@nochange
@children
def bench_get_children_slice(self, children):
for child in children:
child[:]
@nochange
@children
def bench_get_children_slice_2x(self, children):
for child in children:
child[:]
child[:]
@nochange
@children
@with_attributes(True, False)
@with_text(utext=True, text=True, no_text=True)
def bench_deepcopy(self, children):
for child in children:
copy.deepcopy(child)
@nochange
@with_attributes(True, False)
@with_text(utext=True, text=True, no_text=True)
def bench_deepcopy_all(self, root):
copy.deepcopy(root)
@nochange
@children
def bench_tag(self, children):
for child in children:
child.tag
@nochange
@children
def bench_tag_repeat(self, children):
for child in children:
for i in self.repeat100:
child.tag
@nochange
@with_text(utext=True, text=True, no_text=True)
@children
def bench_text(self, children):
for child in children:
child.text
@nochange
@with_text(utext=True, text=True, no_text=True)
@children
def bench_text_repeat(self, children):
for child in children:
for i in self.repeat500:
child.text
@children
def bench_set_text(self, children):
text = TEXT
for child in children:
child.text = text
@children
def bench_set_utext(self, children):
text = UTEXT
for child in children:
child.text = text
@nochange
@onlylib('lxe')
def bench_index(self, root):
for child in root:
root.index(child)
@nochange
@onlylib('lxe')
def bench_index_slice(self, root):
for child in root[5:100]:
root.index(child, 5, 100)
@nochange
@onlylib('lxe')
def bench_index_slice_neg(self, root):
for child in root[-100:-5]:
root.index(child, start=-100, stop=-5)
@nochange
def bench_iter_all(self, root):
list(root.iter())
@nochange
def bench_iter_one_at_a_time(self, root):
list(islice(root.iter(), 2**30, None))
@nochange
def bench_iter_islice(self, root):
list(islice(root.iter(), 10, 110))
@nochange
def bench_iter_tag(self, root):
list(islice(root.iter(self.SEARCH_TAG), 3, 10))
@nochange
def bench_iter_tag_all(self, root):
list(root.iter(self.SEARCH_TAG))
@nochange
def bench_iter_tag_one_at_a_time(self, root):
list(islice(root.iter(self.SEARCH_TAG), 2**30, None))
@nochange
def bench_iter_tag_none(self, root):
list(root.iter("{ThisShould}NeverExist"))
@nochange
def bench_iter_tag_text(self, root):
[ e.text for e in root.iter(self.SEARCH_TAG) ]
@nochange
def bench_findall(self, root):
root.findall(".//*")
@nochange
def bench_findall_child(self, root):
root.findall(".//*/" + self.SEARCH_TAG)
@nochange
def bench_findall_tag(self, root):
root.findall(".//" + self.SEARCH_TAG)
@nochange
def bench_findall_path(self, root):
root.findall(".//*[%s]/./%s/./*" % (self.SEARCH_TAG, self.SEARCH_TAG))
@nochange
@onlylib('lxe')
def bench_xpath_path(self, root):
ns, tag = self.SEARCH_TAG[1:].split('}')
root.xpath(".//*[p:%s]/./p:%s/./*" % (tag,tag),
namespaces = {'p':ns})
@nochange
def bench_iterfind(self, root):
list(root.iterfind(".//*"))
@nochange
def bench_iterfind_tag(self, root):
list(root.iterfind(".//" + self.SEARCH_TAG))
@nochange
def bench_iterfind_islice(self, root):
list(islice(root.iterfind(".//*"), 10, 110))
_bench_xpath_single_xpath = None
@nochange
@onlylib('lxe')
def bench_xpath_single(self, root):
xpath = self._bench_xpath_single_xpath
if xpath is None:
ns, tag = self.SEARCH_TAG[1:].split('}')
xpath = self._bench_xpath_single_xpath = self.etree.XPath(
'.//p:%s[1]' % tag, namespaces={'p': ns})
xpath(root)
@nochange
def bench_find_single(self, root):
root.find(".//%s" % self.SEARCH_TAG)
@nochange
def bench_iter_single(self, root):
next(root.iter(self.SEARCH_TAG))
_bench_xpath_two_xpath = None
@nochange
@onlylib('lxe')
def bench_xpath_two(self, root):
xpath = self._bench_xpath_two_xpath
if xpath is None:
ns, tag = self.SEARCH_TAG[1:].split('}')
xpath = self._bench_xpath_two_xpath = self.etree.XPath(
'.//p:%s[position() < 3]' % tag, namespaces={'p': ns})
xpath(root)
@nochange
def bench_iterfind_two(self, root):
it = root.iterfind(".//%s" % self.SEARCH_TAG)
next(it)
next(it)
@nochange
def bench_iter_two(self, root):
it = root.iter(self.SEARCH_TAG)
next(it)
next(it)
if __name__ == '__main__':
benchbase.main(BenchMark)
| apache-2.0 |
n0max/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_pytester.py | 203 | 3498 | import pytest
import os
from _pytest.pytester import HookRecorder
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED
def test_make_hook_recorder(testdir):
item = testdir.getitem("def test_func(): pass")
recorder = testdir.make_hook_recorder(item.config.pluginmanager)
assert not recorder.getfailures()
pytest.xfail("internal reportrecorder tests need refactoring")
class rep:
excinfo = None
passed = False
failed = True
skipped = False
when = "call"
recorder.hook.pytest_runtest_logreport(report=rep)
failures = recorder.getfailures()
assert failures == [rep]
failures = recorder.getfailures()
assert failures == [rep]
class rep:
excinfo = None
passed = False
failed = False
skipped = True
when = "call"
rep.passed = False
rep.skipped = True
recorder.hook.pytest_runtest_logreport(report=rep)
modcol = testdir.getmodulecol("")
rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
rep.passed = False
rep.failed = True
rep.skipped = False
recorder.hook.pytest_collectreport(report=rep)
passed, skipped, failed = recorder.listoutcomes()
assert not passed and skipped and failed
numpassed, numskipped, numfailed = recorder.countoutcomes()
assert numpassed == 0
assert numskipped == 1
assert numfailed == 1
assert len(recorder.getfailedcollections()) == 1
recorder.unregister()
recorder.clear()
recorder.hook.pytest_runtest_logreport(report=rep)
pytest.raises(ValueError, "recorder.getfailures()")
def test_parseconfig(testdir):
config1 = testdir.parseconfig()
config2 = testdir.parseconfig()
assert config2 != config1
assert config1 != pytest.config
def test_testdir_runs_with_plugin(testdir):
testdir.makepyfile("""
pytest_plugins = "pytester"
def test_hello(testdir):
assert 1
""")
result = testdir.runpytest()
result.assert_outcomes(passed=1)
def make_holder():
class apiclass:
def pytest_xyz(self, arg):
"x"
def pytest_xyz_noarg(self):
"x"
apimod = type(os)('api')
def pytest_xyz(arg):
"x"
def pytest_xyz_noarg():
"x"
apimod.pytest_xyz = pytest_xyz
apimod.pytest_xyz_noarg = pytest_xyz_noarg
return apiclass, apimod
@pytest.mark.parametrize("holder", make_holder())
def test_hookrecorder_basic(holder):
pm = PytestPluginManager()
pm.addhooks(holder)
rec = HookRecorder(pm)
pm.hook.pytest_xyz(arg=123)
call = rec.popcall("pytest_xyz")
assert call.arg == 123
assert call._name == "pytest_xyz"
pytest.raises(pytest.fail.Exception, "rec.popcall('abc')")
pm.hook.pytest_xyz_noarg()
call = rec.popcall("pytest_xyz_noarg")
assert call._name == "pytest_xyz_noarg"
def test_makepyfile_unicode(testdir):
global unichr
try:
unichr(65)
except NameError:
unichr = chr
testdir.makepyfile(unichr(0xfffd))
def test_inline_run_clean_modules(testdir):
test_mod = testdir.makepyfile("def test_foo(): assert True")
result = testdir.inline_run(str(test_mod))
assert result.ret == EXIT_OK
# rewrite module, now test should fail if module was re-imported
test_mod.write("def test_foo(): assert False")
result2 = testdir.inline_run(str(test_mod))
assert result2.ret == EXIT_TESTSFAILED
| mpl-2.0 |
LockScreen/Backend | venv/lib/python2.7/site-packages/jinja2/loaders.py | 333 | 17380 | # -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems
def split_template_path(template):
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in template.split('/'):
if path.sep in piece \
or (path.altsep and path.altsep in piece) or \
piece == path.pardir:
raise TemplateNotFound(template)
elif piece and piece != '.':
pieces.append(piece)
return pieces
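# Illustrative examples (not part of the original module):
#   split_template_path('foo/bar/baz.html') -> ['foo', 'bar', 'baz.html']
#   split_template_path('foo/../baz.html')  -> raises TemplateNotFound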
class BaseLoader(object):
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
A very basic example for a loader that looks up templates on the file
system could look like this::
from jinja2 import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
with file(path) as f:
source = f.read().decode('utf-8')
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(self, environment, template):
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
        template as a unicode string or an ASCII bytestring. The filename should
be the name of the file on the filesystem if it was loaded from there,
otherwise `None`. The filename is used by python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError('%s cannot provide access to the source' %
self.__class__.__name__)
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError('this loader cannot iterate over all templates')
@internalcode
def load(self, environment, name, globals=None):
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
will not call this method but `get_source` directly.
"""
code = None
if globals is None:
globals = {}
# first we try to get the source for this template together
# with the filename and the uptodate function.
source, filename, uptodate = self.get_source(environment, name)
# try to load the code from the bytecode cache if there is a
# bytecode cache configured.
bcc = environment.bytecode_cache
if bcc is not None:
bucket = bcc.get_bucket(environment, name, filename, source)
code = bucket.code
# if we don't have code so far (not cached, no longer up to
# date) etc. we compile the template
if code is None:
code = environment.compile(source, name, filename)
# if the bytecode cache is available and the bucket doesn't
# have a code so far, we give the bucket the new code and put
# it back to the bytecode cache.
if bcc is not None and bucket.code is None:
bucket.code = code
bcc.set_bucket(bucket)
return environment.template_class.from_code(environment, code,
globals, uptodate)
class FileSystemLoader(BaseLoader):
"""Loads templates from the file system. This loader can find templates
in folders on the file system and is the preferred way to load them.
The loader takes the path to the templates as string, or if multiple
locations are wanted a list of them which is then looked up in the
given order::
>>> loader = FileSystemLoader('/path/to/templates')
>>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
    By default the template encoding is ``'utf-8'``, which can be changed
by setting the `encoding` parameter to something else.
To follow symbolic links, set the *followlinks* parameter to ``True``::
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
.. versionchanged:: 2.8+
The *followlinks* parameter was added.
"""
def __init__(self, searchpath, encoding='utf-8', followlinks=False):
if isinstance(searchpath, string_types):
searchpath = [searchpath]
self.searchpath = list(searchpath)
self.encoding = encoding
self.followlinks = followlinks
def get_source(self, environment, template):
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
def list_templates(self):
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
for dirpath, dirnames, filenames in walk_dir:
for filename in filenames:
template = os.path.join(dirpath, filename) \
[len(searchpath):].strip(os.path.sep) \
.replace(os.path.sep, '/')
if template[:2] == './':
template = template[2:]
if template not in found:
found.add(template)
return sorted(found)
class PackageLoader(BaseLoader):
"""Load templates from python eggs or packages. It is constructed with
the name of the python package and the path to the templates in that
package::
loader = PackageLoader('mypackage', 'views')
If the package path is not given, ``'templates'`` is assumed.
    By default the template encoding is ``'utf-8'``, which can be changed
by setting the `encoding` parameter to something else. Due to the nature
of eggs it's only possible to reload templates if the package was loaded
from the file system and not a zip file.
"""
def __init__(self, package_name, package_path='templates',
encoding='utf-8'):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
self.filesystem_bound = isinstance(provider, DefaultProvider)
self.provider = provider
self.package_path = package_path
def get_source(self, environment, template):
pieces = split_template_path(template)
p = '/'.join((self.package_path,) + tuple(pieces))
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
filename = uptodate = None
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
source = self.provider.get_resource_string(self.manager, p)
return source.decode(self.encoding), filename, uptodate
def list_templates(self):
path = self.package_path
if path[:2] == './':
path = path[2:]
elif path == '.':
path = ''
offset = len(path)
results = []
def _walk(path):
for filename in self.provider.resource_listdir(path):
fullname = path + '/' + filename
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
results.append(fullname[offset:].lstrip('/'))
_walk(path)
results.sort()
return results
class DictLoader(BaseLoader):
"""Loads a template from a python dict. It's passed a dict of unicode
    strings bound to template names. This loader is useful for unit testing:
>>> loader = DictLoader({'index.html': 'source here'})
    Because auto reloading is rarely useful, it is disabled by default.
"""
def __init__(self, mapping):
self.mapping = mapping
def get_source(self, environment, template):
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
def list_templates(self):
return sorted(self.mapping)
class FunctionLoader(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
    a unicode string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
... if name == 'index.html':
... return '...'
...
>>> loader = FunctionLoader(load_template)
The `uptodatefunc` is a function that is called if autoreload is enabled
and has to return `True` if the template is still up to date. For more
details have a look at :meth:`BaseLoader.get_source` which has the same
return value.
"""
def __init__(self, load_func):
self.load_func = load_func
def get_source(self, environment, template):
rv = self.load_func(template)
if rv is None:
raise TemplateNotFound(template)
elif isinstance(rv, string_types):
return rv, None, None
return rv
class PrefixLoader(BaseLoader):
"""A loader that is passed a dict of loaders where each loader is bound
    to a prefix. By default the prefix is delimited from the template by a
    slash, which can be changed by setting the `delimiter` argument to
something else::
loader = PrefixLoader({
'app1': PackageLoader('mypackage.app1'),
'app2': PackageLoader('mypackage.app2')
})
By loading ``'app1/index.html'`` the file from the app1 package is loaded,
by loading ``'app2/index.html'`` the file from the second.
"""
def __init__(self, mapping, delimiter='/'):
self.mapping = mapping
self.delimiter = delimiter
def get_loader(self, template):
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
except (ValueError, KeyError):
raise TemplateNotFound(template)
return loader, name
def get_source(self, environment, template):
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
except TemplateNotFound:
            # re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name, globals)
except TemplateNotFound:
            # re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(name)
def list_templates(self):
result = []
for prefix, loader in iteritems(self.mapping):
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
class ChoiceLoader(BaseLoader):
"""This loader works like the `PrefixLoader` just that no prefix is
specified. If a template could not be found by one loader the next one
is tried.
>>> loader = ChoiceLoader([
... FileSystemLoader('/path/to/user/templates'),
... FileSystemLoader('/path/to/system/templates')
... ])
This is useful if you want to allow users to override builtin templates
from a different location.
"""
def __init__(self, loaders):
self.loaders = loaders
def get_source(self, environment, template):
for loader in self.loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
except TemplateNotFound:
pass
raise TemplateNotFound(name)
def list_templates(self):
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
return sorted(found)
class _TemplateModule(ModuleType):
"""Like a normal module but with support for weak references"""
class ModuleLoader(BaseLoader):
"""This loader loads templates from precompiled templates.
Example usage:
>>> loader = ChoiceLoader([
... ModuleLoader('/path/to/compiled/templates'),
... FileSystemLoader('/path/to/templates')
... ])
Templates can be precompiled with :meth:`Environment.compile_templates`.
"""
has_source_access = False
def __init__(self, path):
package_name = '_jinja2_module_templates_%x' % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
if isinstance(path, string_types):
path = [path]
else:
path = list(path)
mod.__path__ = path
sys.modules[package_name] = weakref.proxy(mod,
lambda x: sys.modules.pop(package_name, None))
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
# loader that created it goes out of business.
self.module = mod
self.package_name = package_name
@staticmethod
def get_template_key(name):
return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
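    # For example (illustrative): get_template_key('index.html') returns
    # 'tmpl_' plus the 40-character hex SHA-1 digest of b'index.html';
    # get_module_filename() appends '.py' to that key.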
@staticmethod
def get_module_filename(name):
return ModuleLoader.get_template_key(name) + '.py'
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
module = '%s.%s' % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
mod = __import__(module, None, None, ['root'])
except ImportError:
raise TemplateNotFound(name)
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals)
| mit |
opensourcechipspark/platform_external_chromium_org | third_party/PRESUBMIT.py | 47 | 3817 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def _CheckThirdPartyReadmesUpdated(input_api, output_api):
"""
Checks to make sure that README.chromium files are properly updated
  when dependencies in third_party are modified.
"""
readmes = []
files = []
errors = []
for f in input_api.AffectedFiles():
local_path = f.LocalPath()
if input_api.os_path.dirname(local_path) == 'third_party':
continue
if local_path.startswith('third_party' + input_api.os_path.sep):
files.append(f)
if local_path.endswith("README.chromium"):
readmes.append(f)
if files and not readmes:
errors.append(output_api.PresubmitPromptWarning(
'When updating or adding third party code the appropriate\n'
'\'README.chromium\' file should also be updated with the correct\n'
'version and package information.', files))
if not readmes:
return errors
name_pattern = input_api.re.compile(
r'^Name: [a-zA-Z0-9_\-\. \(\)]+\r?$',
input_api.re.IGNORECASE | input_api.re.MULTILINE)
shortname_pattern = input_api.re.compile(
r'^Short Name: [a-zA-Z0-9_\-\.]+\r?$',
input_api.re.IGNORECASE | input_api.re.MULTILINE)
version_pattern = input_api.re.compile(
r'^Version: [a-zA-Z0-9_\-\.:]+\r?$',
input_api.re.IGNORECASE | input_api.re.MULTILINE)
release_pattern = input_api.re.compile(
r'^Security Critical: (yes)|(no)\r?$',
input_api.re.IGNORECASE | input_api.re.MULTILINE)
license_pattern = input_api.re.compile(
r'^License: .+\r?$',
input_api.re.IGNORECASE | input_api.re.MULTILINE)
for f in readmes:
if 'D' in f.Action():
_IgnoreIfDeleting(input_api, output_api, f, errors)
continue
contents = input_api.ReadFile(f)
if (not shortname_pattern.search(contents)
and not name_pattern.search(contents)):
errors.append(output_api.PresubmitError(
'Third party README files should contain either a \'Short Name\' or\n'
'a \'Name\' which is the name under which the package is\n'
'distributed. Check README.chromium.template for details.',
[f]))
if not version_pattern.search(contents):
errors.append(output_api.PresubmitError(
'Third party README files should contain a \'Version\' field.\n'
'If the package is not versioned or the version is not known\n'
'list the version as \'unknown\'.\n'
'Check README.chromium.template for details.',
[f]))
if not release_pattern.search(contents):
errors.append(output_api.PresubmitError(
'Third party README files should contain a \'Security Critical\'\n'
'field. This field specifies whether the package is built with\n'
'Chromium. Check README.chromium.template for details.',
[f]))
if not license_pattern.search(contents):
errors.append(output_api.PresubmitError(
'Third party README files should contain a \'License\' field.\n'
'This field specifies the license used by the package. Check\n'
'README.chromium.template for details.',
[f]))
return errors
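# A README.chromium that satisfies all of the checks above might look like
# this (illustrative sketch, not a real package):
#
#   Name: Example Package
#   Short Name: example
#   Version: 1.2.3
#   Security Critical: yes
#   License: BSD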
def _IgnoreIfDeleting(input_api, output_api, affected_file, errors):
third_party_dir = input_api.os_path.dirname(affected_file.LocalPath())
for f in input_api.AffectedFiles():
if f.LocalPath().startswith(third_party_dir):
if 'D' not in f.Action():
errors.append(output_api.PresubmitError(
'Third party README should only be removed when the whole\n'
'directory is being removed.\n', [f, affected_file]))
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CheckThirdPartyReadmesUpdated(input_api, output_api))
return results
| bsd-3-clause |
awesomest/text-checker | appengine/text-checker/node_modules/zlibjs/closure-primitives/depswriter.py | 247 | 6208 | #!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates out a Closure deps.js file given a list of JavaScript sources.
Paths can be specified as arguments or (more commonly) specifying trees
with the flags (call with --help for descriptions).
Usage: depswriter.py [path/to/js1.js [path/to/js2.js] ...]
"""
import logging
import optparse
import os
import posixpath
import shlex
import sys
import source
import treescan
__author__ = '[email protected] (Nathan Naze)'
def MakeDepsFile(source_map):
"""Make a generated deps file.
Args:
    source_map: A dict mapping source paths to source.Source objects.
Returns:
str, A generated deps file source.
"""
# Write in path alphabetical order
paths = sorted(source_map.keys())
lines = []
for path in paths:
js_source = source_map[path]
# We don't need to add entries that don't provide anything.
if js_source.provides:
lines.append(_GetDepsLine(path, js_source))
return ''.join(lines)
def _GetDepsLine(path, js_source):
"""Get a deps.js file string for a source."""
provides = sorted(js_source.provides)
requires = sorted(js_source.requires)
return 'goog.addDependency(\'%s\', %s, %s);\n' % (path, provides, requires)
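# Illustrative output of _GetDepsLine for a hypothetical source:
#   goog.addDependency('a/b.js', ['goog.A'], ['goog.B', 'goog.C']);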
def _GetOptionsParser():
"""Get the options parser."""
parser = optparse.OptionParser(__doc__)
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
parser.add_option('--root',
dest='roots',
default=[],
action='append',
help='A root directory to scan for JS source files. '
'Paths of JS files in generated deps file will be '
'relative to this path. This flag may be specified '
'multiple times.')
parser.add_option('--root_with_prefix',
dest='roots_with_prefix',
default=[],
action='append',
help='A root directory to scan for JS source files, plus '
'a prefix (if either contains a space, surround with '
'quotes). Paths in generated deps file will be relative '
'to the root, but preceded by the prefix. This flag '
'may be specified multiple times.')
parser.add_option('--path_with_depspath',
dest='paths_with_depspath',
default=[],
action='append',
help='A path to a source file and an alternate path to '
'the file in the generated deps file (if either contains '
'a space, surround with whitespace). This flag may be '
'specified multiple times.')
return parser
def _NormalizePathSeparators(path):
"""Replaces OS-specific path separators with POSIX-style slashes.
Args:
path: str, A file path.
Returns:
str, The path with any OS-specific path separators (such as backslash on
Windows) replaced with URL-compatible forward slashes. A no-op on systems
that use POSIX paths.
"""
return path.replace(os.sep, posixpath.sep)
def _GetRelativePathToSourceDict(root, prefix=''):
"""Scans a top root directory for .js sources.
Args:
root: str, Root directory.
prefix: str, Prefix for returned paths.
Returns:
dict, A map of relative paths (with prefix, if given), to source.Source
objects.
"""
# Remember and restore the cwd when we're done. We work from the root so
# that paths are relative from the root.
start_wd = os.getcwd()
os.chdir(root)
path_to_source = {}
for path in treescan.ScanTreeForJsFiles('.'):
prefixed_path = _NormalizePathSeparators(os.path.join(prefix, path))
path_to_source[prefixed_path] = source.Source(source.GetFileContents(path))
os.chdir(start_wd)
return path_to_source
def _GetPair(s):
"""Return a string as a shell-parsed tuple. Two values expected."""
try:
# shlex uses '\' as an escape character, so they must be escaped.
s = s.replace('\\', '\\\\')
first, second = shlex.split(s)
return (first, second)
except:
raise Exception('Unable to parse input line as a pair: %s' % s)
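# Illustrative behaviour (not from the original file):
#   _GetPair('root prefix')       -> ('root', 'prefix')
#   _GetPair('"/my root" prefix') -> ('/my root', 'prefix')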
def main():
"""CLI frontend to MakeDepsFile."""
logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
level=logging.INFO)
options, args = _GetOptionsParser().parse_args()
path_to_source = {}
# Roots without prefixes
for root in options.roots:
path_to_source.update(_GetRelativePathToSourceDict(root))
# Roots with prefixes
for root_and_prefix in options.roots_with_prefix:
root, prefix = _GetPair(root_and_prefix)
path_to_source.update(_GetRelativePathToSourceDict(root, prefix=prefix))
# Source paths
for path in args:
path_to_source[path] = source.Source(source.GetFileContents(path))
# Source paths with alternate deps paths
for path_with_depspath in options.paths_with_depspath:
srcpath, depspath = _GetPair(path_with_depspath)
path_to_source[depspath] = source.Source(source.GetFileContents(srcpath))
# Make our output pipe.
if options.output_file:
out = open(options.output_file, 'w')
else:
out = sys.stdout
out.write('// This file was autogenerated by %s.\n' % sys.argv[0])
out.write('// Please do not edit.\n')
out.write(MakeDepsFile(path_to_source))
if __name__ == '__main__':
main()
| apache-2.0 |
wackymaster/QTClock | Libraries/matplotlib/projections/geo.py | 8 | 21976 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
import matplotlib.spines as mspines
import matplotlib.axis as maxis
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return "%0.0f\u00b0" % degrees
RESOLUTION = 75
def _init_axis(self):
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
# Do not register xaxis or yaxis with spines -- as done in
# Axes._init_axis() -- until GeoAxes.xaxis.cla() works.
# self.spines['geo'].register_axis(self.yaxis)
self._update_transScale()
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.grid(rcParams['axes.grid'])
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self._get_core_transform(self.RESOLUTION)
self.transAffine = self._get_affine_transform()
self.transAxes = BboxTransformTo(self.bbox)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# This is the transform for longitude ticks.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# This is the transform for latitude ticks.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((np.pi, 0))
_, yscale = transform.transform_point((0, np.pi / 2.0))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self,which='grid'):
if which not in ['tick1','tick2','grid']:
msg = "'which' must be on of [ 'tick1' | 'tick2' | 'grid' ]"
raise ValueError(msg)
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self,which='grid'):
if which not in ['tick1','tick2','grid']:
msg = "'which' must be one of [ 'tick1' | 'tick2' | 'grid' ]"
raise ValueError(msg)
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'geo':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
raise TypeError("It is not possible to change axes limits "
"for geographic projections. Please consider "
"using Basemap or Cartopy.")
set_ylim = set_xlim
def format_coord(self, lon, lat):
'return a format string formatting the coordinate'
lon = lon * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if lon >= 0.0:
ew = 'E'
else:
ew = 'W'
return '%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(lon), ew)
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
"""
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
        self._longitude_degrees = degrees
self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
        Set the number of degrees between each latitude grid.
"""
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
self._latitude_degrees = degrees
self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
"""
self._longitude_cap = degrees * (np.pi / 180.0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself.
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
This axes object does not support interactive zoom box.
"""
return False
def can_pan(self) :
"""
Return *True* if this axes supports the pan/zoom button functionality.
This axes object does not support interactive pan/zoom.
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
class AitoffAxes(GeoAxes):
name = 'aitoff'
class AitoffTransform(Transform):
"""
The base Aitoff transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
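            # Reference (illustrative note): the Aitoff projection maps
            #   x = cos(lat)*sin(lon/2) / sinc(alpha)
            #   y = sin(lat) / sinc(alpha)
            # with alpha = arccos(cos(lat)*cos(lon/2)) and the unnormalized
            # sinc function sin(alpha)/alpha, as computed below.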
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
# Mask this array or we'll get divide-by-zero errors
alpha = ma.masked_where(alpha == 0.0, alpha)
# The numerators also need to be masked so that masked
# division will be invoked.
# We want unnormalized sinc. numpy.sinc gives us normalized
sinc_alpha = ma.sin(alpha) / alpha
x = (cos_latitude * ma.sin(half_long)) / sinc_alpha
y = (ma.sin(latitude) / sinc_alpha)
return np.concatenate((x.filled(0), y.filled(0)), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return AitoffAxes.InvertedAitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAitoffTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return AitoffAxes.AitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
name = 'hammer'
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
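            # Reference (illustrative note): the Hammer projection maps
            #   x = 2*sqrt(2)*cos(lat)*sin(lon/2) / alpha
            #   y = sqrt(2)*sin(lat) / alpha
            # with alpha = sqrt(1 + cos(lat)*cos(lon/2)), as computed below.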
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = np.sqrt(1.0 + cos_latitude * np.cos(half_long))
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
name = 'mollweide'
class MollweideTransform(Transform):
"""
The base Mollweide transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Mollweide transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Mollweide space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, ll):
def d(theta):
delta = -(theta + np.sin(theta) - pi_sin_l) / (1 + np.cos(theta))
return delta, np.abs(delta) > 0.001
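            # Note (illustrative): the Newton-Raphson loop below solves
            # theta + sin(theta) = pi*sin(latitude) for theta; the Mollweide
            # coordinates then use the auxiliary angle aux = theta/2.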
longitude = ll[:, 0]
latitude = ll[:, 1]
clat = np.pi/2 - np.abs(latitude)
ihigh = clat < 0.087 # within 5 degrees of the poles
ilow = ~ihigh
aux = np.empty(latitude.shape, dtype=np.float)
if ilow.any(): # Newton-Raphson iteration
pi_sin_l = np.pi * np.sin(latitude[ilow])
theta = 2.0 * latitude[ilow]
delta, large_delta = d(theta)
while np.any(large_delta):
theta[large_delta] += delta[large_delta]
delta, large_delta = d(theta)
aux[ilow] = theta / 2
if ihigh.any(): # Taylor series-based approx. solution
e = clat[ihigh]
d = 0.5 * (3 * np.pi * e**2) ** (1.0/3)
aux[ihigh] = (np.pi/2 - d) * np.sign(latitude[ihigh])
xy = np.empty(ll.shape, dtype=np.float)
xy[:,0] = (2.0 * np.sqrt(2.0) / np.pi) * longitude * np.cos(aux)
xy[:,1] = np.sqrt(2.0) * np.sin(aux)
return xy
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MollweideAxes.InvertedMollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedMollweideTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
# from Equations (7, 8) of
# http://mathworld.wolfram.com/MollweideProjection.html
theta = np.arcsin(y / np.sqrt(2))
lon = (np.pi / (2 * np.sqrt(2))) * x / np.cos(theta)
lat = np.arcsin((2 * theta + np.sin(2 * theta)) / np.pi)
return np.concatenate((lon, lat), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return MollweideAxes.MollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
name = 'lambert'
class LambertTransform(Transform):
"""
The base Lambert transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform_non_affine(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
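            # Reference (illustrative note): the Lambert azimuthal
            # equal-area projection uses
            #   k = sqrt(2 / (1 + sin(clat)*sin(lat)
            #                   + cos(clat)*cos(lat)*cos(lon - clong)))
            #   x = k*cos(lat)*sin(lon - clong)
            #   y = k*(cos(clat)*sin(lat)
            #          - sin(clat)*cos(lat)*cos(lon - clong))
            # which matches the computation below.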
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = (1.0 +
np.sin(clat)*sin_lat +
np.cos(clat)*cos_lat*cos_diff_long)
# Prevent divide-by-zero problems
inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
k = np.sqrt(2.0 / inner_k)
x = k*cos_lat*np.sin(diff_long)
y = k*(np.cos(clat)*sin_lat -
np.sin(clat)*cos_lat*cos_diff_long)
return np.concatenate((x, y), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedLambertTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform_non_affine(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
p = np.sqrt(x*x + y*y)
p = np.where(p == 0.0, 1e-9, p)
c = 2.0 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
lon = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.concatenate((lon, lat), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
self._center_longitude = kwargs.pop("center_longitude", 0.0)
self._center_latitude = kwargs.pop("center_latitude", 0.0)
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
def cla(self):
GeoAxes.cla(self)
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
| mit |
isb-cgc/ISB-CGC-data-proc | data_upload/util/bq_wrapper.py | 1 | 1985 | '''
Created on Jan 22, 2017
Copyright 2017, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: michael
'''
from google.cloud import bigquery
def query_bq_table(query, use_legacy, project, log):
log.info('\t\tquerying bq for %s: %s' % (project, query))
client = bigquery.Client(project=project)
query_results = client.run_sync_query(query)
# Use standard SQL syntax for queries.
# See: https://cloud.google.com/bigquery/sql-reference/
query_results.use_legacy_sql = use_legacy
try:
query_results.run()
except:
log.exception('problem with query:\n{}'.format(query))
raise
log.info('\t\tdone querying bq: %s' % query)
return query_results
def fetch_paged_results(query_results, fetch_count, project_name, page_token, log):
log.info('\t\trequesting %d rows %s' % (fetch_count, (' for ' + project_name) if project_name else ''))
#
# Encountered this, which was the only error in a full load. Per the error response,
# a retry seems to be in order:
#
# ServiceUnavailable: 503 GET https://www.googleapis.com/bigquery/v2/projects/isb-cgc/queries/job_blah-?pageToken=blah%3D&maxResults=50:
# Error encountered during execution. Retrying may solve the problem.
#
rows = list(query_results.fetch_data(
max_results=fetch_count,
page_token=page_token))
return query_results.total_rows, rows, query_results.page_token
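# Illustrative usage sketch (assumes a hypothetical project, query and an
# already-configured logger; not part of the original module):
#
#   results = query_bq_table('SELECT 1', False, 'my-project', log)
#   total, rows, token = fetch_paged_results(results, 50, 'my-project',
#                                            None, log)
#   while token:
#       total, rows, token = fetch_paged_results(results, 50, 'my-project',
#                                                token, log)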
| apache-2.0 |
BeATz-UnKNoWN/python-for-android | python3-alpha/python3-src/Lib/plistlib.py | 50 | 14616 | r"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
The property list (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the writePlist(rootObject, pathOrFile)
function. 'rootObject' is the top level object, 'pathOrFile' is a
filename or a (writable) file object.
To parse a plist from a file, use the readPlist(pathOrFile) function,
with a file name or a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).
To work with plist data in bytes objects, you can use readPlistFromBytes()
and writePlistToBytes().
Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries (but only with string keys), Data or datetime.datetime objects.
String values (including dictionary keys) have to be unicode strings -- they
will be written out as UTF-8.
The <data> plist type is supported through the Data class. This is a
thin wrapper around a Python bytes object. Use 'Data' if your strings
contain control characters.
Generate Plist example:
pl = dict(
aString = "Doodah",
aList = ["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict = dict(
anotherString = "<hello & hi there!>",
aUnicodeValue = "M\xe4ssig, Ma\xdf",
aTrueValue = True,
aFalseValue = False,
),
someData = Data(b"<binary gunk>"),
someMoreData = Data(b"<lots of binary gunk>" * 10),
aDate = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
writePlist(pl, fileName)
Parse Plist example:
pl = readPlist(pathOrFile)
print pl["aKey"]
"""
__all__ = [
"readPlist", "writePlist", "readPlistFromBytes", "writePlistToBytes",
"Plist", "Data", "Dict"
]
# Note: the Plist and Dict classes have been deprecated.
import binascii
import datetime
from io import BytesIO
import re
def readPlist(pathOrFile):
"""Read a .plist file. 'pathOrFile' may either be a file name or a
(readable) file object. Return the unpacked root object (which
usually is a dictionary).
"""
didOpen = False
try:
if isinstance(pathOrFile, str):
pathOrFile = open(pathOrFile, 'rb')
didOpen = True
p = PlistParser()
rootObject = p.parse(pathOrFile)
finally:
if didOpen:
pathOrFile.close()
return rootObject
def writePlist(rootObject, pathOrFile):
"""Write 'rootObject' to a .plist file. 'pathOrFile' may either be a
file name or a (writable) file object.
"""
didOpen = False
try:
if isinstance(pathOrFile, str):
pathOrFile = open(pathOrFile, 'wb')
didOpen = True
writer = PlistWriter(pathOrFile)
writer.writeln("<plist version=\"1.0\">")
writer.writeValue(rootObject)
writer.writeln("</plist>")
finally:
if didOpen:
pathOrFile.close()
def readPlistFromBytes(data):
"""Read a plist data from a bytes object. Return the root object.
"""
return readPlist(BytesIO(data))
def writePlistToBytes(rootObject):
"""Return 'rootObject' as a plist-formatted bytes object.
"""
f = BytesIO()
writePlist(rootObject, f)
return f.getvalue()
class DumbXMLWriter:
def __init__(self, file, indentLevel=0, indent="\t"):
self.file = file
self.stack = []
self.indentLevel = indentLevel
self.indent = indent
def beginElement(self, element):
self.stack.append(element)
self.writeln("<%s>" % element)
self.indentLevel += 1
def endElement(self, element):
assert self.indentLevel > 0
assert self.stack.pop() == element
self.indentLevel -= 1
self.writeln("</%s>" % element)
def simpleElement(self, element, value=None):
if value is not None:
value = _escape(value)
self.writeln("<%s>%s</%s>" % (element, value, element))
else:
self.writeln("<%s/>" % element)
def writeln(self, line):
if line:
# plist has fixed encoding of utf-8
if isinstance(line, str):
line = line.encode('utf-8')
self.file.write(self.indentLevel * self.indent)
self.file.write(line)
self.file.write(b'\n')
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units may be omitted with
# a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z", re.ASCII)
def _dateFromString(s):
order = ('year', 'month', 'day', 'hour', 'minute', 'second')
gd = _dateParser.match(s).groupdict()
lst = []
for key in order:
val = gd[key]
if val is None:
break
lst.append(int(val))
return datetime.datetime(*lst)
def _dateToString(d):
return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
d.year, d.month, d.day,
d.hour, d.minute, d.second
)
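# Illustrative round trip (not part of the original module):
#   _dateToString(datetime.datetime(2011, 3, 4, 5, 6, 7))
#       -> '2011-03-04T05:06:07Z'
#   _dateFromString('2011-03-04T05:06:07Z')
#       -> datetime.datetime(2011, 3, 4, 5, 6, 7)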
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escape(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text
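# Illustrative sketch (not part of the original module): _escape() only
# substitutes the XML metacharacters; control characters are rejected.
def _demoEscape():
    assert _escape("a & b < c") == "a &amp; b &lt; c"
    try:
        _escape("bad\x00data")
    except ValueError:
        pass  # expected: control characters are not allowed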
PLISTHEADER = b"""\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""
class PlistWriter(DumbXMLWriter):
def __init__(self, file, indentLevel=0, indent=b"\t", writeHeader=1):
if writeHeader:
file.write(PLISTHEADER)
DumbXMLWriter.__init__(self, file, indentLevel, indent)
def writeValue(self, value):
if isinstance(value, str):
self.simpleElement("string", value)
elif isinstance(value, bool):
# must switch for bool before int, as bool is a
# subclass of int...
if value:
self.simpleElement("true")
else:
self.simpleElement("false")
elif isinstance(value, int):
self.simpleElement("integer", "%d" % value)
elif isinstance(value, float):
self.simpleElement("real", repr(value))
elif isinstance(value, dict):
self.writeDict(value)
elif isinstance(value, Data):
self.writeData(value)
elif isinstance(value, datetime.datetime):
self.simpleElement("date", _dateToString(value))
elif isinstance(value, (tuple, list)):
self.writeArray(value)
else:
raise TypeError("unsupported type: %s" % type(value))
def writeData(self, data):
self.beginElement("data")
self.indentLevel -= 1
maxlinelength = 76 - len(self.indent.replace(b"\t", b" " * 8) *
self.indentLevel)
for line in data.asBase64(maxlinelength).split(b"\n"):
if line:
self.writeln(line)
self.indentLevel += 1
self.endElement("data")
def writeDict(self, d):
self.beginElement("dict")
items = sorted(d.items())
for key, value in items:
if not isinstance(key, str):
raise TypeError("keys must be strings")
self.simpleElement("key", key)
self.writeValue(value)
self.endElement("dict")
def writeArray(self, array):
self.beginElement("array")
for value in array:
self.writeValue(value)
self.endElement("array")
class _InternalDict(dict):
# This class is needed while Dict is scheduled for deprecation:
# we only need to warn when a *user* instantiates Dict or when
# the "attribute notation for dict keys" is used.
def __getattr__(self, attr):
try:
value = self[attr]
except KeyError:
raise AttributeError(attr)
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning, 2)
return value
def __setattr__(self, attr, value):
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning, 2)
self[attr] = value
def __delattr__(self, attr):
try:
del self[attr]
except KeyError:
raise AttributeError(attr)
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning, 2)
class Dict(_InternalDict):
def __init__(self, **kwargs):
from warnings import warn
warn("The plistlib.Dict class is deprecated, use builtin dict instead",
PendingDeprecationWarning, 2)
super().__init__(**kwargs)
class Plist(_InternalDict):
"""This class has been deprecated. Use readPlist() and writePlist()
functions instead, together with regular dict objects.
"""
def __init__(self, **kwargs):
from warnings import warn
warn("The Plist class is deprecated, use the readPlist() and "
"writePlist() functions instead", PendingDeprecationWarning, 2)
super().__init__(**kwargs)
def fromFile(cls, pathOrFile):
"""Deprecated. Use the readPlist() function instead."""
rootObject = readPlist(pathOrFile)
plist = cls()
plist.update(rootObject)
return plist
fromFile = classmethod(fromFile)
def write(self, pathOrFile):
"""Deprecated. Use the writePlist() function instead."""
writePlist(self, pathOrFile)
def _encodeBase64(s, maxlinelength=76):
# copied from base64.encodebytes(), with added maxlinelength argument
maxbinsize = (maxlinelength//4)*3
pieces = []
for i in range(0, len(s), maxbinsize):
chunk = s[i : i + maxbinsize]
pieces.append(binascii.b2a_base64(chunk))
return b''.join(pieces)
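# Illustrative sketch (not part of the original module): with the default
# maxlinelength of 76, input is consumed in 57-byte chunks, each yielding
# one newline-terminated 76-character base64 line.
def _demoEncodeBase64():
    lines = _encodeBase64(b"x" * 100).splitlines()
    assert len(lines[0]) == 76
    return lines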
class Data:
"""Wrapper for binary data."""
def __init__(self, data):
if not isinstance(data, bytes):
raise TypeError("data must be as bytes")
self.data = data
@classmethod
def fromBase64(cls, data):
# base64.decodebytes just calls binascii.a2b_base64;
# it seems overkill to use both base64 and binascii.
return cls(binascii.a2b_base64(data))
def asBase64(self, maxlinelength=76):
return _encodeBase64(self.data, maxlinelength)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.data == other.data
elif isinstance(other, str):
return self.data == other
else:
return id(self) == id(other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self.data))
class PlistParser:
def __init__(self):
self.stack = []
self.currentKey = None
self.root = None
def parse(self, fileobj):
from xml.parsers.expat import ParserCreate
self.parser = ParserCreate()
self.parser.StartElementHandler = self.handleBeginElement
self.parser.EndElementHandler = self.handleEndElement
self.parser.CharacterDataHandler = self.handleData
self.parser.ParseFile(fileobj)
return self.root
def handleBeginElement(self, element, attrs):
self.data = []
handler = getattr(self, "begin_" + element, None)
if handler is not None:
handler(attrs)
def handleEndElement(self, element):
handler = getattr(self, "end_" + element, None)
if handler is not None:
handler()
def handleData(self, data):
self.data.append(data)
def addObject(self, value):
if self.currentKey is not None:
if not isinstance(self.stack[-1], type({})):
raise ValueError("unexpected element at line %d" %
self.parser.CurrentLineNumber)
self.stack[-1][self.currentKey] = value
self.currentKey = None
elif not self.stack:
# this is the root object
self.root = value
else:
if not isinstance(self.stack[-1], type([])):
raise ValueError("unexpected element at line %d" %
self.parser.CurrentLineNumber)
self.stack[-1].append(value)
def getData(self):
data = ''.join(self.data)
self.data = []
return data
# element handlers
def begin_dict(self, attrs):
d = _InternalDict()
self.addObject(d)
self.stack.append(d)
def end_dict(self):
if self.currentKey:
raise ValueError("missing value for key '%s' at line %d" %
(self.currentKey,self.parser.CurrentLineNumber))
self.stack.pop()
def end_key(self):
if self.currentKey or not isinstance(self.stack[-1], type({})):
raise ValueError("unexpected key at line %d" %
self.parser.CurrentLineNumber)
self.currentKey = self.getData()
def begin_array(self, attrs):
a = []
self.addObject(a)
self.stack.append(a)
def end_array(self):
self.stack.pop()
def end_true(self):
self.addObject(True)
def end_false(self):
self.addObject(False)
def end_integer(self):
self.addObject(int(self.getData()))
def end_real(self):
self.addObject(float(self.getData()))
def end_string(self):
self.addObject(self.getData())
def end_data(self):
self.addObject(Data.fromBase64(self.getData().encode("utf-8")))
def end_date(self):
self.addObject(_dateFromString(self.getData()))
| apache-2.0 |
emilio/servo | tests/wpt/web-platform-tests/tools/third_party/hyper/hyper/packages/hyperframe/flags.py | 41 | 1028 | # -*- coding: utf-8 -*-
"""
hyperframe/flags
~~~~~~~~~~~~~~~~
Defines basic Flag and Flags data structures.
"""
import collections
Flag = collections.namedtuple("Flag", ["name", "bit"])
class Flags(collections.MutableSet):
"""
A simple MutableSet implementation that will only accept known flags as elements.
Will behave like a regular set(), except that a ValueError will be thrown when .add()ing
unexpected flags.
"""
def __init__(self, defined_flags):
self._valid_flags = set(flag.name for flag in defined_flags)
self._flags = set()
def __contains__(self, x):
return self._flags.__contains__(x)
def __iter__(self):
return self._flags.__iter__()
def __len__(self):
return self._flags.__len__()
def discard(self, value):
return self._flags.discard(value)
def add(self, value):
if value not in self._valid_flags:
raise ValueError("Unexpected flag: {}".format(value))
return self._flags.add(value)
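# Illustrative sketch (not part of the original module): a Flags set only
# accepts the flags it was defined with. The flag names and bit values
# below are arbitrary demo choices.
def _demo_flags():
    flags = Flags([Flag("END_STREAM", 0x01), Flag("END_HEADERS", 0x04)])
    flags.add("END_STREAM")
    assert "END_STREAM" in flags and len(flags) == 1
    try:
        flags.add("BOGUS")
    except ValueError:
        pass  # expected: unknown flags are rejected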
| mpl-2.0 |
tecan/xchat-rt | plugins/scripts/encryption/supybot-code-6361b1e856ebbc8e14d399019e2c53a35f4e0063/plugins/Games/config.py | 15 | 2149 | ###
# Copyright (c) 2003-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Games', True)
Games = conf.registerPlugin('Games')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| gpl-2.0 |
uclouvain/OSIS-Louvain | base/tests/factories/education_group_year.py | 1 | 7357 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import operator
import random
import string
import exrex
import factory.fuzzy
from base.models.education_group_year import EducationGroupYear
from base.models.enums import education_group_categories, active_status, schedule_type
from base.models.enums.constraint_type import CREDITS
from base.models.enums.duration_unit import DURATION_UNIT
from base.models.enums.education_group_types import TrainingType, MiniTrainingType
from base.models.learning_unit_year import MAXIMUM_CREDITS, MINIMUM_CREDITS
from base.tests.factories.academic_year import AcademicYearFactory
from base.tests.factories.campus import CampusFactory
from base.tests.factories.education_group import EducationGroupFactory
from base.tests.factories.education_group_type import EducationGroupTypeFactory
from base.tests.factories.entity import EntityFactory
from reference.tests.factories.language import LanguageFactory
def string_generator(nb_char=8):
return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(nb_char))
def generate_title(education_group_year):
if education_group_year.acronym:
return '{obj.academic_year} {obj.acronym}'.format(obj=education_group_year).lower()
return '{obj.academic_year} {gen_str}'.format(obj=education_group_year, gen_str=string_generator()).lower()
class EducationGroupYearFactory(factory.django.DjangoModelFactory):
class Meta:
model = EducationGroupYear
django_get_or_create = ('partial_acronym', 'academic_year',)
education_group = factory.SubFactory(EducationGroupFactory)
academic_year = factory.SubFactory(AcademicYearFactory)
acronym = ""
partial_acronym = ""
title = factory.LazyAttribute(generate_title)
title_english = factory.LazyAttribute(generate_title)
partial_title = ""
partial_title_english = ""
education_group_type = factory.SubFactory(EducationGroupTypeFactory)
management_entity = factory.SubFactory(EntityFactory)
administration_entity = factory.SubFactory(EntityFactory)
main_teaching_campus = factory.SubFactory(CampusFactory)
credits = factory.fuzzy.FuzzyInteger(MINIMUM_CREDITS, MAXIMUM_CREDITS)
min_constraint = factory.fuzzy.FuzzyInteger(1, MAXIMUM_CREDITS)
max_constraint = factory.lazy_attribute(lambda a: a.min_constraint)
remark = factory.fuzzy.FuzzyText(length=255)
remark_english = factory.fuzzy.FuzzyText(length=255)
active = active_status.ACTIVE
schedule_type = schedule_type.DAILY
weighting = True
default_learning_unit_enrollment = False
duration_unit = factory.Iterator(DURATION_UNIT, getter=operator.itemgetter(0))
duration = factory.fuzzy.FuzzyInteger(1, 5)
constraint_type = CREDITS
linked_with_epc = False
primary_language = factory.SubFactory(LanguageFactory)
enrollment_campus = factory.SubFactory(CampusFactory)
diploma_printing_title = "Yolo"
@factory.post_generation
def gen_acronym(self, create, extracted, **kwargs):
try:
if self.acronym == '':
self.acronym = exrex.getone(self.rules['acronym'].regex_rule).upper()
except KeyError:
self.acronym = string_generator(6)
@factory.post_generation
def gen_partial_acronym(self, create, extracted, **kwargs):
try:
if self.partial_acronym == "":
self.partial_acronym = exrex.getone(self.rules['partial_acronym'].regex_rule).upper()
except KeyError:
self.partial_acronym = string_generator(7)
class MiniTrainingFactory(EducationGroupYearFactory):
education_group_type__minitraining = True
class TrainingFactory(EducationGroupYearFactory):
education_group_type = factory.SubFactory('base.tests.factories.education_group_type.EducationGroupTypeFactory',
category=education_group_categories.TRAINING)
class GroupFactory(EducationGroupYearFactory):
education_group_type__group = True
class EducationGroupYearCommonBachelorFactory(EducationGroupYearFactory):
acronym = 'common-1ba'
partial_acronym = 'common-1ba'
education_group_type = factory.SubFactory(
'base.tests.factories.education_group_type.EducationGroupTypeFactory',
name=TrainingType.BACHELOR.name,
category=education_group_categories.TRAINING
)
class EducationGroupYearBachelorFactory(EducationGroupYearCommonBachelorFactory):
acronym = 'actu1ba'
partial_acronym = 'actu1ba'
class EducationGroupYearCommonAgregationFactory(EducationGroupYearFactory):
acronym = 'common-2a'
partial_acronym = 'common-2a'
education_group_type = factory.SubFactory(
'base.tests.factories.education_group_type.EducationGroupTypeFactory',
name=TrainingType.AGGREGATION.name,
category=education_group_categories.TRAINING
)
class EducationGroupYearCommonSpecializedMasterFactory(EducationGroupYearFactory):
acronym = 'common-2mc'
partial_acronym = 'common-2mc'
education_group_type = factory.SubFactory(
'base.tests.factories.education_group_type.EducationGroupTypeFactory',
name=TrainingType.MASTER_MC.name,
category=education_group_categories.TRAINING
)
class EducationGroupYearCommonMasterFactory(EducationGroupYearFactory):
acronym = 'common-2m'
partial_acronym = 'common-2m'
education_group_type = factory.SubFactory(
'base.tests.factories.education_group_type.EducationGroupTypeFactory',
name=TrainingType.PGRM_MASTER_120.name,
category=education_group_categories.TRAINING
)
class EducationGroupYearMasterFactory(EducationGroupYearCommonMasterFactory):
acronym = 'actu2m'
partial_acronym = 'actu2m'
class EducationGroupYearCommonFactory(EducationGroupYearFactory):
acronym = 'common'
partial_acronym = 'common'
education_group_type = factory.SubFactory(
'base.tests.factories.education_group_type.EducationGroupTypeFactory',
name=MiniTrainingType.DEEPENING.name,
category=education_group_categories.MINI_TRAINING
)
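# Illustrative sketch (not part of the original module): typical factory
# use inside a Django TestCase, where a test database is available. The
# attribute overrides are arbitrary demo values.
def _demo_factories():
    training = TrainingFactory(acronym='DEMO2M')
    group = GroupFactory(academic_year=training.academic_year)
    return training, group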
| agpl-3.0 |
huguesv/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/mac_cyrillic.py | 593 | 13710 | """ Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-cyrillic',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\u0410' # 0x80 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\u0406' # 0xA7 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE
u'\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE
u'\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE
u'\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI
u'\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE
u'\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE
u'\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE
u'\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE
u'\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE
u'\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE
u'\u040c' # 0xCD -> CYRILLIC CAPITAL LETTER KJE
u'\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE
u'\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK
u'\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U
u'\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE
u'\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE
u'\u2116' # 0xDC -> NUMERO SIGN
u'\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO
u'\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u20ac' # 0xFF -> EURO SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
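# Illustrative sketch (not part of the original module): the tables above
# plug directly into the codecs charmap helpers, which is all the Codec
# methods do.
def _demo_charmap():
    raw = b'\x80\x81\x82'                   # CYRILLIC CAPITAL A, BE, VE
    text, _ = codecs.charmap_decode(raw, 'strict', decoding_table)
    data, _ = codecs.charmap_encode(text, 'strict', encoding_table)
    assert data == raw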
| apache-2.0 |
knittledan/solr_lxml_Example | thirdParty/requests/packages/urllib3/poolmanager.py | 550 | 8977 | # urllib3/poolmanager.py
# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .request import RequestMethods
from .util import parse_url
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version')
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example: ::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = pool_classes_by_scheme[scheme]
kwargs = self.connection_pool_kw
if scheme == 'http':
kwargs = self.connection_pool_kw.copy()
for kw in SSL_KEYWORDS:
kwargs.pop(kw, None)
return pool_cls(host, port, **kwargs)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port)
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 2616, Section 10.3.4
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown
kw['redirect'] = redirect
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. For
        HTTP requests they are sent with every request, while in the
        HTTPS/CONNECT case they are sent only once. Can be used for proxy
        authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
assert self.proxy.scheme in ("http", "https"), \
'Not supported proxy scheme %s' % self.proxy.scheme
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
self.headers))
return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
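# Illustrative sketch (not part of the original module): one shared
# PoolManager; pools are created lazily per (scheme, host, port) and the
# least recently used pool is discarded once num_pools is exceeded. The
# URL is an arbitrary demo value and the call performs real network I/O.
def _demo_pool_manager():
    manager = PoolManager(num_pools=2)
    response = manager.request('GET', 'http://example.com/')
    return response.status, len(manager.pools)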
| mit |
int19h/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32/scripts/regsetup.py | 7 | 20020 | # A tool to setup the Python registry.
class error(Exception):
pass
import sys # at least we can count on this!
def FileExists(fname):
"""Check if a file exists. Returns true or false.
"""
import os
try:
os.stat(fname)
return 1
except os.error as details:
return 0
def IsPackageDir(path, packageName, knownFileName):
"""Given a path, a ni package name, and possibly a known file name in
the root of the package, see if this path is good.
"""
import os
if knownFileName is None:
knownFileName = "."
return FileExists(os.path.join(os.path.join(path, packageName),knownFileName))
def IsDebug():
"""Return "_d" if we're running a debug version.
This is to be used within DLL names when locating them.
"""
import imp
for suffix_item in imp.get_suffixes():
if suffix_item[0]=='_d.pyd':
return '_d'
return ''
def FindPackagePath(packageName, knownFileName, searchPaths):
"""Find a package.
Given a ni style package name, check the package is registered.
First place looked is the registry for an existing entry. Then
the searchPaths are searched.
"""
import regutil, os
pathLook = regutil.GetRegisteredNamedPath(packageName)
if pathLook and IsPackageDir(pathLook, packageName, knownFileName):
return pathLook, None # The currently registered one is good.
# Search down the search paths.
for pathLook in searchPaths:
if IsPackageDir(pathLook, packageName, knownFileName):
# Found it
ret = os.path.abspath(pathLook)
return ret, ret
raise error("The package %s can not be located" % packageName)
def FindHelpPath(helpFile, helpDesc, searchPaths):
# See if the current registry entry is OK
import os, win32api, win32con
try:
key = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, "Software\\Microsoft\\Windows\\Help", 0, win32con.KEY_ALL_ACCESS)
try:
try:
path = win32api.RegQueryValueEx(key, helpDesc)[0]
if FileExists(os.path.join(path, helpFile)):
return os.path.abspath(path)
except win32api.error:
pass # no registry entry.
finally:
key.Close()
except win32api.error:
pass
for pathLook in searchPaths:
if FileExists(os.path.join(pathLook, helpFile)):
return os.path.abspath(pathLook)
pathLook = os.path.join(pathLook, "Help")
if FileExists(os.path.join( pathLook, helpFile)):
return os.path.abspath(pathLook)
raise error("The help file %s can not be located" % helpFile)
def FindAppPath(appName, knownFileName, searchPaths):
"""Find an application.
First place looked is the registry for an existing entry. Then
the searchPaths are searched.
"""
# Look in the first path.
import regutil, string, os
regPath = regutil.GetRegisteredNamedPath(appName)
if regPath:
pathLook = regPath.split(";")[0]
if regPath and FileExists(os.path.join(pathLook, knownFileName)):
return None # The currently registered one is good.
# Search down the search paths.
for pathLook in searchPaths:
if FileExists(os.path.join(pathLook, knownFileName)):
# Found it
return os.path.abspath(pathLook)
raise error("The file %s can not be located for application %s" % (knownFileName, appName))
def FindPythonExe(exeAlias, possibleRealNames, searchPaths):
"""Find an exe.
Returns the full path to the .exe, and a boolean indicating if the current
registered entry is OK. We don't trust the already registered version even
if it exists - it may be wrong (ie, for a different Python version)
"""
import win32api, regutil, string, os, sys
if possibleRealNames is None:
possibleRealNames = exeAlias
# Look first in Python's home.
found = os.path.join(sys.prefix, possibleRealNames)
if not FileExists(found): # for developers
if "64 bit" in sys.version:
found = os.path.join(sys.prefix, "PCBuild", "amd64", possibleRealNames)
else:
found = os.path.join(sys.prefix, "PCBuild", possibleRealNames)
if not FileExists(found):
found = LocateFileName(possibleRealNames, searchPaths)
registered_ok = 0
try:
registered = win32api.RegQueryValue(regutil.GetRootKey(), regutil.GetAppPathsKey() + "\\" + exeAlias)
registered_ok = found==registered
except win32api.error:
pass
return found, registered_ok
def QuotedFileName(fname):
"""Given a filename, return a quoted version if necessary
"""
import regutil, string
try:
fname.index(" ") # Other chars forcing quote?
return '"%s"' % fname
except ValueError:
# No space in name.
return fname
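# Illustrative sketch (not part of the original script): quotes are added
# only when the file name actually contains a space.
def _demoQuotedFileName():
    assert QuotedFileName("C:\\Python\\python.exe") == "C:\\Python\\python.exe"
    assert QuotedFileName("C:\\Program Files\\python.exe") == \
           '"C:\\Program Files\\python.exe"'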
def LocateFileName(fileNamesString, searchPaths):
"""Locate a file name, anywhere on the search path.
If the file can not be located, prompt the user to find it for us
(using a common OpenFile dialog)
Raises KeyboardInterrupt if the user cancels.
"""
import regutil, string, os
fileNames = fileNamesString.split(";")
for path in searchPaths:
for fileName in fileNames:
try:
retPath = os.path.join(path, fileName)
os.stat(retPath)
break
except os.error:
retPath = None
if retPath:
break
else:
fileName = fileNames[0]
try:
import win32ui, win32con
except ImportError:
raise error("Need to locate the file %s, but the win32ui module is not available\nPlease run the program again, passing as a parameter the path to this file." % fileName)
# Display a common dialog to locate the file.
flags=win32con.OFN_FILEMUSTEXIST
ext = os.path.splitext(fileName)[1]
filter = "Files of requested type (*%s)|*%s||" % (ext,ext)
dlg = win32ui.CreateFileDialog(1,None,fileName,flags,filter,None)
dlg.SetOFNTitle("Locate " + fileName)
if dlg.DoModal() != win32con.IDOK:
raise KeyboardInterrupt("User cancelled the process")
retPath = dlg.GetPathName()
return os.path.abspath(retPath)
def LocatePath(fileName, searchPaths):
"""Like LocateFileName, but returns a directory only.
"""
import os
return os.path.abspath(os.path.split(LocateFileName(fileName, searchPaths))[0])
def LocateOptionalPath(fileName, searchPaths):
"""Like LocatePath, but returns None if the user cancels.
"""
try:
return LocatePath(fileName, searchPaths)
except KeyboardInterrupt:
return None
def LocateOptionalFileName(fileName, searchPaths = None):
"""Like LocateFileName, but returns None if the user cancels.
"""
try:
return LocateFileName(fileName, searchPaths)
except KeyboardInterrupt:
return None
def LocatePythonCore(searchPaths):
"""Locate and validate the core Python directories. Returns a list
of paths that should be used as the core (ie, un-named) portion of
the Python path.
"""
import os, regutil
currentPath = regutil.GetRegisteredNamedPath(None)
if currentPath:
presearchPaths = currentPath.split(";")
else:
presearchPaths = [os.path.abspath(".")]
libPath = None
for path in presearchPaths:
if FileExists(os.path.join(path, "os.py")):
libPath = path
break
if libPath is None and searchPaths is not None:
libPath = LocatePath("os.py", searchPaths)
if libPath is None:
raise error("The core Python library could not be located.")
corePath = None
suffix = IsDebug()
for path in presearchPaths:
if FileExists(os.path.join(path, "unicodedata%s.pyd" % suffix)):
corePath = path
break
if corePath is None and searchPaths is not None:
corePath = LocatePath("unicodedata%s.pyd" % suffix, searchPaths)
if corePath is None:
raise error("The core Python path could not be located.")
installPath = os.path.abspath(os.path.join(libPath, ".."))
return installPath, [libPath, corePath]
def FindRegisterPackage(packageName, knownFile, searchPaths, registryAppName = None):
"""Find and Register a package.
Assumes the core registry setup correctly.
In addition, if the location located by the package is already
in the **core** path, then an entry is registered, but no path.
(no other paths are checked, as the application whose path was used
may later be uninstalled. This should not happen with the core)
"""
import regutil, string
if not packageName: raise error("A package name must be supplied")
corePaths = regutil.GetRegisteredNamedPath(None).split(";")
if not searchPaths: searchPaths = corePaths
registryAppName = registryAppName or packageName
try:
pathLook, pathAdd = FindPackagePath(packageName, knownFile, searchPaths)
if pathAdd is not None:
if pathAdd in corePaths:
pathAdd = ""
regutil.RegisterNamedPath(registryAppName, pathAdd)
return pathLook
except error as details:
print("*** The %s package could not be registered - %s" % (packageName, details))
print("*** Please ensure you have passed the correct paths on the command line.")
print("*** - For packages, you should pass a path to the packages parent directory,")
print("*** - and not the package directory itself...")
def FindRegisterApp(appName, knownFiles, searchPaths):
"""Find and Register a package.
Assumes the core registry setup correctly.
"""
import regutil, string
if type(knownFiles)==type(''):
knownFiles = [knownFiles]
paths=[]
try:
for knownFile in knownFiles:
pathLook = FindAppPath(appName, knownFile, searchPaths)
if pathLook:
paths.append(pathLook)
except error as details:
print("*** ", details)
return
regutil.RegisterNamedPath(appName, ";".join(paths))
def FindRegisterPythonExe(exeAlias, searchPaths, actualFileNames = None):
"""Find and Register a Python exe (not necessarily *the* python.exe)
Assumes the core registry setup correctly.
"""
import regutil, string
fname, ok = FindPythonExe(exeAlias, actualFileNames, searchPaths)
if not ok:
regutil.RegisterPythonExe(fname, exeAlias)
return fname
def FindRegisterHelpFile(helpFile, searchPaths, helpDesc = None ):
import regutil
try:
pathLook = FindHelpPath(helpFile, helpDesc, searchPaths)
except error as details:
print("*** ", details)
return
# print "%s found at %s" % (helpFile, pathLook)
regutil.RegisterHelpFile(helpFile, pathLook, helpDesc)
def SetupCore(searchPaths):
"""Setup the core Python information in the registry.
This function makes no assumptions about the current state of sys.path.
After this function has completed, you should have access to the standard
Python library, and the standard Win32 extensions
"""
import sys
for path in searchPaths:
sys.path.append(path)
import os
import regutil, win32api,win32con
installPath, corePaths = LocatePythonCore(searchPaths)
# Register the core Pythonpath.
print(corePaths)
regutil.RegisterNamedPath(None, ';'.join(corePaths))
# Register the install path.
hKey = win32api.RegCreateKey(regutil.GetRootKey() , regutil.BuildDefaultPythonKey())
try:
# Core Paths.
win32api.RegSetValue(hKey, "InstallPath", win32con.REG_SZ, installPath)
finally:
win32api.RegCloseKey(hKey)
# Register the win32 core paths.
win32paths = os.path.abspath( os.path.split(win32api.__file__)[0]) + ";" + \
os.path.abspath( os.path.split(LocateFileName("win32con.py;win32con.pyc", sys.path ) )[0] )
# Python has builtin support for finding a "DLLs" directory, but
# not a PCBuild. Having it in the core paths means it is ignored when
# an EXE not in the Python dir is hosting us - so we add it as a named
# value
check = os.path.join(sys.prefix, "PCBuild")
if "64 bit" in sys.version:
check = os.path.join(check, "amd64")
if os.path.isdir(check):
regutil.RegisterNamedPath("PCBuild",check)
def RegisterShellInfo(searchPaths):
"""Registers key parts of the Python installation with the Windows Shell.
Assumes a valid, minimal Python installation exists
(ie, SetupCore() has been previously successfully run)
"""
import regutil, win32con
suffix = IsDebug()
# Set up a pointer to the .exe's
exePath = FindRegisterPythonExe("Python%s.exe" % suffix, searchPaths)
regutil.SetRegistryDefaultValue(".py", "Python.File", win32con.HKEY_CLASSES_ROOT)
regutil.RegisterShellCommand("Open", QuotedFileName(exePath)+" \"%1\" %*", "&Run")
regutil.SetRegistryDefaultValue("Python.File\\DefaultIcon", "%s,0" % exePath, win32con.HKEY_CLASSES_ROOT)
FindRegisterHelpFile("Python.hlp", searchPaths, "Main Python Documentation")
FindRegisterHelpFile("ActivePython.chm", searchPaths, "Main Python Documentation")
# We consider the win32 core, as it contains all the win32 api type
# stuff we need.
# FindRegisterApp("win32", ["win32con.pyc", "win32api%s.pyd" % suffix], searchPaths)
usage = """\
regsetup.py - Setup/maintain the registry for Python apps.
Run without options, (but possibly search paths) to repair a totally broken
python registry setup. This should allow other options to work.
Usage: %s [options ...] paths ...
-p packageName -- Find and register a package. Looks in the paths for
a sub-directory with the name of the package, and
adds a path entry for the package.
-a appName -- Unconditionally add an application name to the path.
                     A new path entry is created with the app name, and the
paths specified are added to the registry.
-c -- Add the specified paths to the core Pythonpath.
If a path appears on the core path, and a package also
needs that same path, the package will not bother
                     registering it. Therefore, by adding paths to the
core path, you can avoid packages re-registering the same path.
-m filename -- Find and register the specific file name as a module.
Do not include a path on the filename!
--shell -- Register everything with the Win95/NT shell.
--upackage name -- Unregister the package
--uapp name -- Unregister the app (identical to --upackage)
--umodule name -- Unregister the module
--description -- Print a description of the usage.
--examples -- Print examples of usage.
""" % sys.argv[0]
description="""\
If no options are processed, the program attempts to validate and set
the standard Python path to the point where the standard library is
available. This can be handy if you move Python to a new drive/sub-directory,
in which case most of the options would fail (as they need at least string.py,
os.py etc to function.)
Running without options should repair Python well enough to run with
the other options.
paths are search paths that the program will use to seek out a file.
For example, when registering the core Python, you may wish to
provide paths to non-standard places to look for the Python help files,
library files, etc.
See also the "regcheck.py" utility which will check and dump the contents
of the registry.
"""
examples="""\
Examples:
"regsetup c:\\wierd\\spot\\1 c:\\wierd\\spot\\2"
Attempts to setup the core Python. Looks in some standard places,
as well as the 2 wierd spots to locate the core Python files (eg, Python.exe,
python14.dll, the standard library and Win32 Extensions.
"regsetup -a myappname . .\subdir"
Registers a new Pythonpath entry named myappname, with "C:\\I\\AM\\HERE" and
"C:\\I\\AM\\HERE\subdir" added to the path (ie, all args are converted to
absolute paths)
"regsetup -c c:\\my\\python\\files"
Unconditionally add "c:\\my\\python\\files" to the 'core' Python path.
"regsetup -m some.pyd \\windows\\system"
Register the module some.pyd in \\windows\\system as a registered
module. This will allow some.pyd to be imported, even though the
windows system directory is not (usually!) on the Python Path.
"regsetup --umodule some"
Unregister the module "some". This means normal import rules then apply
for that module.
"""
if __name__=='__main__':
if len(sys.argv)>1 and sys.argv[1] in ['/?','-?','-help','-h']:
print(usage)
elif len(sys.argv)==1 or not sys.argv[1][0] in ['/','-']:
# No args, or useful args.
searchPath = sys.path[:]
for arg in sys.argv[1:]:
searchPath.append(arg)
# Good chance we are being run from the "regsetup.py" directory.
# Typically this will be "\somewhere\win32\Scripts" and the
# "somewhere" and "..\Lib" should also be searched.
searchPath.append("..\\Build")
searchPath.append("..\\Lib")
searchPath.append("..")
searchPath.append("..\\..")
# for developers:
# also search somewhere\lib, ..\build, and ..\..\build
searchPath.append("..\\..\\lib")
searchPath.append("..\\build")
if "64 bit" in sys.version:
searchPath.append("..\\..\\pcbuild\\amd64")
else:
searchPath.append("..\\..\\pcbuild")
print("Attempting to setup/repair the Python core")
SetupCore(searchPath)
RegisterShellInfo(searchPath)
FindRegisterHelpFile("PyWin32.chm", searchPath, "Pythonwin Reference")
# Check the registry.
print("Registration complete - checking the registry...")
import regcheck
regcheck.CheckRegistry()
else:
searchPaths = []
import getopt, string
opts, args = getopt.getopt(sys.argv[1:], 'p:a:m:c',
['shell','upackage=','uapp=','umodule=','description','examples'])
for arg in args:
searchPaths.append(arg)
for o,a in opts:
if o=='--description':
print(description)
if o=='--examples':
print(examples)
if o=='--shell':
print("Registering the Python core.")
RegisterShellInfo(searchPaths)
if o=='-p':
print("Registering package", a)
FindRegisterPackage(a,None,searchPaths)
if o in ['--upackage', '--uapp']:
import regutil
print("Unregistering application/package", a)
regutil.UnregisterNamedPath(a)
if o=='-a':
import regutil
path = ";".join(searchPaths)
print("Registering application", a,"to path",path)
regutil.RegisterNamedPath(a,path)
if o=='-c':
if not len(searchPaths):
raise error("-c option must provide at least one additional path")
import win32api, regutil
currentPaths = regutil.GetRegisteredNamedPath(None).split(";")
oldLen = len(currentPaths)
for newPath in searchPaths:
if newPath not in currentPaths:
currentPaths.append(newPath)
if len(currentPaths)!=oldLen:
print("Registering %d new core paths" % (len(currentPaths)-oldLen))
regutil.RegisterNamedPath(None,";".join(currentPaths))
else:
print("All specified paths are already registered.")
| apache-2.0 |
yasir1brahim/OLiMS | lims/utils/samplepartition.py | 2 | 3119 | from lims.utils import tmpID
from dependencies.dependency import _createObjectByType
from magnitude import mg
def compare_containers(a, b):
    # Compare two containers by capacity. getCapacity() is expected to
    # return a string such as "50 ml"; splitting it into a numeric value
    # and a unit lets the magnitude library compare across units.
    a_capacity = a.getCapacity().lower().split(" ", 1)
    b_capacity = b.getCapacity().lower().split(" ", 1)
    a_magnitude = mg(float(a_capacity[0]), a_capacity[1])
    b_magnitude = mg(float(b_capacity[0]), b_capacity[1])
return cmp(
a.getCapacity() and a_magnitude or mg(0, 'ml'),
b.getCapacity() and b_magnitude or mg(0, 'ml')
)
def set_container_preservation(context, container, data):
# If container is pre-preserved, set the partition's preservation,
# and flag the partition to be transitioned below.
if container:
if type(container) in (list, tuple):
container = container[0]
proxies = context.bika_setup_catalog(UID=container)
container = [_p.getObject() for _p in proxies]
container = container[0] if container else None
if container:
prepreserved = container.getPrePreserved()
preservation = container.getPreservation()
data['prepreserved'] = prepreserved
if prepreserved and preservation:
return preservation.UID()
return data.get('preservation_uid', '')
def create_samplepartition(context, data, analyses=[]):
partition = _createObjectByType('SamplePartition', context, data['part_id'])
partition.unmarkCreationFlag()
# Determine if the sampling workflow is enabled
workflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
# Sort containers and select smallest
container = data.get('container_uid', None)
if container:
containers = []
if type(container[0]) is str:
# UIDs
containers = context.bika_setup_catalog(UID=container)
containers = [_p.getObject() for _p in containers]
elif hasattr(container[0], 'getObject'):
# Brains
containers = [_p.getObject() for _p in container]
elif hasattr(container[0], 'portal_type'):
containers = [c for c in container]
if containers:
try: containers.sort(lambda a, b: compare_containers(a, b))
except: pass
container = containers[0]
# Set the container and preservation
preservation = set_container_preservation(context, container, data)
# Add analyses
partition_services = data['services']
analyses = [a for a in analyses if a.getServiceUID() in partition_services]
if analyses:
partition.edit(Analyses=analyses)
# Set some generated values
partition.edit(
Container=container,
Preservation=preservation,
)
# Attach partition to analyses
if analyses:
for analysis in analyses:
analysis.setSamplePartition(partition)
# Perform the appropriate workflow action
workflow_action = 'sampling_workflow' if workflow_enabled \
else 'no_sampling_workflow'
context.portal_workflow.doActionFor(partition, workflow_action)
# Return the created partition
return partition
| agpl-3.0 |
antoviaque/edx-platform | common/djangoapps/course_action_state/migrations/0001_initial.py | 50 | 1808 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
import xmodule_django.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CourseRerunState',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_time', models.DateTimeField(auto_now_add=True)),
('updated_time', models.DateTimeField(auto_now=True)),
('course_key', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
('action', models.CharField(max_length=100, db_index=True)),
('state', models.CharField(max_length=50)),
('should_display', models.BooleanField(default=False)),
('message', models.CharField(max_length=1000)),
('source_course_key', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
('display_name', models.CharField(default=b'', max_length=255, blank=True)),
('created_user', models.ForeignKey(related_name='created_by_user+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('updated_user', models.ForeignKey(related_name='updated_by_user+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.AlterUniqueTogether(
name='coursererunstate',
unique_together=set([('course_key', 'action')]),
),
]
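# Illustrative note (not part of the original migration): the
# AlterUniqueTogether above adds a database-level unique constraint, so
# inserting a second CourseRerunState row with the same
# (course_key, action) pair raises an IntegrityError.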
| agpl-3.0 |
c3m3gyanesh/RouteFlow-OpenConfig | pox/pox/log/color.py | 26 | 5403 | # Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# NOTE: Not platform independent -- uses VT escape codes
# Magic sequence used to introduce a command or color
MAGIC = "@@@"
# Colors for log levels
LEVEL_COLORS = {
'DEBUG': 'CYAN',
'INFO': 'GREEN',
'WARNING': 'YELLOW',
'ERROR': 'RED',
'CRITICAL': 'blink@@@RED',
}
# Will get set to True if module is initialized
enabled = False
# Gets set to True if we should strip special sequences but
# not actually try to colorize
_strip_only = False
import logging
import sys
# Name to (intensity, base_value) (more colors added later)
COLORS = {
'black' : (0,0),
'red' : (0,1),
'green' : (0,2),
'yellow' : (0,3),
'blue' : (0,4),
'magenta' : (0,5),
'cyan' : (0,6),
'gray' : (0,7),
'darkgray' : (1,0),
'pink' : (1,1),
'white' : (1,7),
}
# Add intense/bold colors (names in capitals)
for _c in [_n for _n,_v in COLORS.items() if _v[0] == 0]:
COLORS[_c.upper()] = (1,COLORS[_c][1])
COMMANDS = {
'reset' : 0,
'bold' : 1,
'dim' : 2,
'bright' : 1,
'dull' : 2,
'bright:' : 1,
'dull:' : 2,
'blink' : 5,
'BLINK' : 6,
'invert' : 7,
'bg:' : -1, # Special
'level' : -2, # Special -- color of current level
'normal' : 22,
'underline' : 4,
'nounderline' : 24,
}
# Control Sequence Introducer
CSI = "\033["
def _color (color, msg):
""" Colorizes the given text """
return _proc(MAGIC + color) + msg + _proc(MAGIC + 'reset')
def _proc (msg, level_color = "DEBUG"):
"""
Do some replacements on the text
"""
msg = msg.split(MAGIC)
#print "proc:",msg
r = ''
i = 0
cmd = False
while i < len(msg):
m = msg[i]
#print i,m
i += 1
if cmd:
best = None
bestlen = 0
for k,v in COMMANDS.iteritems():
if len(k) > bestlen:
if m.startswith(k):
best = (k,v)
bestlen = len(k)
special = None
if best is not None and best[0].endswith(':'):
special = best
m = m[bestlen:]
best = None
bestlen = 0
for k,v in COLORS.iteritems():
if len(k) > bestlen:
if m.startswith(k):
best = (k,v)
bestlen = len(k)
if best is not None:
#print "COMMAND", best
m = m[bestlen:]
if type(best[1]) is tuple:
# Color
brightness,color = best[1]
if special is not None:
if special[1] == -1:
brightness = None
color += 10
color += 30
if not _strip_only:
r += CSI
if brightness is not None:
r += str(brightness) + ";"
r += str(color) + "m"
elif not _strip_only:
# Command
if best[1] == -2:
r += _proc(MAGIC + LEVEL_COLORS.get(level_color, ""), level_color)
else:
r += CSI + str(best[1]) + "m"
cmd = True
r += m
return r
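# Illustrative example (not part of the original module): with colorizing
# enabled, _color('RED', 'fail') expands to
#   '\x1b[1;31m' + 'fail' + '\x1b[0m'
# because 'RED' is the intense variant (1, 1) -> CSI '1;31m', and 'reset'
# maps to command 0 -> CSI '0m'.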
def launch (entire=False):
"""
If --entire then the whole message is color-coded, otherwise just the
log level.
Also turns on interpretation of some special sequences in the log
format string. For example, try:
log --format="%(levelname)s: @@@bold%(message)s@@@normal" log.color
"""
global enabled
if enabled: return
from pox.core import core
log = core.getLogger()
windows_hack = False
# Try to work on Windows
if sys.platform == "win32":
try:
from colorama import init
windows_hack = True
init()
except:
log.info("You need colorama if you want color logging on Windows")
global _strip_only
_strip_only = True
from pox.core import _default_log_handler as dlf
if not dlf:
log.warning("Color logging disabled -- no default logger found")
return
#if not hasattr(dlf, 'formatter'):
# log.warning("Color logging disabled -- no formatter found")
# return
#if not hasattr(dlf.formatter, '_fmt'):
# log.warning("Color logging disabled -- formatter unrecognized")
# return
# Monkeypatch in a new format function...
old_format = dlf.format
if entire:
def new_format (record):
msg = _proc(old_format(record), record.levelname)
color = LEVEL_COLORS.get(record.levelname)
if color is None:
return msg
return _color(color, msg)
else:
def new_format (record):
color = LEVEL_COLORS.get(record.levelname)
oldlevelname = record.levelname
if color is not None:
record.levelname = _color(color, record.levelname)
r = _proc(old_format(record), oldlevelname)
record.levelname = oldlevelname
return r
dlf.format = new_format
if windows_hack:
if hasattr(dlf, "stream"):
if dlf.stream is sys.__stderr__:
dlf.stream = sys.stderr
enabled = True
else:
enabled = True
| apache-2.0 |
yoer/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/xlrd/formula.py | 64 | 89648 | # -*- coding: cp1252 -*-
##
# Module for parsing/evaluating Microsoft Excel formulas.
#
# <p>Copyright © 2005-2009 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under
# a BSD-style licence.</p>
##
# No part of the content of this file was derived from the works of David Giffin.
import copy
from struct import unpack
from timemachine import *
from biffh import unpack_unicode_update_pos, unpack_string_update_pos, \
XLRDError, hex_char_dump, error_text_from_code, BaseObject
__all__ = [
'oBOOL', 'oERR', 'oNUM', 'oREF', 'oREL', 'oSTRG', 'oUNK',
'decompile_formula',
'dump_formula',
'evaluate_name_formula',
'okind_dict',
'rangename3d', 'rangename3drel', 'cellname', 'cellnameabs', 'colname',
]
# sztabN[opcode] -> the number of bytes to consume.
# -1 means variable
# -2 means this opcode not implemented in this version.
# Which N to use? Depends on biff_version; see szdict.
sztab0 = [-2, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, 8, 4, 2, 2, 3, 9, 8, 2, 3, 8, 4, 7, 5, 5, 5, 2, 4, 7, 4, 7, 2, 2, -2, -2, -2, -2, -2, -2, -2, -2, 3, -2, -2, -2, -2, -2, -2, -2]
sztab1 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, 11, 5, 2, 2, 3, 9, 9, 2, 3, 11, 4, 7, 7, 7, 7, 3, 4, 7, 4, 7, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, 3, -2, -2, -2, -2, -2, -2, -2]
sztab2 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, 11, 5, 2, 2, 3, 9, 9, 3, 4, 11, 4, 7, 7, 7, 7, 3, 4, 7, 4, 7, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2]
sztab3 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, -2, -2, 2, 2, 3, 9, 9, 3, 4, 15, 4, 7, 7, 7, 7, 3, 4, 7, 4, 7, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, -2, 25, 18, 21, 18, 21, -2, -2]
sztab4 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -2, -2, 2, 2, 3, 9, 9, 3, 4, 5, 5, 9, 7, 7, 7, 3, 5, 9, 5, 9, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, -2, 7, 7, 11, 7, 11, -2, -2]
szdict = {
20 : sztab0,
30 : sztab1,
40 : sztab2,
45 : sztab2,
50 : sztab3,
70 : sztab3,
80 : sztab4,
}
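# Illustrative sketch (not part of the original module) of how these
# tables are consulted by the token loops below, for a raw token byte
# `op` in a BIFF stream of version `bv`:
#   sztab = szdict[bv]                  # size table for this BIFF version
#   opcode = op & 0x1f
#   optype = (op & 0x60) >> 5
#   opx = opcode + 32 if optype else opcode
#   sz = sztab[opx]                     # -1: variable size, -2: unsupported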
# For debugging purposes ... the name for each opcode
# (without the prefix "t" used on OOo docs)
onames = ['Unk00', 'Exp', 'Tbl', 'Add', 'Sub', 'Mul', 'Div', 'Power', 'Concat', 'LT', 'LE', 'EQ', 'GE', 'GT', 'NE', 'Isect', 'List', 'Range', 'Uplus', 'Uminus', 'Percent', 'Paren', 'MissArg', 'Str', 'Extended', 'Attr', 'Sheet', 'EndSheet', 'Err', 'Bool', 'Int', 'Num', 'Array', 'Func', 'FuncVar', 'Name', 'Ref', 'Area', 'MemArea', 'MemErr', 'MemNoMem', 'MemFunc', 'RefErr', 'AreaErr', 'RefN', 'AreaN', 'MemAreaN', 'MemNoMemN', '', '', '', '', '', '', '', '', 'FuncCE', 'NameX', 'Ref3d', 'Area3d', 'RefErr3d', 'AreaErr3d', '', '']
func_defs = {
# index: (name, min#args, max#args, flags, #known_args, return_type, kargs)
0 : ('COUNT', 0, 30, 0x04, 1, 'V', 'R'),
1 : ('IF', 2, 3, 0x04, 3, 'V', 'VRR'),
2 : ('ISNA', 1, 1, 0x02, 1, 'V', 'V'),
3 : ('ISERROR', 1, 1, 0x02, 1, 'V', 'V'),
4 : ('SUM', 0, 30, 0x04, 1, 'V', 'R'),
5 : ('AVERAGE', 1, 30, 0x04, 1, 'V', 'R'),
6 : ('MIN', 1, 30, 0x04, 1, 'V', 'R'),
7 : ('MAX', 1, 30, 0x04, 1, 'V', 'R'),
8 : ('ROW', 0, 1, 0x04, 1, 'V', 'R'),
9 : ('COLUMN', 0, 1, 0x04, 1, 'V', 'R'),
10 : ('NA', 0, 0, 0x02, 0, 'V', ''),
11 : ('NPV', 2, 30, 0x04, 2, 'V', 'VR'),
12 : ('STDEV', 1, 30, 0x04, 1, 'V', 'R'),
13 : ('DOLLAR', 1, 2, 0x04, 1, 'V', 'V'),
14 : ('FIXED', 2, 3, 0x04, 3, 'V', 'VVV'),
15 : ('SIN', 1, 1, 0x02, 1, 'V', 'V'),
16 : ('COS', 1, 1, 0x02, 1, 'V', 'V'),
17 : ('TAN', 1, 1, 0x02, 1, 'V', 'V'),
18 : ('ATAN', 1, 1, 0x02, 1, 'V', 'V'),
19 : ('PI', 0, 0, 0x02, 0, 'V', ''),
20 : ('SQRT', 1, 1, 0x02, 1, 'V', 'V'),
21 : ('EXP', 1, 1, 0x02, 1, 'V', 'V'),
22 : ('LN', 1, 1, 0x02, 1, 'V', 'V'),
23 : ('LOG10', 1, 1, 0x02, 1, 'V', 'V'),
24 : ('ABS', 1, 1, 0x02, 1, 'V', 'V'),
25 : ('INT', 1, 1, 0x02, 1, 'V', 'V'),
26 : ('SIGN', 1, 1, 0x02, 1, 'V', 'V'),
27 : ('ROUND', 2, 2, 0x02, 2, 'V', 'VV'),
28 : ('LOOKUP', 2, 3, 0x04, 2, 'V', 'VR'),
29 : ('INDEX', 2, 4, 0x0c, 4, 'R', 'RVVV'),
30 : ('REPT', 2, 2, 0x02, 2, 'V', 'VV'),
31 : ('MID', 3, 3, 0x02, 3, 'V', 'VVV'),
32 : ('LEN', 1, 1, 0x02, 1, 'V', 'V'),
33 : ('VALUE', 1, 1, 0x02, 1, 'V', 'V'),
34 : ('TRUE', 0, 0, 0x02, 0, 'V', ''),
35 : ('FALSE', 0, 0, 0x02, 0, 'V', ''),
36 : ('AND', 1, 30, 0x04, 1, 'V', 'R'),
37 : ('OR', 1, 30, 0x04, 1, 'V', 'R'),
38 : ('NOT', 1, 1, 0x02, 1, 'V', 'V'),
39 : ('MOD', 2, 2, 0x02, 2, 'V', 'VV'),
40 : ('DCOUNT', 3, 3, 0x02, 3, 'V', 'RRR'),
41 : ('DSUM', 3, 3, 0x02, 3, 'V', 'RRR'),
42 : ('DAVERAGE', 3, 3, 0x02, 3, 'V', 'RRR'),
43 : ('DMIN', 3, 3, 0x02, 3, 'V', 'RRR'),
44 : ('DMAX', 3, 3, 0x02, 3, 'V', 'RRR'),
45 : ('DSTDEV', 3, 3, 0x02, 3, 'V', 'RRR'),
46 : ('VAR', 1, 30, 0x04, 1, 'V', 'R'),
47 : ('DVAR', 3, 3, 0x02, 3, 'V', 'RRR'),
48 : ('TEXT', 2, 2, 0x02, 2, 'V', 'VV'),
49 : ('LINEST', 1, 4, 0x04, 4, 'A', 'RRVV'),
50 : ('TREND', 1, 4, 0x04, 4, 'A', 'RRRV'),
51 : ('LOGEST', 1, 4, 0x04, 4, 'A', 'RRVV'),
52 : ('GROWTH', 1, 4, 0x04, 4, 'A', 'RRRV'),
56 : ('PV', 3, 5, 0x04, 5, 'V', 'VVVVV'),
57 : ('FV', 3, 5, 0x04, 5, 'V', 'VVVVV'),
58 : ('NPER', 3, 5, 0x04, 5, 'V', 'VVVVV'),
59 : ('PMT', 3, 5, 0x04, 5, 'V', 'VVVVV'),
60 : ('RATE', 3, 6, 0x04, 6, 'V', 'VVVVVV'),
61 : ('MIRR', 3, 3, 0x02, 3, 'V', 'RVV'),
62 : ('IRR', 1, 2, 0x04, 2, 'V', 'RV'),
63 : ('RAND', 0, 0, 0x0a, 0, 'V', ''),
64 : ('MATCH', 2, 3, 0x04, 3, 'V', 'VRR'),
65 : ('DATE', 3, 3, 0x02, 3, 'V', 'VVV'),
66 : ('TIME', 3, 3, 0x02, 3, 'V', 'VVV'),
67 : ('DAY', 1, 1, 0x02, 1, 'V', 'V'),
68 : ('MONTH', 1, 1, 0x02, 1, 'V', 'V'),
69 : ('YEAR', 1, 1, 0x02, 1, 'V', 'V'),
70 : ('WEEKDAY', 1, 2, 0x04, 2, 'V', 'VV'),
71 : ('HOUR', 1, 1, 0x02, 1, 'V', 'V'),
72 : ('MINUTE', 1, 1, 0x02, 1, 'V', 'V'),
73 : ('SECOND', 1, 1, 0x02, 1, 'V', 'V'),
74 : ('NOW', 0, 0, 0x0a, 0, 'V', ''),
75 : ('AREAS', 1, 1, 0x02, 1, 'V', 'R'),
76 : ('ROWS', 1, 1, 0x02, 1, 'V', 'R'),
77 : ('COLUMNS', 1, 1, 0x02, 1, 'V', 'R'),
78 : ('OFFSET', 3, 5, 0x04, 5, 'R', 'RVVVV'),
82 : ('SEARCH', 2, 3, 0x04, 3, 'V', 'VVV'),
83 : ('TRANSPOSE', 1, 1, 0x02, 1, 'A', 'A'),
86 : ('TYPE', 1, 1, 0x02, 1, 'V', 'V'),
92 : ('SERIESSUM', 4, 4, 0x02, 4, 'V', 'VVVA'),
97 : ('ATAN2', 2, 2, 0x02, 2, 'V', 'VV'),
98 : ('ASIN', 1, 1, 0x02, 1, 'V', 'V'),
99 : ('ACOS', 1, 1, 0x02, 1, 'V', 'V'),
100: ('CHOOSE', 2, 30, 0x04, 2, 'V', 'VR'),
101: ('HLOOKUP', 3, 4, 0x04, 4, 'V', 'VRRV'),
102: ('VLOOKUP', 3, 4, 0x04, 4, 'V', 'VRRV'),
105: ('ISREF', 1, 1, 0x02, 1, 'V', 'R'),
109: ('LOG', 1, 2, 0x04, 2, 'V', 'VV'),
111: ('CHAR', 1, 1, 0x02, 1, 'V', 'V'),
112: ('LOWER', 1, 1, 0x02, 1, 'V', 'V'),
113: ('UPPER', 1, 1, 0x02, 1, 'V', 'V'),
114: ('PROPER', 1, 1, 0x02, 1, 'V', 'V'),
115: ('LEFT', 1, 2, 0x04, 2, 'V', 'VV'),
116: ('RIGHT', 1, 2, 0x04, 2, 'V', 'VV'),
117: ('EXACT', 2, 2, 0x02, 2, 'V', 'VV'),
118: ('TRIM', 1, 1, 0x02, 1, 'V', 'V'),
119: ('REPLACE', 4, 4, 0x02, 4, 'V', 'VVVV'),
120: ('SUBSTITUTE', 3, 4, 0x04, 4, 'V', 'VVVV'),
121: ('CODE', 1, 1, 0x02, 1, 'V', 'V'),
124: ('FIND', 2, 3, 0x04, 3, 'V', 'VVV'),
125: ('CELL', 1, 2, 0x0c, 2, 'V', 'VR'),
126: ('ISERR', 1, 1, 0x02, 1, 'V', 'V'),
127: ('ISTEXT', 1, 1, 0x02, 1, 'V', 'V'),
128: ('ISNUMBER', 1, 1, 0x02, 1, 'V', 'V'),
129: ('ISBLANK', 1, 1, 0x02, 1, 'V', 'V'),
130: ('T', 1, 1, 0x02, 1, 'V', 'R'),
131: ('N', 1, 1, 0x02, 1, 'V', 'R'),
140: ('DATEVALUE', 1, 1, 0x02, 1, 'V', 'V'),
141: ('TIMEVALUE', 1, 1, 0x02, 1, 'V', 'V'),
142: ('SLN', 3, 3, 0x02, 3, 'V', 'VVV'),
143: ('SYD', 4, 4, 0x02, 4, 'V', 'VVVV'),
144: ('DDB', 4, 5, 0x04, 5, 'V', 'VVVVV'),
148: ('INDIRECT', 1, 2, 0x0c, 2, 'R', 'VV'),
162: ('CLEAN', 1, 1, 0x02, 1, 'V', 'V'),
163: ('MDETERM', 1, 1, 0x02, 1, 'V', 'A'),
164: ('MINVERSE', 1, 1, 0x02, 1, 'A', 'A'),
165: ('MMULT', 2, 2, 0x02, 2, 'A', 'AA'),
167: ('IPMT', 4, 6, 0x04, 6, 'V', 'VVVVVV'),
168: ('PPMT', 4, 6, 0x04, 6, 'V', 'VVVVVV'),
169: ('COUNTA', 0, 30, 0x04, 1, 'V', 'R'),
183: ('PRODUCT', 0, 30, 0x04, 1, 'V', 'R'),
184: ('FACT', 1, 1, 0x02, 1, 'V', 'V'),
189: ('DPRODUCT', 3, 3, 0x02, 3, 'V', 'RRR'),
190: ('ISNONTEXT', 1, 1, 0x02, 1, 'V', 'V'),
193: ('STDEVP', 1, 30, 0x04, 1, 'V', 'R'),
194: ('VARP', 1, 30, 0x04, 1, 'V', 'R'),
195: ('DSTDEVP', 3, 3, 0x02, 3, 'V', 'RRR'),
196: ('DVARP', 3, 3, 0x02, 3, 'V', 'RRR'),
197: ('TRUNC', 1, 2, 0x04, 2, 'V', 'VV'),
198: ('ISLOGICAL', 1, 1, 0x02, 1, 'V', 'V'),
199: ('DCOUNTA', 3, 3, 0x02, 3, 'V', 'RRR'),
204: ('USDOLLAR', 1, 2, 0x04, 2, 'V', 'VV'),
205: ('FINDB', 2, 3, 0x04, 3, 'V', 'VVV'),
206: ('SEARCHB', 2, 3, 0x04, 3, 'V', 'VVV'),
207: ('REPLACEB', 4, 4, 0x02, 4, 'V', 'VVVV'),
208: ('LEFTB', 1, 2, 0x04, 2, 'V', 'VV'),
209: ('RIGHTB', 1, 2, 0x04, 2, 'V', 'VV'),
210: ('MIDB', 3, 3, 0x02, 3, 'V', 'VVV'),
211: ('LENB', 1, 1, 0x02, 1, 'V', 'V'),
212: ('ROUNDUP', 2, 2, 0x02, 2, 'V', 'VV'),
213: ('ROUNDDOWN', 2, 2, 0x02, 2, 'V', 'VV'),
214: ('ASC', 1, 1, 0x02, 1, 'V', 'V'),
215: ('DBCS', 1, 1, 0x02, 1, 'V', 'V'),
216: ('RANK', 2, 3, 0x04, 3, 'V', 'VRV'),
219: ('ADDRESS', 2, 5, 0x04, 5, 'V', 'VVVVV'),
220: ('DAYS360', 2, 3, 0x04, 3, 'V', 'VVV'),
221: ('TODAY', 0, 0, 0x0a, 0, 'V', ''),
222: ('VDB', 5, 7, 0x04, 7, 'V', 'VVVVVVV'),
227: ('MEDIAN', 1, 30, 0x04, 1, 'V', 'R'),
228: ('SUMPRODUCT', 1, 30, 0x04, 1, 'V', 'A'),
229: ('SINH', 1, 1, 0x02, 1, 'V', 'V'),
230: ('COSH', 1, 1, 0x02, 1, 'V', 'V'),
231: ('TANH', 1, 1, 0x02, 1, 'V', 'V'),
232: ('ASINH', 1, 1, 0x02, 1, 'V', 'V'),
233: ('ACOSH', 1, 1, 0x02, 1, 'V', 'V'),
234: ('ATANH', 1, 1, 0x02, 1, 'V', 'V'),
235: ('DGET', 3, 3, 0x02, 3, 'V', 'RRR'),
244: ('INFO', 1, 1, 0x02, 1, 'V', 'V'),
247: ('DB', 4, 5, 0x04, 5, 'V', 'VVVVV'),
252: ('FREQUENCY', 2, 2, 0x02, 2, 'A', 'RR'),
261: ('ERROR.TYPE', 1, 1, 0x02, 1, 'V', 'V'),
269: ('AVEDEV', 1, 30, 0x04, 1, 'V', 'R'),
270: ('BETADIST', 3, 5, 0x04, 1, 'V', 'V'),
271: ('GAMMALN', 1, 1, 0x02, 1, 'V', 'V'),
272: ('BETAINV', 3, 5, 0x04, 1, 'V', 'V'),
273: ('BINOMDIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
274: ('CHIDIST', 2, 2, 0x02, 2, 'V', 'VV'),
275: ('CHIINV', 2, 2, 0x02, 2, 'V', 'VV'),
276: ('COMBIN', 2, 2, 0x02, 2, 'V', 'VV'),
277: ('CONFIDENCE', 3, 3, 0x02, 3, 'V', 'VVV'),
278: ('CRITBINOM', 3, 3, 0x02, 3, 'V', 'VVV'),
279: ('EVEN', 1, 1, 0x02, 1, 'V', 'V'),
280: ('EXPONDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
281: ('FDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
282: ('FINV', 3, 3, 0x02, 3, 'V', 'VVV'),
283: ('FISHER', 1, 1, 0x02, 1, 'V', 'V'),
284: ('FISHERINV', 1, 1, 0x02, 1, 'V', 'V'),
285: ('FLOOR', 2, 2, 0x02, 2, 'V', 'VV'),
286: ('GAMMADIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
287: ('GAMMAINV', 3, 3, 0x02, 3, 'V', 'VVV'),
288: ('CEILING', 2, 2, 0x02, 2, 'V', 'VV'),
289: ('HYPGEOMDIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
290: ('LOGNORMDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
291: ('LOGINV', 3, 3, 0x02, 3, 'V', 'VVV'),
292: ('NEGBINOMDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
293: ('NORMDIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
294: ('NORMSDIST', 1, 1, 0x02, 1, 'V', 'V'),
295: ('NORMINV', 3, 3, 0x02, 3, 'V', 'VVV'),
296: ('NORMSINV', 1, 1, 0x02, 1, 'V', 'V'),
297: ('STANDARDIZE', 3, 3, 0x02, 3, 'V', 'VVV'),
298: ('ODD', 1, 1, 0x02, 1, 'V', 'V'),
299: ('PERMUT', 2, 2, 0x02, 2, 'V', 'VV'),
300: ('POISSON', 3, 3, 0x02, 3, 'V', 'VVV'),
301: ('TDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
302: ('WEIBULL', 4, 4, 0x02, 4, 'V', 'VVVV'),
303: ('SUMXMY2', 2, 2, 0x02, 2, 'V', 'AA'),
304: ('SUMX2MY2', 2, 2, 0x02, 2, 'V', 'AA'),
305: ('SUMX2PY2', 2, 2, 0x02, 2, 'V', 'AA'),
306: ('CHITEST', 2, 2, 0x02, 2, 'V', 'AA'),
307: ('CORREL', 2, 2, 0x02, 2, 'V', 'AA'),
308: ('COVAR', 2, 2, 0x02, 2, 'V', 'AA'),
309: ('FORECAST', 3, 3, 0x02, 3, 'V', 'VAA'),
310: ('FTEST', 2, 2, 0x02, 2, 'V', 'AA'),
311: ('INTERCEPT', 2, 2, 0x02, 2, 'V', 'AA'),
312: ('PEARSON', 2, 2, 0x02, 2, 'V', 'AA'),
313: ('RSQ', 2, 2, 0x02, 2, 'V', 'AA'),
314: ('STEYX', 2, 2, 0x02, 2, 'V', 'AA'),
315: ('SLOPE', 2, 2, 0x02, 2, 'V', 'AA'),
316: ('TTEST', 4, 4, 0x02, 4, 'V', 'AAVV'),
317: ('PROB', 3, 4, 0x04, 3, 'V', 'AAV'),
318: ('DEVSQ', 1, 30, 0x04, 1, 'V', 'R'),
319: ('GEOMEAN', 1, 30, 0x04, 1, 'V', 'R'),
320: ('HARMEAN', 1, 30, 0x04, 1, 'V', 'R'),
321: ('SUMSQ', 0, 30, 0x04, 1, 'V', 'R'),
322: ('KURT', 1, 30, 0x04, 1, 'V', 'R'),
323: ('SKEW', 1, 30, 0x04, 1, 'V', 'R'),
324: ('ZTEST', 2, 3, 0x04, 2, 'V', 'RV'),
325: ('LARGE', 2, 2, 0x02, 2, 'V', 'RV'),
326: ('SMALL', 2, 2, 0x02, 2, 'V', 'RV'),
327: ('QUARTILE', 2, 2, 0x02, 2, 'V', 'RV'),
328: ('PERCENTILE', 2, 2, 0x02, 2, 'V', 'RV'),
329: ('PERCENTRANK', 2, 3, 0x04, 2, 'V', 'RV'),
330: ('MODE', 1, 30, 0x04, 1, 'V', 'A'),
331: ('TRIMMEAN', 2, 2, 0x02, 2, 'V', 'RV'),
332: ('TINV', 2, 2, 0x02, 2, 'V', 'VV'),
336: ('CONCATENATE', 0, 30, 0x04, 1, 'V', 'V'),
337: ('POWER', 2, 2, 0x02, 2, 'V', 'VV'),
342: ('RADIANS', 1, 1, 0x02, 1, 'V', 'V'),
343: ('DEGREES', 1, 1, 0x02, 1, 'V', 'V'),
344: ('SUBTOTAL', 2, 30, 0x04, 2, 'V', 'VR'),
345: ('SUMIF', 2, 3, 0x04, 3, 'V', 'RVR'),
346: ('COUNTIF', 2, 2, 0x02, 2, 'V', 'RV'),
347: ('COUNTBLANK', 1, 1, 0x02, 1, 'V', 'R'),
350: ('ISPMT', 4, 4, 0x02, 4, 'V', 'VVVV'),
351: ('DATEDIF', 3, 3, 0x02, 3, 'V', 'VVV'),
352: ('DATESTRING', 1, 1, 0x02, 1, 'V', 'V'),
353: ('NUMBERSTRING', 2, 2, 0x02, 2, 'V', 'VV'),
354: ('ROMAN', 1, 2, 0x04, 2, 'V', 'VV'),
358: ('GETPIVOTDATA', 2, 2, 0x02, 2, 'V', 'RV'),
359: ('HYPERLINK', 1, 2, 0x04, 2, 'V', 'VV'),
360: ('PHONETIC', 1, 1, 0x02, 1, 'V', 'V'),
361: ('AVERAGEA', 1, 30, 0x04, 1, 'V', 'R'),
362: ('MAXA', 1, 30, 0x04, 1, 'V', 'R'),
363: ('MINA', 1, 30, 0x04, 1, 'V', 'R'),
364: ('STDEVPA', 1, 30, 0x04, 1, 'V', 'R'),
365: ('VARPA', 1, 30, 0x04, 1, 'V', 'R'),
366: ('STDEVA', 1, 30, 0x04, 1, 'V', 'R'),
367: ('VARA', 1, 30, 0x04, 1, 'V', 'R'),
368: ('BAHTTEXT', 1, 1, 0x02, 1, 'V', 'V'),
369: ('THAIDAYOFWEEK', 1, 1, 0x02, 1, 'V', 'V'),
370: ('THAIDIGIT', 1, 1, 0x02, 1, 'V', 'V'),
371: ('THAIMONTHOFYEAR', 1, 1, 0x02, 1, 'V', 'V'),
372: ('THAINUMSOUND', 1, 1, 0x02, 1, 'V', 'V'),
373: ('THAINUMSTRING', 1, 1, 0x02, 1, 'V', 'V'),
374: ('THAISTRINGLENGTH', 1, 1, 0x02, 1, 'V', 'V'),
375: ('ISTHAIDIGIT', 1, 1, 0x02, 1, 'V', 'V'),
376: ('ROUNDBAHTDOWN', 1, 1, 0x02, 1, 'V', 'V'),
377: ('ROUNDBAHTUP', 1, 1, 0x02, 1, 'V', 'V'),
378: ('THAIYEAR', 1, 1, 0x02, 1, 'V', 'V'),
379: ('RTD', 2, 5, 0x04, 1, 'V', 'V'),
}
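# Illustrative example (not part of the original module): func_defs[4] is
# ('SUM', 0, 30, 0x04, 1, 'V', 'R'), i.e. SUM accepts 0..30 arguments,
# returns a value ('V') and takes reference-class arguments ('R'); the
# tFunc/tFuncVar handlers below use the name and the min/max arg counts.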
tAttrNames = {
0x00: "Skip??", # seen in SAMPLES.XLS which shipped with Excel 5.0
0x01: "Volatile",
0x02: "If",
0x04: "Choose",
0x08: "Skip",
0x10: "Sum",
0x20: "Assign",
0x40: "Space",
0x41: "SpaceVolatile",
}
_error_opcodes = {}
for _x in [0x07, 0x08, 0x0A, 0x0B, 0x1C, 0x1D, 0x2F]:
_error_opcodes[_x] = 1
is_error_opcode = _error_opcodes.has_key
tRangeFuncs = (min, max, min, max, min, max)
tIsectFuncs = (max, min, max, min, max, min)
def do_box_funcs(box_funcs, boxa, boxb):
return tuple([
func(numa, numb)
for func, numa, numb in zip(box_funcs, boxa.coords, boxb.coords)
])
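# Illustrative example (not part of the original module): for two boxes
# whose .coords are a = (0, 1, 0, 2, 0, 2)   # A1:B2 on the first sheet
# and               b = (0, 1, 1, 3, 1, 3)   # B2:C3 on the first sheet
#   do_box_funcs(tRangeFuncs, a, b) -> (0, 1, 0, 3, 0, 3)   # A1:C3 (':')
#   do_box_funcs(tIsectFuncs, a, b) -> (0, 1, 1, 2, 1, 2)   # B2 (' ')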
def adjust_cell_addr_biff8(rowval, colval, reldelta, browx=None, bcolx=None):
row_rel = (colval >> 15) & 1
col_rel = (colval >> 14) & 1
rowx = rowval
colx = colval & 0xff
if reldelta:
if row_rel and rowx >= 32768:
rowx -= 65536
if col_rel and colx >= 128:
colx -= 256
else:
if row_rel:
rowx -= browx
if col_rel:
colx -= bcolx
return rowx, colx, row_rel, col_rel
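# Illustrative example (not part of the original module): in a "Method B"
# (reldelta) formula, rowval == 0xFFFF with the row-relative flag set
# (bit 15 of colval) sign-extends to rowx == -1, i.e. one row above the
# base cell, because 65535 >= 32768 triggers the 65536 subtraction above.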
def adjust_cell_addr_biff_le7(
rowval, colval, reldelta, browx=None, bcolx=None):
row_rel = (rowval >> 15) & 1
col_rel = (rowval >> 14) & 1
rowx = rowval & 0x3fff
colx = colval
if reldelta:
if row_rel and rowx >= 8192:
rowx -= 16384
if col_rel and colx >= 128:
colx -= 256
else:
if row_rel:
rowx -= browx
if col_rel:
colx -= bcolx
return rowx, colx, row_rel, col_rel
def get_cell_addr(data, pos, bv, reldelta, browx=None, bcolx=None):
if bv >= 80:
rowval, colval = unpack("<HH", data[pos:pos+4])
# print " rv=%04xh cv=%04xh" % (rowval, colval)
return adjust_cell_addr_biff8(rowval, colval, reldelta, browx, bcolx)
else:
rowval, colval = unpack("<HB", data[pos:pos+3])
# print " rv=%04xh cv=%04xh" % (rowval, colval)
return adjust_cell_addr_biff_le7(
rowval, colval, reldelta, browx, bcolx)
def get_cell_range_addr(data, pos, bv, reldelta, browx=None, bcolx=None):
if bv >= 80:
row1val, row2val, col1val, col2val = unpack("<HHHH", data[pos:pos+8])
# print " rv=%04xh cv=%04xh" % (row1val, col1val)
# print " rv=%04xh cv=%04xh" % (row2val, col2val)
res1 = adjust_cell_addr_biff8(row1val, col1val, reldelta, browx, bcolx)
res2 = adjust_cell_addr_biff8(row2val, col2val, reldelta, browx, bcolx)
return res1, res2
else:
row1val, row2val, col1val, col2val = unpack("<HHBB", data[pos:pos+6])
# print " rv=%04xh cv=%04xh" % (row1val, col1val)
# print " rv=%04xh cv=%04xh" % (row2val, col2val)
res1 = adjust_cell_addr_biff_le7(
row1val, col1val, reldelta, browx, bcolx)
res2 = adjust_cell_addr_biff_le7(
row2val, col2val, reldelta, browx, bcolx)
return res1, res2
def get_externsheet_local_range(bk, refx, blah=0):
try:
info = bk._externsheet_info[refx]
except IndexError:
print "!!! get_externsheet_local_range: refx=%d, not in range(%d)" \
% (refx, len(bk._externsheet_info))
return (-101, -101)
ref_recordx, ref_first_sheetx, ref_last_sheetx = info
if ref_recordx == bk._supbook_addins_inx:
if blah:
print "/// get_externsheet_local_range(refx=%d) -> addins %r" % (refx, info)
assert ref_first_sheetx == 0xFFFE == ref_last_sheetx
return (-5, -5)
if ref_recordx != bk._supbook_locals_inx:
if blah:
print "/// get_externsheet_local_range(refx=%d) -> external %r" % (refx, info)
return (-4, -4) # external reference
if ref_first_sheetx == 0xFFFE == ref_last_sheetx:
if blah:
print "/// get_externsheet_local_range(refx=%d) -> unspecified sheet %r" % (refx, info)
return (-1, -1) # internal reference, any sheet
if ref_first_sheetx == 0xFFFF == ref_last_sheetx:
if blah:
print "/// get_externsheet_local_range(refx=%d) -> deleted sheet(s)" % (refx, )
return (-2, -2) # internal reference, deleted sheet(s)
nsheets = len(bk._all_sheets_map)
if not(0 <= ref_first_sheetx <= ref_last_sheetx < nsheets):
if blah:
print "/// get_externsheet_local_range(refx=%d) -> %r" % (refx, info)
print "--- first/last sheet not in range(%d)" % nsheets
return (-102, -102) # stuffed up somewhere :-(
xlrd_sheetx1 = bk._all_sheets_map[ref_first_sheetx]
xlrd_sheetx2 = bk._all_sheets_map[ref_last_sheetx]
if not(0 <= xlrd_sheetx1 <= xlrd_sheetx2):
return (-3, -3) # internal reference, but to a macro sheet
return xlrd_sheetx1, xlrd_sheetx2
def get_externsheet_local_range_b57(
bk, raw_extshtx, ref_first_sheetx, ref_last_sheetx, blah=0):
if raw_extshtx > 0:
if blah:
print "/// get_externsheet_local_range_b57(raw_extshtx=%d) -> external" % raw_extshtx
return (-4, -4) # external reference
if ref_first_sheetx == -1 and ref_last_sheetx == -1:
return (-2, -2) # internal reference, deleted sheet(s)
nsheets = len(bk._all_sheets_map)
if not(0 <= ref_first_sheetx <= ref_last_sheetx < nsheets):
if blah:
print "/// get_externsheet_local_range_b57(%d, %d, %d) -> ???" \
% (raw_extshtx, ref_first_sheetx, ref_last_sheetx)
print "--- first/last sheet not in range(%d)" % nsheets
return (-103, -103) # stuffed up somewhere :-(
xlrd_sheetx1 = bk._all_sheets_map[ref_first_sheetx]
xlrd_sheetx2 = bk._all_sheets_map[ref_last_sheetx]
if not(0 <= xlrd_sheetx1 <= xlrd_sheetx2):
return (-3, -3) # internal reference, but to a macro sheet
return xlrd_sheetx1, xlrd_sheetx2
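# Illustrative summary (not part of the original module) of the sentinel
# pairs returned by the two functions above:
#   (-1, -1)  internal, unspecified sheet    (-2, -2)  deleted sheet(s)
#   (-3, -3)  macro sheet                    (-4, -4)  external workbook
#   (-5, -5)  add-in functions               (-10x, -10x)  malformed input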
class FormulaError(Exception):
pass
oBOOL = 3
oERR = 4
oMSNG = 5 # tMissArg
oNUM = 2
oREF = -1
oREL = -2
oSTRG = 1
oUNK = 0
okind_dict = {
-2: "oREL",
-1: "oREF",
0 : "oUNK",
1 : "oSTRG",
2 : "oNUM",
3 : "oBOOL",
4 : "oERR",
5 : "oMSNG",
}
listsep = ',' #### probably should depend on locale
##
# Used in evaluating formulas.
# The following table describes the kinds and how their values
# are represented.</p>
#
# <table border="1" cellpadding="7">
# <tr>
# <th>Kind symbol</th>
# <th>Kind number</th>
# <th>Value representation</th>
# </tr>
# <tr>
# <td>oBOOL</td>
# <td align="center">3</td>
# <td>integer: 0 => False; 1 => True</td>
# </tr>
# <tr>
# <td>oERR</td>
# <td align="center">4</td>
# <td>None, or an int error code (same as XL_CELL_ERROR in the Cell class).
# </td>
# </tr>
# <tr>
# <td>oMSNG</td>
# <td align="center">5</td>
# <td>Used by Excel as a placeholder for a missing (not supplied) function
# argument. Should *not* appear as a final formula result. Value is None.</td>
# </tr>
# <tr>
# <td>oNUM</td>
# <td align="center">2</td>
# <td>A float. Note that there is no way of distinguishing dates.</td>
# </tr>
# <tr>
# <td>oREF</td>
# <td align="center">-1</td>
# <td>The value is either None or a non-empty list of
# absolute Ref3D instances.<br>
# </td>
# </tr>
# <tr>
# <td>oREL</td>
# <td align="center">-2</td>
# <td>The value is None or a non-empty list of
# fully or partially relative Ref3D instances.
# </td>
# </tr>
# <tr>
# <td>oSTRG</td>
# <td align="center">1</td>
# <td>A Unicode string.</td>
# </tr>
# <tr>
# <td>oUNK</td>
# <td align="center">0</td>
# <td>The kind is unknown or ambiguous. The value is None</td>
# </tr>
# </table>
#<p></p>
class Operand(object):
##
# None means that the actual value of the operand is a variable
# (depends on cell data), not a constant.
value = None
##
# oUNK means that the kind of operand is not known unambiguously.
kind = oUNK
##
# The reconstituted text of the original formula. Function names will be
# in English irrespective of the original language, which doesn't seem
# to be recorded anywhere. The separator is ",", not ";" or whatever else
# might be more appropriate for the end-user's locale; patches welcome.
text = '?'
def __init__(self, akind=None, avalue=None, arank=0, atext='?'):
if akind is not None:
self.kind = akind
if avalue is not None:
self.value = avalue
self.rank = arank
# rank is an internal gizmo (operator precedence);
# it's used in reconstructing formula text.
self.text = atext
def __repr__(self):
kind_text = okind_dict.get(self.kind, "?Unknown kind?")
return "Operand(kind=%s, value=%r, text=%r)" \
% (kind_text, self.value, self.text)
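# Illustrative example (not part of the original module): the tNum handler
# below pushes a constant like
#   Operand(oNUM, 3.14, LEAF_RANK, '3.14')
# which repr()s as Operand(kind=oNUM, value=3.14, text='3.14').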
if CAN_SUBCLASS_BUILTIN:
_ref3d_base = tuple
else:
_ref3d_base = object
##
# <p>Represents an absolute or relative 3-dimensional reference to a box
# of one or more cells.<br />
# -- New in version 0.6.0
# </p>
#
# <p>The <i>coords</i> attribute is a tuple of the form:<br />
# (shtxlo, shtxhi, rowxlo, rowxhi, colxlo, colxhi)<br />
# where 0 <= thingxlo <= thingx < thingxhi.<br />
# Note that it is quite possible to have thingx > nthings; for example
# Print_Titles could have colxhi == 256 and/or rowxhi == 65536
# irrespective of how many columns/rows are actually used in the worksheet.
# The caller will need to decide how to handle this situation.
# Keyword: IndexError :-)
# </p>
#
# <p>The components of the coords attribute are also available as individual
# attributes: shtxlo, shtxhi, rowxlo, rowxhi, colxlo, and colxhi.</p>
#
# <p>The <i>relflags</i> attribute is a 6-tuple of flags which indicate whether
# the corresponding (sheet|row|col)(lo|hi) is relative (1) or absolute (0).<br>
# Note that there is necessarily no information available as to what cell(s)
# the reference could possibly be relative to. The caller must decide what if
# any use to make of oREL operands. Note also that a partially relative
# reference may well be a typo.
# For example, define name A1Z10 as $a$1:$z10 (missing $ after z)
# while the cursor is on cell Sheet3!A27.<br>
# The resulting Ref3D instance will have coords = (2, 3, 0, -16, 0, 26)
# and relflags = (0, 0, 0, 1, 0, 0).<br>
# So far, only one possibility of a sheet-relative component in
# a reference has been noticed: a 2D reference located in the "current sheet".
# <br /> This will appear as coords = (0, 1, ...) and relflags = (1, 1, ...).
class Ref3D(_ref3d_base):
def __init__(self, atuple):
self.coords = atuple[0:6]
self.relflags = atuple[6:12]
if not self.relflags:
self.relflags = (0, 0, 0, 0, 0, 0)
(self.shtxlo, self.shtxhi,
self.rowxlo, self.rowxhi,
self.colxlo, self.colxhi) = self.coords
def __repr__(self):
if not self.relflags or self.relflags == (0, 0, 0, 0, 0, 0):
return "Ref3D(coords=%r)" % (self.coords, )
else:
return "Ref3D(coords=%r, relflags=%r)" \
% (self.coords, self.relflags)
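# Illustrative example (not part of the original module):
#   Ref3D((0, 1, 0, 1, 0, 1))
# denotes cell A1 on the first sheet: the six values unpack into
# shtxlo=0, shtxhi=1, rowxlo=0, rowxhi=1, colxlo=0, colxhi=1, and
# relflags defaults to the all-absolute (0, 0, 0, 0, 0, 0).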
tAdd = 0x03
tSub = 0x04
tMul = 0x05
tDiv = 0x06
tPower = 0x07
tConcat = 0x08
tLT, tLE, tEQ, tGE, tGT, tNE = range(0x09, 0x0F)
import operator as opr
def nop(x):
return x
def _opr_pow(x, y): return x ** y
def _opr_lt(x, y): return x < y
def _opr_le(x, y): return x <= y
def _opr_eq(x, y): return x == y
def _opr_ge(x, y): return x >= y
def _opr_gt(x, y): return x > y
def _opr_ne(x, y): return x != y
def num2strg(num):
"""Attempt to emulate Excel's default conversion
from number to string.
"""
s = str(num)
if s.endswith(".0"):
s = s[:-2]
return s
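# e.g. (illustrative) num2strg(3.0) -> '3' but num2strg(3.25) -> '3.25'.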
_arith_argdict = {oNUM: nop, oSTRG: float}
_cmp_argdict = {oNUM: nop, oSTRG: nop}
# Seems no conversions done on relops; in Excel, "1" > 9 produces TRUE.
_strg_argdict = {oNUM:num2strg, oSTRG:nop}
binop_rules = {
tAdd: (_arith_argdict, oNUM, opr.add, 30, '+'),
tSub: (_arith_argdict, oNUM, opr.sub, 30, '-'),
tMul: (_arith_argdict, oNUM, opr.mul, 40, '*'),
tDiv: (_arith_argdict, oNUM, opr.div, 40, '/'),
tPower: (_arith_argdict, oNUM, _opr_pow, 50, '^',),
tConcat:(_strg_argdict, oSTRG, opr.add, 20, '&'),
tLT: (_cmp_argdict, oBOOL, _opr_lt, 10, '<'),
tLE: (_cmp_argdict, oBOOL, _opr_le, 10, '<='),
tEQ: (_cmp_argdict, oBOOL, _opr_eq, 10, '='),
tGE: (_cmp_argdict, oBOOL, _opr_ge, 10, '>='),
tGT: (_cmp_argdict, oBOOL, _opr_gt, 10, '>'),
tNE: (_cmp_argdict, oBOOL, _opr_ne, 10, '<>'),
}
unop_rules = {
0x13: (lambda x: -x, 70, '-', ''), # unary minus
0x12: (lambda x: x, 70, '+', ''), # unary plus
0x14: (lambda x: x / 100.0, 60, '', '%'),# percent
}
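# Illustrative example (not part of the original module): the tPercent
# entry (0x14) means a trailing % divides by 100, so the token pair
# tNum(50.0), tPercent evaluates to 0.5 and reconstitutes as the text
# '50.0%'.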
LEAF_RANK = 90
FUNC_RANK = 90
STACK_ALARM_LEVEL = 5
STACK_PANIC_LEVEL = 10
def evaluate_name_formula(bk, nobj, namex, blah=0, level=0):
if level > STACK_ALARM_LEVEL:
blah = 1
data = nobj.raw_formula
fmlalen = nobj.basic_formula_len
bv = bk.biff_version
reldelta = 1 # All defined name formulas use "Method B" [OOo docs]
if blah:
print "::: evaluate_name_formula %r %r %d %d %r level=%d" \
% (namex, nobj.name, fmlalen, bv, data, level)
hex_char_dump(data, 0, fmlalen)
if level > STACK_PANIC_LEVEL:
raise XLRDError("Excessive indirect references in NAME formula")
sztab = szdict[bv]
pos = 0
stack = []
any_rel = 0
any_err = 0
any_external = 0
unk_opnd = Operand(oUNK, None)
error_opnd = Operand(oERR, None)
spush = stack.append
def do_binop(opcd, stk):
assert len(stk) >= 2
bop = stk.pop()
aop = stk.pop()
argdict, result_kind, func, rank, sym = binop_rules[opcd]
otext = ''.join([
'('[:aop.rank < rank],
aop.text,
')'[:aop.rank < rank],
sym,
'('[:bop.rank < rank],
bop.text,
')'[:bop.rank < rank],
])
resop = Operand(result_kind, None, rank, otext)
try:
bconv = argdict[bop.kind]
aconv = argdict[aop.kind]
except KeyError:
stk.append(resop)
return
if bop.value is None or aop.value is None:
stk.append(resop)
return
bval = bconv(bop.value)
aval = aconv(aop.value)
result = func(aval, bval)
if result_kind == oBOOL:
result = intbool(result) # -> 1 or 0
resop.value = result
stk.append(resop)
def do_unaryop(opcode, arglist, result_kind, stk):
assert len(stk) >= 1
aop = stk.pop()
assert aop.kind in arglist
val = aop.value
func, rank, sym1, sym2 = unop_rules[opcode]
otext = ''.join([
sym1,
'('[:aop.rank < rank],
aop.text,
')'[:aop.rank < rank],
sym2,
])
if val is not None:
val = func(val)
stk.append(Operand(result_kind, val, rank, otext))
def not_in_name_formula(op_arg, oname_arg):
msg = "ERROR *** Token 0x%02x (%s) found in NAME formula" \
% (op_arg, oname_arg)
raise FormulaError(msg)
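# Illustrative note (not part of the original module): BIFF formulas are
# RPN, so a defined name holding =1+2 arrives as the token stream
#   tInt(1)  tInt(2)  tAdd
# and the loop below pushes Operand(oNUM, 1.0) and Operand(oNUM, 2.0),
# then do_binop pops both and pushes their sum.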
if fmlalen == 0:
stack = [unk_opnd]
while 0 <= pos < fmlalen:
op = ord(data[pos])
opcode = op & 0x1f
optype = (op & 0x60) >> 5
if optype:
opx = opcode + 32
else:
opx = opcode
oname = onames[opx] # + [" RVA"][optype]
sz = sztab[opx]
if blah:
print "Pos:%d Op:0x%02x Name:t%s Sz:%d opcode:%02xh optype:%02xh" \
% (pos, op, oname, sz, opcode, optype)
print "Stack =", stack
if sz == -2:
msg = 'ERROR *** Unexpected token 0x%02x ("%s"); biff_version=%d' \
% (op, oname, bv)
raise FormulaError(msg)
if not optype:
if 0x00 <= opcode <= 0x02: # unk_opnd, tExp, tTbl
not_in_name_formula(op, oname)
elif 0x03 <= opcode <= 0x0E:
# Add, Sub, Mul, Div, Power
# tConcat
# tLT, ..., tNE
do_binop(opcode, stack)
elif opcode == 0x0F: # tIsect
if blah: print >> bk.logfile, "tIsect pre", stack
assert len(stack) >= 2
bop = stack.pop()
aop = stack.pop()
sym = ' '
rank = 80 ########## check #######
otext = ''.join([
'('[:aop.rank < rank],
aop.text,
')'[:aop.rank < rank],
sym,
'('[:bop.rank < rank],
bop.text,
')'[:bop.rank < rank],
])
res = Operand(oREF)
res.text = otext
if bop.kind == oERR or aop.kind == oERR:
res.kind = oERR
elif bop.kind == oUNK or aop.kind == oUNK:
# This can happen with undefined
# (go search in the current sheet) labels.
# For example =Bob Sales
# Each label gets a NAME record with an empty formula (!)
# Evaluation of the tName token classifies it as oUNK
# res.kind = oREF
pass
elif bop.kind == oREF == aop.kind:
if aop.value is not None and bop.value is not None:
assert len(aop.value) == 1
assert len(bop.value) == 1
coords = do_box_funcs(
tIsectFuncs, aop.value[0], bop.value[0])
res.value = [Ref3D(coords)]
elif bop.kind == oREL == aop.kind:
res.kind = oREL
if aop.value is not None and bop.value is not None:
assert len(aop.value) == 1
assert len(bop.value) == 1
coords = do_box_funcs(
tIsectFuncs, aop.value[0], bop.value[0])
relfa = aop.value[0].relflags
relfb = bop.value[0].relflags
if relfa == relfb:
res.value = [Ref3D(coords + relfa)]
else:
pass
spush(res)
if blah: print >> bk.logfile, "tIsect post", stack
elif opcode == 0x10: # tList
if blah: print >> bk.logfile, "tList pre", stack
assert len(stack) >= 2
bop = stack.pop()
aop = stack.pop()
sym = ','
rank = 80 ########## check #######
otext = ''.join([
'('[:aop.rank < rank],
aop.text,
')'[:aop.rank < rank],
sym,
'('[:bop.rank < rank],
bop.text,
')'[:bop.rank < rank],
])
res = Operand(oREF, None, rank, otext)
if bop.kind == oERR or aop.kind == oERR:
res.kind = oERR
elif bop.kind in (oREF, oREL) and aop.kind in (oREF, oREL):
res.kind = oREF
if aop.kind == oREL or bop.kind == oREL:
res.kind = oREL
if aop.value is not None and bop.value is not None:
assert len(aop.value) >= 1
assert len(bop.value) == 1
res.value = aop.value + bop.value
else:
pass
spush(res)
if blah: print >> bk.logfile, "tList post", stack
elif opcode == 0x11: # tRange
if blah: print >> bk.logfile, "tRange pre", stack
assert len(stack) >= 2
bop = stack.pop()
aop = stack.pop()
sym = ':'
rank = 80 ########## check #######
otext = ''.join([
'('[:aop.rank < rank],
aop.text,
')'[:aop.rank < rank],
sym,
'('[:bop.rank < rank],
bop.text,
')'[:bop.rank < rank],
])
res = Operand(oREF, None, rank, otext)
if bop.kind == oERR or aop.kind == oERR:
res.kind = oERR
elif bop.kind == oREF == aop.kind:
if aop.value is not None and bop.value is not None:
assert len(aop.value) == 1
assert len(bop.value) == 1
coords = do_box_funcs(
tRangeFuncs, aop.value[0], bop.value[0])
res.value = [Ref3D(coords)]
elif bop.kind == oREL == aop.kind:
res.kind = oREL
if aop.value is not None and bop.value is not None:
assert len(aop.value) == 1
assert len(bop.value) == 1
coords = do_box_funcs(
tRangeFuncs, aop.value[0], bop.value[0])
relfa = aop.value[0].relflags
relfb = bop.value[0].relflags
if relfa == relfb:
res.value = [Ref3D(coords + relfa)]
else:
pass
spush(res)
if blah: print >> bk.logfile, "tRange post", stack
elif 0x12 <= opcode <= 0x14: # tUplus, tUminus, tPercent
do_unaryop(opcode, (oUNK, oNUM,), oNUM, stack)
elif opcode == 0x15: # tParen
# source cosmetics
pass
elif opcode == 0x16: # tMissArg
spush(Operand(oMSNG, None, LEAF_RANK, ''))
elif opcode == 0x17: # tStr
if bv <= 70:
strg, newpos = unpack_string_update_pos(
data, pos+1, bk.encoding, lenlen=1)
else:
strg, newpos = unpack_unicode_update_pos(
data, pos+1, lenlen=1)
sz = newpos - pos
if blah: print >> bk.logfile, " sz=%d strg=%r" % (sz, strg)
text = '"' + strg.replace('"', '""') + '"'
spush(Operand(oSTRG, strg, LEAF_RANK, text))
elif opcode == 0x18: # tExtended
# new with BIFF 8
assert bv >= 80
# not in OOo docs
raise FormulaError("tExtended token not implemented")
elif opcode == 0x19: # tAttr
subop, nc = unpack("<BH", data[pos+1:pos+4])
subname = tAttrNames.get(subop, "??Unknown??")
if subop == 0x04: # Choose
sz = nc * 2 + 6
elif subop == 0x10: # Sum (single arg)
sz = 4
if blah: print >> bk.logfile, "tAttrSum", stack
assert len(stack) >= 1
aop = stack[-1]
otext = 'SUM(%s)' % aop.text
stack[-1] = Operand(oNUM, None, FUNC_RANK, otext)
else:
sz = 4
if blah:
print " subop=%02xh subname=t%s sz=%d nc=%02xh" \
% (subop, subname, sz, nc)
elif 0x1A <= opcode <= 0x1B: # tSheet, tEndSheet
assert bv < 50
raise FormulaError("tSheet & tEndsheet tokens not implemented")
elif 0x1C <= opcode <= 0x1F: # tErr, tBool, tInt, tNum
inx = opcode - 0x1C
nb = [1, 1, 2, 8][inx]
kind = [oERR, oBOOL, oNUM, oNUM][inx]
value, = unpack("<" + "BBHd"[inx], data[pos+1:pos+1+nb])
if inx == 2: # tInt
value = float(value)
text = str(value)
elif inx == 3: # tNum
text = str(value)
elif inx == 1: # tBool
text = ('FALSE', 'TRUE')[value]
else:
text = '"' +error_text_from_code[value] + '"'
spush(Operand(kind, value, LEAF_RANK, text))
else:
raise FormulaError("Unhandled opcode: 0x%02x" % opcode)
if sz <= 0:
raise FormulaError("Size not set for opcode 0x%02x" % opcode)
pos += sz
continue
if opcode == 0x00: # tArray
spush(unk_opnd)
elif opcode == 0x01: # tFunc
nb = 1 + int(bv >= 40)
funcx = unpack("<" + " BH"[nb], data[pos+1:pos+1+nb])[0]
func_attrs = func_defs.get(funcx, None)
if not func_attrs:
print >> bk.logfile, "*** formula/tFunc unknown FuncID:%d" \
% funcx
spush(unk_opnd)
else:
func_name, nargs = func_attrs[:2]
if blah:
print " FuncID=%d name=%s nargs=%d" \
% (funcx, func_name, nargs)
assert len(stack) >= nargs
argtext = listsep.join([arg.text for arg in stack[-nargs:]])
otext = "%s(%s)" % (func_name, argtext)
del stack[-nargs:]
res = Operand(oUNK, None, FUNC_RANK, otext)
spush(res)
elif opcode == 0x02: #tFuncVar
nb = 1 + int(bv >= 40)
nargs, funcx = unpack("<B" + " BH"[nb], data[pos+1:pos+2+nb])
prompt, nargs = divmod(nargs, 128)
macro, funcx = divmod(funcx, 32768)
if blah:
print " FuncID=%d nargs=%d macro=%d prompt=%d" \
% (funcx, nargs, macro, prompt)
func_attrs = func_defs.get(funcx, None)
if not func_attrs:
print >> bk.logfile, "*** formula/tFuncVar unknown FuncID:%d" \
% funcx
spush(unk_opnd)
else:
func_name, minargs, maxargs = func_attrs[:3]
if blah:
print " name: %r, min~max args: %d~%d" \
% (func_name, minargs, maxargs)
assert minargs <= nargs <= maxargs
assert len(stack) >= nargs
argtext = listsep.join([arg.text for arg in stack[-nargs:]])
otext = "%s(%s)" % (func_name, argtext)
res = Operand(oUNK, None, FUNC_RANK, otext)
if funcx == 1: # IF
testarg = stack[-nargs]
if testarg.kind not in (oNUM, oBOOL):
if blah and testarg.kind != oUNK:
print "IF testarg kind?"
elif testarg.value not in (0, 1):
if blah and testarg.value is not None:
print "IF testarg value?"
else:
if nargs == 2 and not testarg.value:
# IF(FALSE, tv) => FALSE
res.kind, res.value = oBOOL, 0
else:
respos = -nargs + 2 - int(testarg.value)
chosen = stack[respos]
if chosen.kind == oMSNG:
res.kind, res.value = oNUM, 0
else:
res.kind, res.value = chosen.kind, chosen.value
if blah:
print "$$$$$$ IF => constant"
elif funcx == 100: # CHOOSE
testarg = stack[-nargs]
if testarg.kind == oNUM:
if 1 <= testarg.value < nargs:
chosen = stack[-nargs + int(testarg.value)]
if chosen.kind == oMSNG:
res.kind, res.value = oNUM, 0
else:
res.kind, res.value = chosen.kind, chosen.value
del stack[-nargs:]
spush(res)
elif opcode == 0x03: #tName
tgtnamex = unpack("<H", data[pos+1:pos+3])[0] - 1
# Only change with BIFF version is number of trailing UNUSED bytes!
if blah: print >> bk.logfile, " tgtnamex=%d" % tgtnamex
tgtobj = bk.name_obj_list[tgtnamex]
if not tgtobj.evaluated:
### recursive ###
evaluate_name_formula(bk, tgtobj, tgtnamex, blah, level+1)
if tgtobj.macro or tgtobj.binary \
or tgtobj.any_err:
if blah:
tgtobj.dump(
bk.logfile,
header="!!! tgtobj has problems!!!",
footer="----------- --------",
)
res = Operand(oUNK, None)
any_err = any_err or tgtobj.macro or tgtobj.binary or tgtobj.any_err
any_rel = any_rel or tgtobj.any_rel
else:
assert len(tgtobj.stack) == 1
res = copy.deepcopy(tgtobj.stack[0])
res.rank = LEAF_RANK
if tgtobj.scope == -1:
res.text = tgtobj.name
else:
res.text = "%s!%s" \
% (bk._sheet_names[tgtobj.scope], tgtobj.name)
if blah:
print >> bk.logfile, " tName: setting text to", repr(res.text)
spush(res)
elif opcode == 0x04: # tRef
# not_in_name_formula(op, oname)
res = get_cell_addr(data, pos+1, bv, reldelta)
if blah: print >> bk.logfile, " ", res
rowx, colx, row_rel, col_rel = res
shx1 = shx2 = 0 ####### N.B. relative to the CURRENT SHEET
any_rel = 1
coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
if blah: print >> bk.logfile, " ", coords
res = Operand(oUNK, None)
if optype == 1:
relflags = (1, 1, row_rel, row_rel, col_rel, col_rel)
res = Operand(oREL, [Ref3D(coords + relflags)])
spush(res)
elif opcode == 0x05: # tArea
# not_in_name_formula(op, oname)
res1, res2 = get_cell_range_addr(data, pos+1, bv, reldelta)
if blah: print >> bk.logfile, " ", res1, res2
rowx1, colx1, row_rel1, col_rel1 = res1
rowx2, colx2, row_rel2, col_rel2 = res2
shx1 = shx2 = 0 ####### N.B. relative to the CURRENT SHEET
any_rel = 1
coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
if blah: print >> bk.logfile, " ", coords
res = Operand(oUNK, None)
if optype == 1:
relflags = (1, 1, row_rel1, row_rel2, col_rel1, col_rel2)
res = Operand(oREL, [Ref3D(coords + relflags)])
spush(res)
elif opcode == 0x06: # tMemArea
not_in_name_formula(op, oname)
elif opcode == 0x09: # tMemFunc
nb = unpack("<H", data[pos+1:pos+3])[0]
if blah: print >> bk.logfile, " %d bytes of cell ref formula" % nb
# no effect on stack
elif opcode == 0x0C: #tRefN
not_in_name_formula(op, oname)
# res = get_cell_addr(data, pos+1, bv, reldelta=1)
# # note *ALL* tRefN usage has signed offset for relative addresses
# any_rel = 1
# if blah: print >> bk.logfile, " ", res
# spush(res)
elif opcode == 0x0D: #tAreaN
not_in_name_formula(op, oname)
# res = get_cell_range_addr(data, pos+1, bv, reldelta=1)
# # note *ALL* tAreaN usage has signed offset for relative addresses
# any_rel = 1
# if blah: print >> bk.logfile, " ", res
elif opcode == 0x1A: # tRef3d
if bv >= 80:
res = get_cell_addr(data, pos+3, bv, reldelta)
refx = unpack("<H", data[pos+1:pos+3])[0]
shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
else:
res = get_cell_addr(data, pos+15, bv, reldelta)
raw_extshtx, raw_shx1, raw_shx2 = \
unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
if blah:
print >> bk.logfile, "tRef3d", raw_extshtx, raw_shx1, raw_shx2
shx1, shx2 = get_externsheet_local_range_b57(
bk, raw_extshtx, raw_shx1, raw_shx2, blah)
rowx, colx, row_rel, col_rel = res
is_rel = row_rel or col_rel
any_rel = any_rel or is_rel
coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
any_err |= shx1 < -1
if blah: print >> bk.logfile, " ", coords
res = Operand(oUNK, None)
if is_rel:
relflags = (0, 0, row_rel, row_rel, col_rel, col_rel)
ref3d = Ref3D(coords + relflags)
res.kind = oREL
res.text = rangename3drel(bk, ref3d)
else:
ref3d = Ref3D(coords)
res.kind = oREF
res.text = rangename3d(bk, ref3d)
res.rank = LEAF_RANK
if optype == 1:
res.value = [ref3d]
spush(res)
elif opcode == 0x1B: # tArea3d
if bv >= 80:
res1, res2 = get_cell_range_addr(data, pos+3, bv, reldelta)
refx = unpack("<H", data[pos+1:pos+3])[0]
shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
else:
res1, res2 = get_cell_range_addr(data, pos+15, bv, reldelta)
raw_extshtx, raw_shx1, raw_shx2 = \
unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
if blah:
print >> bk.logfile, "tArea3d", raw_extshtx, raw_shx1, raw_shx2
shx1, shx2 = get_externsheet_local_range_b57(
bk, raw_extshtx, raw_shx1, raw_shx2, blah)
any_err |= shx1 < -1
rowx1, colx1, row_rel1, col_rel1 = res1
rowx2, colx2, row_rel2, col_rel2 = res2
is_rel = row_rel1 or col_rel1 or row_rel2 or col_rel2
any_rel = any_rel or is_rel
coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
if blah: print >> bk.logfile, " ", coords
res = Operand(oUNK, None)
if is_rel:
relflags = (0, 0, row_rel1, row_rel2, col_rel1, col_rel2)
ref3d = Ref3D(coords + relflags)
res.kind = oREL
res.text = rangename3drel(bk, ref3d)
else:
ref3d = Ref3D(coords)
res.kind = oREF
res.text = rangename3d(bk, ref3d)
res.rank = LEAF_RANK
if optype == 1:
res.value = [ref3d]
spush(res)
elif opcode == 0x19: # tNameX
dodgy = 0
res = Operand(oUNK, None)
if bv >= 80:
refx, tgtnamex = unpack("<HH", data[pos+1:pos+5])
tgtnamex -= 1
origrefx = refx
else:
refx, tgtnamex = unpack("<hxxxxxxxxH", data[pos+1:pos+13])
tgtnamex -= 1
origrefx = refx
if refx > 0:
refx -= 1
elif refx < 0:
refx = -refx - 1
else:
dodgy = 1
if blah:
print >> bk.logfile, \
" origrefx=%d refx=%d tgtnamex=%d dodgy=%d" \
% (origrefx, refx, tgtnamex, dodgy)
if tgtnamex == namex:
if blah: print >> bk.logfile, "!!!! Self-referential !!!!"
dodgy = any_err = 1
if not dodgy:
if bv >= 80:
shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
elif origrefx > 0:
shx1, shx2 = (-4, -4) # external ref
else:
exty = bk._externsheet_type_b57[refx]
if exty == 4: # non-specific sheet in own doc't
shx1, shx2 = (-1, -1) # internal, any sheet
else:
shx1, shx2 = (-666, -666)
if dodgy or shx1 < -1:
otext = "<<Name #%d in external(?) file #%d>>" \
% (tgtnamex, origrefx)
res = Operand(oUNK, None, LEAF_RANK, otext)
else:
tgtobj = bk.name_obj_list[tgtnamex]
if not tgtobj.evaluated:
### recursive ###
evaluate_name_formula(bk, tgtobj, tgtnamex, blah, level+1)
if tgtobj.macro or tgtobj.binary \
or tgtobj.any_err:
if blah:
tgtobj.dump(
bk.logfile,
header="!!! bad tgtobj !!!",
footer="------------------",
)
res = Operand(oUNK, None)
any_err = any_err or tgtobj.macro or tgtobj.binary or tgtobj.any_err
any_rel = any_rel or tgtobj.any_rel
else:
assert len(tgtobj.stack) == 1
res = copy.deepcopy(tgtobj.stack[0])
res.rank = LEAF_RANK
if tgtobj.scope == -1:
res.text = tgtobj.name
else:
res.text = "%s!%s" \
% (bk._sheet_names[tgtobj.scope], tgtobj.name)
if blah:
print >> bk.logfile, " tNameX: setting text to", repr(res.text)
spush(res)
elif is_error_opcode(opcode):
any_err = 1
spush(error_opnd)
else:
if blah:
print >> bk.logfile, "FORMULA: /// Not handled yet: t" + oname
any_err = 1
if sz <= 0:
raise FormulaError("Fatal: token size is not positive")
pos += sz
any_rel = not not any_rel
if blah:
print "End of formula. level=%d any_rel=%d any_err=%d stack=%r" % \
(level, not not any_rel, any_err, stack)
if len(stack) >= 2:
print "*** Stack has unprocessed args"
print
nobj.stack = stack
if len(stack) != 1:
nobj.result = None
else:
nobj.result = stack[0]
nobj.any_rel = any_rel
nobj.any_err = any_err
nobj.any_external = any_external
nobj.evaluated = 1
#### under construction ####
def decompile_formula(bk, fmla, fmlalen,
reldelta, browx=None, bcolx=None,
# browx & bcolx are required when reldelta == 0
blah=0, level=0):
if level > STACK_ALARM_LEVEL:
blah = 1
data = fmla
bv = bk.biff_version
if blah:
print "::: decompile_formula len=%d reldelta=%d %r level=%d" \
% (fmlalen, reldelta, data, level)
hex_char_dump(data, 0, fmlalen)
if level > STACK_PANIC_LEVEL:
raise XLRDError("Excessive indirect references in formula")
sztab = szdict[bv]
pos = 0
stack = []
any_rel = 0
any_err = 0
any_external = 0
unk_opnd = Operand(oUNK, None)
error_opnd = Operand(oERR, None)
spush = stack.append
def do_binop(opcd, stk):
assert len(stk) >= 2
bop = stk.pop()
aop = stk.pop()
argdict, result_kind, func, rank, sym = binop_rules[opcd]
otext = ''.join([
'('[:aop.rank < rank],
aop.text,
')'[:aop.rank < rank],
sym,
'('[:bop.rank < rank],
bop.text,
')'[:bop.rank < rank],
])
resop = Operand(result_kind, None, rank, otext)
stk.append(resop)
def do_unaryop(opcode, arglist, result_kind, stk):
assert len(stk) >= 1
aop = stk.pop()
assert aop.kind in arglist
func, rank, sym1, sym2 = unop_rules[opcode]
otext = ''.join([
sym1,
'('[:aop.rank < rank],
aop.text,
')'[:aop.rank < rank],
sym2,
])
stk.append(Operand(result_kind, None, rank, otext))
def not_in_name_formula(op_arg, oname_arg):
msg = "ERROR *** Unexpected token 0x%02x (%s) found in formula" \
% (op_arg, oname_arg)
# print msg
raise FormulaError(msg)
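# Illustrative note (not part of the original module): unlike
# evaluate_name_formula above, this routine only reconstructs formula
# *text* -- every Operand is pushed with value=None -- so its output is
# suitable for display, not for evaluation.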
if fmlalen == 0:
stack = [unk_opnd]
while 0 <= pos < fmlalen:
op = ord(data[pos])
opcode = op & 0x1f
optype = (op & 0x60) >> 5
if optype:
opx = opcode + 32
else:
opx = opcode
oname = onames[opx] # + [" RVA"][optype]
sz = sztab[opx]
if blah:
print "Pos:%d Op:0x%02x opname:t%s Sz:%d opcode:%02xh optype:%02xh" \
% (pos, op, oname, sz, opcode, optype)
print "Stack =", stack
if sz == -2:
msg = 'ERROR *** Unexpected token 0x%02x ("%s"); biff_version=%d' \
% (op, oname, bv)
raise FormulaError(msg)
if not optype:
if 0x00 <= opcode <= 0x02: # unk_opnd, tExp, tTbl
not_in_name_formula(op, oname)
elif 0x03 <= opcode <= 0x0E:
# Add, Sub, Mul, Div, Power
# tConcat
# tLT, ..., tNE
do_binop(opcode, stack)
elif opcode == 0x0F: # tIsect
if blah: print >> bk.logfile, "tIsect pre", stack
assert len(stack) >= 2
bop = stack.pop()
aop = stack.pop()
sym = ' '
rank = 80 ########## check #######
otext = ''.join([
'('[:aop.rank < rank],
aop.text,
')'[:aop.rank < rank],
sym,
'('[:bop.rank < rank],
bop.text,
')'[:bop.rank < rank],
])
res = Operand(oREF)
res.text = otext
if bop.kind == oERR or aop.kind == oERR:
res.kind = oERR
elif bop.kind == oUNK or aop.kind == oUNK:
# This can happen with undefined
# (go search in the current sheet) labels.
# For example =Bob Sales
# Each label gets a NAME record with an empty formula (!)
# Evaluation of the tName token classifies it as oUNK
# res.kind = oREF
pass
elif bop.kind == oREF == aop.kind:
pass
elif bop.kind == oREL == aop.kind:
res.kind = oREL
else:
pass
spush(res)
if blah: print >> bk.logfile, "tIsect post", stack
elif opcode == 0x10: # tList
if blah: print >> bk.logfile, "tList pre", stack
assert len(stack) >= 2
bop = stack.pop()
aop = stack.pop()
sym = ','
rank = 80 ########## check #######
otext = ''.join([
'('[:aop.rank < rank],
aop.text,
')'[:aop.rank < rank],
sym,
'('[:bop.rank < rank],
bop.text,
')'[:bop.rank < rank],
])
res = Operand(oREF, None, rank, otext)
if bop.kind == oERR or aop.kind == oERR:
res.kind = oERR
elif bop.kind in (oREF, oREL) and aop.kind in (oREF, oREL):
res.kind = oREF
if aop.kind == oREL or bop.kind == oREL:
res.kind = oREL
else:
pass
spush(res)
if blah: print >> bk.logfile, "tList post", stack
elif opcode == 0x11: # tRange
if blah: print >> bk.logfile, "tRange pre", stack
assert len(stack) >= 2
bop = stack.pop()
aop = stack.pop()
sym = ':'
rank = 80 ########## check #######
otext = ''.join([
'('[:aop.rank < rank],
aop.text,
')'[:aop.rank < rank],
sym,
'('[:bop.rank < rank],
bop.text,
')'[:bop.rank < rank],
])
res = Operand(oREF, None, rank, otext)
if bop.kind == oERR or aop.kind == oERR:
res.kind = oERR
elif bop.kind == oREF == aop.kind:
pass
else:
pass
spush(res)
if blah: print >> bk.logfile, "tRange post", stack
elif 0x12 <= opcode <= 0x14: # tUplus, tUminus, tPercent
do_unaryop(opcode, (oUNK, oNUM,), oNUM, stack)
elif opcode == 0x15: # tParen
# source cosmetics
pass
elif opcode == 0x16: # tMissArg
spush(Operand(oMSNG, None, LEAF_RANK, ''))
elif opcode == 0x17: # tStr
if bv <= 70:
strg, newpos = unpack_string_update_pos(
data, pos+1, bk.encoding, lenlen=1)
else:
strg, newpos = unpack_unicode_update_pos(
data, pos+1, lenlen=1)
sz = newpos - pos
if blah: print >> bk.logfile, " sz=%d strg=%r" % (sz, strg)
text = '"' + strg.replace('"', '""') + '"'
spush(Operand(oSTRG, None, LEAF_RANK, text))
elif opcode == 0x18: # tExtended
# new with BIFF 8
assert bv >= 80
# not in OOo docs
raise FormulaError("tExtended token not implemented")
elif opcode == 0x19: # tAttr
subop, nc = unpack("<BH", data[pos+1:pos+4])
subname = tAttrNames.get(subop, "??Unknown??")
if subop == 0x04: # Choose
sz = nc * 2 + 6
elif subop == 0x10: # Sum (single arg)
sz = 4
if blah: print >> bk.logfile, "tAttrSum", stack
assert len(stack) >= 1
aop = stack[-1]
otext = 'SUM(%s)' % aop.text
stack[-1] = Operand(oNUM, None, FUNC_RANK, otext)
else:
sz = 4
if blah:
print " subop=%02xh subname=t%s sz=%d nc=%02xh" \
% (subop, subname, sz, nc)
elif 0x1A <= opcode <= 0x1B: # tSheet, tEndSheet
assert bv < 50
raise FormulaError("tSheet & tEndsheet tokens not implemented")
elif 0x1C <= opcode <= 0x1F: # tErr, tBool, tInt, tNum
inx = opcode - 0x1C
nb = [1, 1, 2, 8][inx]
kind = [oERR, oBOOL, oNUM, oNUM][inx]
value, = unpack("<" + "BBHd"[inx], data[pos+1:pos+1+nb])
if inx == 2: # tInt
value = float(value)
text = str(value)
elif inx == 3: # tNum
text = str(value)
elif inx == 1: # tBool
text = ('FALSE', 'TRUE')[value]
else:
text = '"' +error_text_from_code[value] + '"'
spush(Operand(kind, None, LEAF_RANK, text))
else:
raise FormulaError("Unhandled opcode: 0x%02x" % opcode)
if sz <= 0:
raise FormulaError("Size not set for opcode 0x%02x" % opcode)
pos += sz
continue
if opcode == 0x00: # tArray
spush(unk_opnd)
elif opcode == 0x01: # tFunc
nb = 1 + int(bv >= 40)
funcx = unpack("<" + " BH"[nb], data[pos+1:pos+1+nb])[0]
func_attrs = func_defs.get(funcx, None)
if not func_attrs:
print >> bk.logfile, "*** formula/tFunc unknown FuncID:%d" % funcx
spush(unk_opnd)
else:
func_name, nargs = func_attrs[:2]
if blah:
print " FuncID=%d name=%s nargs=%d" \
% (funcx, func_name, nargs)
assert len(stack) >= nargs
argtext = listsep.join([arg.text for arg in stack[-nargs:]])
otext = "%s(%s)" % (func_name, argtext)
del stack[-nargs:]
res = Operand(oUNK, None, FUNC_RANK, otext)
spush(res)
elif opcode == 0x02: #tFuncVar
nb = 1 + int(bv >= 40)
nargs, funcx = unpack("<B" + " BH"[nb], data[pos+1:pos+2+nb])
prompt, nargs = divmod(nargs, 128)
macro, funcx = divmod(funcx, 32768)
if blah:
print " FuncID=%d nargs=%d macro=%d prompt=%d" \
% (funcx, nargs, macro, prompt)
#### TODO #### if funcx == 255: # call add-in function
if funcx == 255:
func_attrs = ("CALL_ADDIN", 1, 30)
else:
func_attrs = func_defs.get(funcx, None)
if not func_attrs:
print >> bk.logfile, "*** formula/tFuncVar unknown FuncID:%d" \
% funcx
spush(unk_opnd)
else:
func_name, minargs, maxargs = func_attrs[:3]
if blah:
print " name: %r, min~max args: %d~%d" \
% (func_name, minargs, maxargs)
assert minargs <= nargs <= maxargs
assert len(stack) >= nargs
argtext = listsep.join([arg.text for arg in stack[-nargs:]])
otext = "%s(%s)" % (func_name, argtext)
res = Operand(oUNK, None, FUNC_RANK, otext)
del stack[-nargs:]
spush(res)
elif opcode == 0x03: #tName
tgtnamex = unpack("<H", data[pos+1:pos+3])[0] - 1
# Only change with BIFF version is number of trailing UNUSED bytes!
if blah: print >> bk.logfile, " tgtnamex=%d" % tgtnamex
tgtobj = bk.name_obj_list[tgtnamex]
if tgtobj.scope == -1:
otext = tgtobj.name
else:
otext = "%s!%s" % (bk._sheet_names[tgtobj.scope], tgtobj.name)
if blah:
print >> bk.logfile, " tName: setting text to", repr(otext)
res = Operand(oUNK, None, LEAF_RANK, otext)
spush(res)
elif opcode == 0x04: # tRef
res = get_cell_addr(data, pos+1, bv, reldelta, browx, bcolx)
if blah: print >> bk.logfile, " ", res
rowx, colx, row_rel, col_rel = res
is_rel = row_rel or col_rel
if is_rel:
okind = oREL
else:
okind = oREF
otext = cellnamerel(rowx, colx, row_rel, col_rel)
res = Operand(okind, None, LEAF_RANK, otext)
spush(res)
elif opcode == 0x05: # tArea
res1, res2 = get_cell_range_addr(
data, pos+1, bv, reldelta, browx, bcolx)
if blah: print >> bk.logfile, " ", res1, res2
rowx1, colx1, row_rel1, col_rel1 = res1
rowx2, colx2, row_rel2, col_rel2 = res2
coords = (rowx1, rowx2+1, colx1, colx2+1)
relflags = (row_rel1, row_rel2, col_rel1, col_rel2)
is_rel = intbool(sum(relflags))
if is_rel:
okind = oREL
else:
okind = oREF
if blah: print >> bk.logfile, " ", coords, relflags
otext = rangename2drel(coords, relflags)
res = Operand(okind, None, LEAF_RANK, otext)
spush(res)
elif opcode == 0x06: # tMemArea
not_in_name_formula(op, oname)
elif opcode == 0x09: # tMemFunc
nb = unpack("<H", data[pos+1:pos+3])[0]
if blah: print >> bk.logfile, " %d bytes of cell ref formula" % nb
# no effect on stack
elif opcode == 0x0C: #tRefN
not_in_name_formula(op, oname)
# res = get_cell_addr(data, pos+1, bv, reldelta=1)
# # note *ALL* tRefN usage has signed offset for relative addresses
# any_rel = 1
# if blah: print >> bk.logfile, " ", res
# spush(res)
elif opcode == 0x0D: #tAreaN
not_in_name_formula(op, oname)
# res = get_cell_range_addr(data, pos+1, bv, reldelta=1)
# # note *ALL* tAreaN usage has signed offset for relative addresses
# any_rel = 1
# if blah: print >> bk.logfile, " ", res
elif opcode == 0x1A: # tRef3d
if bv >= 80:
res = get_cell_addr(data, pos+3, bv, reldelta, browx, bcolx)
refx = unpack("<H", data[pos+1:pos+3])[0]
shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
else:
res = get_cell_addr(data, pos+15, bv, reldelta, browx, bcolx)
raw_extshtx, raw_shx1, raw_shx2 = \
unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
if blah:
print >> bk.logfile, "tRef3d", raw_extshtx, raw_shx1, raw_shx2
shx1, shx2 = get_externsheet_local_range_b57(
bk, raw_extshtx, raw_shx1, raw_shx2, blah)
rowx, colx, row_rel, col_rel = res
is_rel = row_rel or col_rel
any_rel = any_rel or is_rel
coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
any_err |= shx1 < -1
if blah: print >> bk.logfile, " ", coords
res = Operand(oUNK, None)
if is_rel:
relflags = (0, 0, row_rel, row_rel, col_rel, col_rel)
ref3d = Ref3D(coords + relflags)
res.kind = oREL
res.text = rangename3drel(bk, ref3d)
else:
ref3d = Ref3D(coords)
res.kind = oREF
res.text = rangename3d(bk, ref3d)
res.rank = LEAF_RANK
res.value = None
spush(res)
elif opcode == 0x1B: # tArea3d
if bv >= 80:
res1, res2 = get_cell_range_addr(data, pos+3, bv, reldelta)
refx = unpack("<H", data[pos+1:pos+3])[0]
shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
else:
res1, res2 = get_cell_range_addr(data, pos+15, bv, reldelta)
raw_extshtx, raw_shx1, raw_shx2 = \
unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
if blah:
print >> bk.logfile, "tArea3d", raw_extshtx, raw_shx1, raw_shx2
shx1, shx2 = get_externsheet_local_range_b57(
bk, raw_extshtx, raw_shx1, raw_shx2, blah)
any_err |= shx1 < -1
rowx1, colx1, row_rel1, col_rel1 = res1
rowx2, colx2, row_rel2, col_rel2 = res2
is_rel = row_rel1 or col_rel1 or row_rel2 or col_rel2
any_rel = any_rel or is_rel
coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
if blah: print >> bk.logfile, " ", coords
res = Operand(oUNK, None)
if is_rel:
relflags = (0, 0, row_rel1, row_rel2, col_rel1, col_rel2)
ref3d = Ref3D(coords + relflags)
res.kind = oREL
res.text = rangename3drel(bk, ref3d)
else:
ref3d = Ref3D(coords)
res.kind = oREF
res.text = rangename3d(bk, ref3d)
res.rank = LEAF_RANK
spush(res)
elif opcode == 0x19: # tNameX
dodgy = 0
res = Operand(oUNK, None)
if bv >= 80:
refx, tgtnamex = unpack("<HH", data[pos+1:pos+5])
tgtnamex -= 1
origrefx = refx
else:
refx, tgtnamex = unpack("<hxxxxxxxxH", data[pos+1:pos+13])
tgtnamex -= 1
origrefx = refx
if refx > 0:
refx -= 1
elif refx < 0:
refx = -refx - 1
else:
dodgy = 1
if blah:
print >> bk.logfile, \
" origrefx=%d refx=%d tgtnamex=%d dodgy=%d" \
% (origrefx, refx, tgtnamex, dodgy)
# if tgtnamex == namex:
# if blah: print >> bk.logfile, "!!!! Self-referential !!!!"
# dodgy = any_err = 1
if not dodgy:
if bv >= 80:
shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
elif origrefx > 0:
shx1, shx2 = (-4, -4) # external ref
else:
exty = bk._externsheet_type_b57[refx]
if exty == 4: # non-specific sheet in own doc't
shx1, shx2 = (-1, -1) # internal, any sheet
else:
shx1, shx2 = (-666, -666)
okind = oUNK
ovalue = None
if shx1 == -5: # addin func name
okind = oSTRG
ovalue = bk.addin_func_names[tgtnamex]
otext = '"' + ovalue.replace('"', '""') + '"'
elif dodgy or shx1 < -1:
otext = "<<Name #%d in external(?) file #%d>>" \
% (tgtnamex, origrefx)
else:
tgtobj = bk.name_obj_list[tgtnamex]
if tgtobj.scope == -1:
otext = tgtobj.name
else:
otext = "%s!%s" \
% (bk._sheet_names[tgtobj.scope], tgtobj.name)
if blah:
print >> bk.logfile, " tNameX: setting text to", repr(otext)
res = Operand(okind, ovalue, LEAF_RANK, otext)
spush(res)
elif is_error_opcode(opcode):
any_err = 1
spush(error_opnd)
else:
if blah:
print >> bk.logfile, "FORMULA: /// Not handled yet: t" + oname
any_err = 1
if sz <= 0:
raise FormulaError("Fatal: token size is not positive")
pos += sz
any_rel = not not any_rel
if blah:
print "End of formula. level=%d any_rel=%d any_err=%d stack=%r" % \
(level, not not any_rel, any_err, stack)
if len(stack) >= 2:
print "*** Stack has unprocessed args"
print
if len(stack) != 1:
result = None
else:
result = stack[0].text
return result
#### under deconstruction ###
def dump_formula(bk, data, fmlalen, bv, reldelta, blah=0, isname=0):
if blah:
print "dump_formula", fmlalen, bv, len(data)
hex_char_dump(data, 0, fmlalen)
assert bv >= 80 #### this function needs updating ####
sztab = szdict[bv]
pos = 0
stack = []
any_rel = 0
any_err = 0
spush = stack.append
while 0 <= pos < fmlalen:
op = ord(data[pos])
opcode = op & 0x1f
optype = (op & 0x60) >> 5
if optype:
opx = opcode + 32
else:
opx = opcode
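# The low five bits of the token byte are the base opcode; bits 5-6
# (optype) give the operand class, and classed tokens are looked up
# 32 entries further along in the name/size tables (hence opx above).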
oname = onames[opx] # + [" RVA"][optype]
sz = sztab[opx]
if blah:
print "Pos:%d Op:0x%02x Name:t%s Sz:%d opcode:%02xh optype:%02xh" \
% (pos, op, oname, sz, opcode, optype)
if not optype:
if 0x01 <= opcode <= 0x02: # tExp, tTbl
# reference to a shared formula or table record
rowx, colx = unpack("<HH", data[pos+1:pos+5])
if blah: print >> bk.logfile, " ", (rowx, colx)
elif opcode == 0x10: # tList
if blah: print >> bk.logfile, "tList pre", stack
assert len(stack) >= 2
bop = stack.pop()
aop = stack.pop()
spush(aop + bop)
if blah: print >> bk.logfile, "tList post", stack
elif opcode == 0x11: # tRange
if blah: print >> bk.logfile, "tRange pre", stack
assert len(stack) >= 2
bop = stack.pop()
aop = stack.pop()
assert len(aop) == 1
assert len(bop) == 1
result = do_box_funcs(tRangeFuncs, aop[0], bop[0])
spush(result)
if blah: print >> bk.logfile, "tRange post", stack
elif opcode == 0x0F: # tIsect
if blah: print >> bk.logfile, "tIsect pre", stack
assert len(stack) >= 2
bop = stack.pop()
aop = stack.pop()
assert len(aop) == 1
assert len(bop) == 1
result = do_box_funcs(tIsectFuncs, aop[0], bop[0])
spush(result)
if blah: print >> bk.logfile, "tIsect post", stack
elif opcode == 0x19: # tAttr
subop, nc = unpack("<BH", data[pos+1:pos+4])
subname = tAttrNames.get(subop, "??Unknown??")
if subop == 0x04: # Choose
sz = nc * 2 + 6
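# (1 token byte + 1 sub-opcode byte + 2-byte count + (nc+1) 2-byte
# jump offsets = nc * 2 + 6 bytes in total)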
else:
sz = 4
if blah: print >> bk.logfile, " subop=%02xh subname=t%s sz=%d nc=%02xh" % (subop, subname, sz, nc)
elif opcode == 0x17: # tStr
if bv <= 70:
nc = ord(data[pos+1])
strg = data[pos+2:pos+2+nc] # left in 8-bit encoding
sz = nc + 2
else:
strg, newpos = unpack_unicode_update_pos(data, pos+1, lenlen=1)
sz = newpos - pos
if blah: print >> bk.logfile, " sz=%d strg=%r" % (sz, strg)
else:
if sz <= 0:
print "**** Dud size; exiting ****"
return
pos += sz
continue
if opcode == 0x00: # tArray
pass
elif opcode == 0x01: # tFunc
nb = 1 + int(bv >= 40)
funcx = unpack("<" + " BH"[nb], data[pos+1:pos+1+nb])[0]
if blah: print >> bk.logfile, " FuncID=%d" % funcx
elif opcode == 0x02: #tFuncVar
nb = 1 + int(bv >= 40)
nargs, funcx = unpack("<B" + " BH"[nb], data[pos+1:pos+2+nb])
prompt, nargs = divmod(nargs, 128)
macro, funcx = divmod(funcx, 32768)
if blah: print >> bk.logfile, " FuncID=%d nargs=%d macro=%d prompt=%d" % (funcx, nargs, macro, prompt)
elif opcode == 0x03: #tName
namex = unpack("<H", data[pos+1:pos+3])[0]
# Only change with BIFF version is the number of trailing UNUSED bytes!!!
if blah: print >> bk.logfile, " namex=%d" % namex
elif opcode == 0x04: # tRef
res = get_cell_addr(data, pos+1, bv, reldelta)
if blah: print >> bk.logfile, " ", res
elif opcode == 0x05: # tArea
res = get_cell_range_addr(data, pos+1, bv, reldelta)
if blah: print >> bk.logfile, " ", res
elif opcode == 0x09: # tMemFunc
nb = unpack("<H", data[pos+1:pos+3])[0]
if blah: print >> bk.logfile, " %d bytes of cell ref formula" % nb
elif opcode == 0x0C: #tRefN
res = get_cell_addr(data, pos+1, bv, reldelta=1)
# note *ALL* tRefN usage has signed offset for relative addresses
any_rel = 1
if blah: print >> bk.logfile, " ", res
elif opcode == 0x0D: #tAreaN
res = get_cell_range_addr(data, pos+1, bv, reldelta=1)
# note *ALL* tAreaN usage has signed offset for relative addresses
any_rel = 1
if blah: print >> bk.logfile, " ", res
elif opcode == 0x1A: # tRef3d
refx = unpack("<H", data[pos+1:pos+3])[0]
res = get_cell_addr(data, pos+3, bv, reldelta)
if blah: print >> bk.logfile, " ", refx, res
rowx, colx, row_rel, col_rel = res
any_rel = any_rel or row_rel or col_rel
shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
any_err |= shx1 < -1
coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
if blah: print >> bk.logfile, " ", coords
if optype == 1: spush([coords])
elif opcode == 0x1B: # tArea3d
refx = unpack("<H", data[pos+1:pos+3])[0]
res1, res2 = get_cell_range_addr(data, pos+3, bv, reldelta)
if blah: print >> bk.logfile, " ", refx, res1, res2
rowx1, colx1, row_rel1, col_rel1 = res1
rowx2, colx2, row_rel2, col_rel2 = res2
any_rel = any_rel or row_rel1 or col_rel1 or row_rel2 or col_rel2
shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
any_err |= shx1 < -1
coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
if blah: print >> bk.logfile, " ", coords
if optype == 1: spush([coords])
elif opcode == 0x19: # tNameX
refx, namex = unpack("<HH", data[pos+1:pos+5])
if blah: print >> bk.logfile, " refx=%d namex=%d" % (refx, namex)
elif is_error_opcode(opcode):
any_err = 1
else:
if blah: print >> bk.logfile, "FORMULA: /// Not handled yet: t" + oname
any_err = 1
if sz <= 0:
print "**** Dud size; exiting ****"
return
pos += sz
if blah:
print >> bk.logfile, "End of formula. any_rel=%d any_err=%d stack=%r" % \
(not not any_rel, any_err, stack)
if len(stack) >= 2:
print >> bk.logfile, "*** Stack has unprocessed args"
# === Some helper functions for displaying cell references ===
# Note that a "non-standard" syntax is used in row and column
# components in relative references.
# For example, consider a relative reference: up two rows, right 3 columns.
# On screen, with cursor in cell D10, this would appear as G8.
# On screen, with cursor in cell Z100, this would appear as AC98.
# On screen, with cursor in cell A1, this would appear as D65535.
# These functions will display such a reference as [@+3,#-2].
# "@" refers to the unknown base column.
# "#" refers to the unknown base row.
#
# I'm aware of only one possibility of a sheet-relative component in
# a reference: a 2D reference located in the "current sheet".
# xlrd stores this internally with bounds of (0, 1, ...) and
# relative flags of (1, 1, ...). These functions display the
# sheet component as empty, just like Excel etc.
def rownamerel(rowx, rowxrel):
if not rowxrel:
return "$%d" % rowx
if rowx > 0:
return "#+%d" % rowx
if rowx < 0:
return "#-%d" % (-rowx)
return "#"
def colnamerel(colx, colxrel):
if not colxrel:
return "$" + colname(colx)
if colx > 0:
return "@+%d" % colx
if colx < 0:
return "@-%d" % (-colx)
return "@"
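# Illustrative outputs of the two helpers above:
# rownamerel(5, 0) => '$5' ; rownamerel(2, 1) => '#+2'
# colnamerel(7, 0) => '$H' ; colnamerel(-3, 1) => '@-3'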
##
# Utility function: (5, 7) => 'H6'
def cellname(rowx, colx):
""" (5, 7) => 'H6' """
return "%s%d" % (colname(colx), rowx+1)
##
# Utility function: (5, 7) => '$H$6'
def cellnameabs(rowx, colx):
""" (5, 7) => '$H$6' """
return "$%s$%d" % (colname(colx), rowx+1)
def cellnamerel(rowx, colx, rowxrel, colxrel):
if not rowxrel and not colxrel:
return cellnameabs(rowx, colx)
return "[%s,%s]" % (
colnamerel(colx, colxrel),
rownamerel(rowx, rowxrel))
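# E.g. cellnamerel(5, 7, 0, 0) => '$H$6' (fully absolute), and
# cellnamerel(-2, 3, 1, 1) => '[@+3,#-2]' (the "up two rows, right
# three columns" example from the comment block above).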
##
# Utility function: 7 => 'H', 27 => 'AB'
def colname(colx):
""" 7 => 'H', 27 => 'AB' """
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
if colx <= 25:
return alphabet[colx]
else:
xdiv26, xmod26 = divmod(colx, 26)
return alphabet[xdiv26 - 1] + alphabet[xmod26]
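# E.g. colname(0) => 'A', colname(25) => 'Z', colname(26) => 'AA',
# colname(701) => 'ZZ'. Note: the two-letter arithmetic above only
# covers columns up to 'ZZ' (colx 701), which is ample for BIFF8's
# 256-column limit.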
def rangename2d(rlo, rhi, clo, chi):
""" (5, 20, 7, 10) => '$H$6:$J$20' """
if rhi == rlo+1 and chi == clo+1:
return cellnameabs(rlo, clo)
return "%s:%s" % (cellnameabs(rlo, clo), cellnameabs(rhi-1, chi-1))
def rangename2drel((rlo, rhi, clo, chi), (rlorel, rhirel, clorel, chirel)):
return "%s:%s" % (
cellnamerel(rlo, clo, rlorel, clorel),
cellnamerel(rhi-1, chi-1, rhirel, chirel)
)
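# E.g. rangename2drel((5, 20, 7, 10), (0, 0, 0, 0)) => '$H$6:$J$20',
# matching rangename2d; relative flags switch the corresponding
# corner to the [@...,#...] notation.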
##
# Utility function:
# <br /> Ref3D((1, 4, 5, 20, 7, 10)) => 'Sheet2:Sheet4!$H$6:$J$20'
def rangename3d(book, ref3d):
""" Ref3D(1, 4, 5, 20, 7, 10) => 'Sheet2:Sheet4!$H$6:$J$20'
(assuming Excel's default sheetnames) """
coords = ref3d.coords
return "%s!%s" % (
sheetrange(book, *coords[:2]),
rangename2d(*coords[2:6]))
##
# Utility function:
# <br /> Ref3D(coords=(0, 1, -32, -22, -13, 13), relflags=(0, 0, 1, 1, 1, 1))
# => 'Sheet1![@-13,#-32]:[@+12,#-23]'
# where '@' refers to the current or base column and '#'
# refers to the current or base row.
def rangename3drel(book, ref3d):
coords = ref3d.coords
relflags = ref3d.relflags
shdesc = sheetrangerel(book, coords[:2], relflags[:2])
rngdesc = rangename2drel(coords[2:6], relflags[2:6])
if not shdesc:
return rngdesc
return "%s!%s" % (shdesc, rngdesc)
def quotedsheetname(shnames, shx):
if shx >= 0:
shname = shnames[shx]
else:
shname = {
-1: "?internal; any sheet?",
-2: "internal; deleted sheet",
-3: "internal; macro sheet",
-4: "<<external>>",
}.get(shx, "?error %d?" % shx)
if "'" in shname:
return "'" + shname.replace("'", "''") + "'"
if " " in shname:
return "'" + shname + "'"
return shname
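# E.g. quotedsheetname(["Sheet1", "My Sheet", "O'Brien"], 1) => "'My Sheet'"
# and quotedsheetname(["Sheet1", "My Sheet", "O'Brien"], 2) => "'O''Brien'"
# (embedded quotes are doubled, as in Excel formulas).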
def sheetrange(book, slo, shi):
shnames = book.sheet_names()
shdesc = quotedsheetname(shnames, slo)
if slo != shi-1:
shdesc += ":" + quotedsheetname(shnames, shi-1)
return shdesc
def sheetrangerel(book, (slo, shi), (slorel, shirel)):
if not slorel and not shirel:
return sheetrange(book, slo, shi)
assert (slo == 0 == shi-1) and slorel and shirel
return ""
# ==============================================================
| apache-2.0 |
sepehr125/pybrain | pybrain/tools/plotting/multiline.py | 25 | 7884 | # $Id$
__author__ = 'Martin Felder and Frank Sehnke'
import math, imp
from matplotlib.lines import Line2D
from pylab import clf, plot, axes, show, xlabel, ylabel, title, savefig, ioff, draw_if_interactive
class MultilinePlotter:
""" Basic plotting class build on pylab
Implementing by instancing the class with the number of different plots to show.
Every plot has an id so adding data is done by addData(id, xValue, yValue) of the given data point
:todo: Add possibility to stick markers to the plots
:todo: Some error checking and documentation
:todo: Derive from this to make classes for trn/tst data plotting with different linestyles
"""
# some nice color definitions for graphs (from colorbrewer.org)
graphColor = [(0.894117647, 0.101960784, 0.109803922), \
(0.215686275, 0.494117647, 0.721568627), \
(0.301960784, 0.68627451, 0.290196078), \
(0.596078431, 0.305882353, 0.639215686), \
(1, 0.498039216, 0), \
(1, 1, 0.2), \
(0.650980392, 0.337254902, 0.156862745), \
(0.968627451, 0.505882353, 0.749019608), \
(0.6, 0.6, 0.6)]
def __init__(self, maxLines=1, autoscale=0.0, **kwargs):
"""
:key maxLines: Number of plots to draw, and thus the maximum ID.
:key autoscale: If set to a factor > 1, axes are automatically expanded whenever out-of-range data points are added
:var indexList: The x-components of the data points
:var dataList: The y-components of the data points"""
self.indexList = []
self.dataList = []
self.Lines = []
self.autoscale = autoscale
clf()
self.Axes = axes(**kwargs)
self.nbLines = 0
self.defaultLineStyle = {}
self._checkMaxId(maxLines - 1)
self.replot = True # is the plot still current?
self.currentID = None
self.offset = 0 # external references to IDs are modified by this
def setOffset(self, offs):
""" Set an offset that modifies all subsequent references to line IDs
:key offs: The desired offset """
self.offset = offs
#def createFigure(self, size=[12,8], interactive=True):
#""" initialize the graphics output window """
## FIXME: doesn't work, because axes() in the constructor already creates a figure
#pylab.figure(figsize=size)
#if interactive: pylab.ion()
def _checkMaxId(self, id):
""" Appends additional lines as necessary
:key id: Lines up to this id are added automatically """
if id >= self.nbLines:
for i in range(self.nbLines, id + 1):
# create a new line with corresponding x/y data, and attach it to the plot
l = Line2D([], [], color=self.graphColor[i % 9], **self.defaultLineStyle)
self.Lines.append(l)
self.Axes.add_line(l)
self.indexList.append([])
self.dataList.append([])
self.nbLines = id + 1
def addData(self, id0, x, y):
""" The given data point or points is appended to the given line.
:key id0: The plot ID (counted from 0) the data point(s) belong to.
:key x: The x-component of the data point(s)
:key y: The y-component of the data point(s)"""
id = id0 + self.offset
if not (isinstance(x, list) | isinstance(x, tuple)):
self._checkMaxId(id)
self.indexList[id].append(x)
self.dataList[id].append(y)
self.currentID = id
else:
for i, xi in enumerate(x):
self.addData(id0, xi, y[i])
self.replot = True
def setData(self, id0, x, y):
""" Data series id0 is replaced by the given lists
:key id0: The plot ID (counted from 0) the data point(s) belong to.
:key x: The x-component of the data points
:key y: The y-component of the data points"""
id = id0 + self.offset
self._checkMaxId(id)
self.indexList[id] = x
self.dataList[id] = y
self.replot = True
def saveData(self, filename):
""" Writes the data series for all points to a file
:key filename: The name of the output file """
file = open(filename, "w")
for i in range(self.nbLines):
datLen = len(self.indexList[i])
for j in range(datLen):
file.write(repr(self.indexList[i][j]) + "\n")
file.write(repr(self.dataList[i][j]) + "\n")
file.close()
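# (Output format: for each line, its x and y values are written
# alternately, one value per line.)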
def setLabels(self, x='', y='', title=''):
""" set axis labels and title """
self.Axes.set_xlabel(x)
self.Axes.set_ylabel(y)
self.Axes.set_title(title)
def setLegend(self, *args, **kwargs):
""" hand parameters to the legend """
self.Axes.legend(*args, **kwargs)
def setLineStyle(self, id=None, **kwargs):
""" hand parameters to the specified line(s), and set them as default for new lines
:key id: The line or lines (list!) to be modified - defaults to last one added """
if id is None:
id = self.currentID
if isinstance(id, list) | isinstance(id, tuple):
# apply to specified list of lines
self._checkMaxId(max(id) + self.offset)
for i in id:
self.Lines[i + self.offset].set(**kwargs)
elif id >= 0:
# apply to selected line
self._checkMaxId(id + self.offset)
self.Lines[id + self.offset].set(**kwargs)
else:
# apply to all lines
for l in self.Lines:
l.set(**kwargs)
# set as new default linestyle
if 'color' in kwargs:
kwargs.pop('color')
self.defaultLineStyle = kwargs
def update(self):
""" Updates the current plot, if necessary """
if not self.replot:
return
xr = list(self.Axes.get_xlim())
yr = list(self.Axes.get_ylim())
for i in range(self.nbLines):
self.Lines[i].set_data(self.indexList[i], self.dataList[i])
if self.autoscale > 1.0:
if self.indexList[i][0] < xr[0]:
xr[0] = self.indexList[i][0]
ymn = min(self.dataList[i])
if ymn < yr[0]:
yr[0] = ymn
while self.indexList[i][-1] > xr[1]:
xr[1] = (xr[1] - xr[0]) * self.autoscale + xr[0]
ymx = max(self.dataList[i])
while ymx > yr[1]:
yr[1] = (yr[1] - yr[0]) * self.autoscale + yr[0]
if self.autoscale > 1.0:
self.Axes.set_xlim(tuple(xr))
self.Axes.set_ylim(tuple(yr))
#self.Axes.draw()
#pylab.show()
draw_if_interactive()
self.replot = False
def show(self, xLabel='', yLabel='', Title='', popup=False, imgfile=None):
""" Plots the data internally and saves an image of it to the plotting directory.
:key Title: The title of the plot.
:key xLabel: The label for the x-axis
:key yLabel: The label for the y-axis
:key popup: also produce a popup window with the image?
:key imgfile: name of the image file to write; defaults to plot.png in pybrain's tools/plotting directory"""
clf()
for i in range(self.nbLines):
plot(self.indexList[i], self.dataList[i])
xlabel(xLabel)
ylabel(yLabel)
title(Title)
if imgfile is None:
imgfile = imp.find_module('pybrain')[1] + "/tools/plotting/plot.png"
savefig(imgfile)
if popup:
ioff()
show()
"""Small example to demonstrate how the plot class can be used"""
if __name__ == "__main__":
pbplot = MultilinePlotter(7)
for i in range(400000):
if i % 100000 == 0:
for j in range(7):
pbplot.addData(j, i, math.sqrt(float(i * (j + 1))))
pbplot.show("WorldInteractions", "Fitness", "Example Plot", True)
| bsd-3-clause |
dscdac/Proyecto-IV-modulo2 | lib/python2.7/site-packages/setuptools/tests/doctest.py | 332 | 99828 | # Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
try:
basestring
except NameError:
basestring = str
try:
enumerate
except NameError:
def enumerate(seq):
return zip(range(len(seq)),seq)
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
'is_private',
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re, types
import unittest, difflib, pdb, tempfile
import warnings
from setuptools.compat import StringIO, execfile, func_code, func_globals, im_func
# Don't whine about the deprecated is_private function in this
# module's tests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
__name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
flag = 1 << len(OPTIONFLAGS_BY_NAME)
OPTIONFLAGS_BY_NAME[name] = flag
return flag
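# Each registered flag is the next free bit (1, 2, 4, ...), so flags
# combine with bitwise OR, e.g. ELLIPSIS | NORMALIZE_WHITESPACE.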
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
"""prefix, base -> true iff name prefix + "." + base is "private".
Prefix may be an empty string, and base does not contain a period.
Prefix is ignored (although functions you write conforming to this
protocol may make use of it).
Return true iff base begins with an (at least one) underscore, but
does not both begin and end with (at least) two underscores.
>>> is_private("a.b", "my_func")
False
>>> is_private("____", "_my_func")
True
>>> is_private("someclass", "__init__")
False
>>> is_private("sometypo", "__init_")
True
>>> is_private("x.y.z", "_")
True
>>> is_private("_x.y.z", "__")
False
>>> is_private("", "") # senseless but consistent
False
"""
warnings.warn("is_private is deprecated; it wasn't useful; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning, stacklevel=2)
return base[:1] == "_" and not base[:2] == "__" == base[-2:]
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, basestring):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
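# E.g. _indent("a\n\nb\n") returns "    a\n\n    b\n" with the default
# indent of 4; blank lines are left alone.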
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if want.find(ELLIPSIS_MARKER)==-1:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
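# More illustrative cases:
# _ellipsis_match('a...c', 'abc') -> True
# _ellipsis_match('a...c', 'ac') -> True (an ellipsis may match '')
# _ellipsis_match('...b...', 'xbx') -> True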
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
pdb.Pdb.__init__(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError('Expected a module: %r' % module)
if path.startswith('/'):
raise ValueError('Module-relative files may not have absolute paths')
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
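# E.g. for a hypothetical module whose __file__ is '/pkg/mod.py' (POSIX),
# _module_relative_path(mod, 'data/t.txt') yields '/pkg/data/t.txt'.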
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
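# For example, given the string "    >>> 1 + 1\n    2\n", the 'indent'
# group is the four leading spaces, 'source' is the whole ">>> 1 + 1"
# line (indentation and prompt included), and 'want' is "    2\n".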
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, _namefilter=None, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
# _namefilter is undocumented, and exists only for temporary backward-
# compatibility support of testmod's deprecated isprivate mess.
self._namefilter = _namefilter
def find(self, obj, name=None, module=None, globs=None,
extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
# Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
return tests
def _filter(self, obj, prefix, base):
"""
Return true if the given object should not be examined.
"""
return (self._namefilter is not None and
self._namefilter(prefix, base))
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is func_globals(object)
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = im_func(getattr(obj, valname))
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = im_func(obj)
if inspect.isfunction(obj): obj = func_code(obj)
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> for test in tests:
... print runner.run(test)
(0, 2)
(0, 1)
(0, 2)
(0, 2)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
(0, 7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
First, an output function (`out`) can be passed to
`TestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec(compile(example.source, filename, "single",
compileflags, 1), test.globs)
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'[^:]*:', example.exc_msg)
m2 = re.match(r'[^:]*:', exc_msg)
if m1 and m2 and check(m1.group(0), m2.group(0),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return failures, tries
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(True)
elif func_code(self.save_linecache_getlines).co_argcount > 1:
return self.save_linecache_getlines(filename, module_globals)
else:
return self.save_linecache_getlines(filename)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print(len(notests), "items had no tests:")
notests.sort()
for thing in notests:
print(" ", thing)
if passed:
print(len(passed), "items passed all tests:")
passed.sort()
for thing, count in passed:
print(" %3d tests in %s" % (count, thing))
if failed:
print(self.DIVIDER)
print(len(failed), "items had failures:")
failed.sort()
for thing, (f, t) in failed:
print(" %3d of %3d in %s" % (f, t, thing))
if verbose:
print(totalt, "tests in", len(self._name2ft), "items.")
print(totalt - totalf, "passed and", totalf, "failed.")
if totalf:
print("***Test Failed***", totalf, "failures.")
elif verbose:
print("Test passed.")
return totalf, totalt
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
print("*** DocTestRunner.merge: '" + name + "' in both" \
" testers; summing outcomes.")
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
class OutputChecker:
"""
A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub(r'(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
(0, 1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__. Unless isprivate is specified, private names
are not skipped.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Deprecated in Python 2.4:
Optional keyword arg "isprivate" specifies a function used to
determine whether a name is private. The default function is to
treat all functions as public. Optionally, "isprivate" can be
set to doctest.is_private to skip over functions marked as private
using the underscore naming convention; see its docs for details.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if isprivate is not None:
warnings.warn("the isprivate argument is deprecated; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning)
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser()):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if module_relative:
package = _normalize_module(package)
filename = _module_relative_path(package, filename)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
# Read the file, convert it to a test, and run it.
f = open(filename)
s = f.read()
f.close()
test = parser.get_doctest(s, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None,
isprivate=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.isprivate = isprivate
self.optionflags = optionflags
self.testfinder = DocTestFinder(_namefilter=isprivate)
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print("Running string", name)
(f,t) = self.testrunner.run(test)
if self.verbose:
print(f, "of", t, "examples failed in string", name)
return (f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return (f,t)
def rundict(self, d, name, module=None):
import types
m = types.ModuleType(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import types
m = types.ModuleType(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> old = _unittest_reportflags
>>> set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> import doctest
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
if globs is None:
globs = module.__dict__
if not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(), **options):
if globs is None:
globs = {}
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
if module_relative:
package = _normalize_module(package)
path = _module_relative_path(package, path)
# Find the file and read it.
name = os.path.basename(path)
f = open(path)
doc = f.read()
f.close()
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print script_from_examples(text)
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
return '\n'.join(output)
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
# Note that tempfile.NamedTemporaryFile() cannot be used. As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print(sys.exc_info()[1])
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
r = unittest.TextTestRunner()
r.run(DocTestSuite())
if __name__ == "__main__":
_test()
| gpl-2.0 |
TalShafir/ansible | lib/ansible/modules/database/mongodb/mongodb_user.py | 18 | 15461 | #!/usr/bin/python
# (c) 2012, Elliott Foster <[email protected]>
# Sponsored by Four Kitchens http://fourkitchens.com.
# (c) 2014, Epic Games, Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mongodb_user
short_description: Adds or removes a user from a MongoDB database.
description:
- Adds or removes a user from a MongoDB database.
version_added: "1.1"
options:
login_user:
description:
- The username used to authenticate with
login_password:
description:
- The password used to authenticate with
login_host:
description:
- The host running the database
default: localhost
login_port:
description:
- The port to connect to
default: 27017
login_database:
version_added: "2.0"
description:
- The database where login credentials are stored
replica_set:
version_added: "1.6"
description:
- Replica set to connect to (automatically connects to primary for writes)
database:
description:
- The name of the database to add/remove the user from
required: true
name:
description:
- The name of the user to add or remove
required: true
aliases: [ 'user' ]
password:
description:
- The password to use for the user
ssl:
version_added: "1.8"
description:
- Whether to use an SSL connection when connecting to the database
ssl_cert_reqs:
version_added: "2.2"
description:
- Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided.
default: "CERT_REQUIRED"
choices: ["CERT_REQUIRED", "CERT_OPTIONAL", "CERT_NONE"]
roles:
version_added: "1.3"
description:
- >
The database user roles. Valid values are one or more of the following strings:
'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase',
'dbAdminAnyDatabase'
- "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
- "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
default: "readWrite"
state:
description:
- The database user state
default: present
choices: [ "present", "absent" ]
update_password:
default: always
choices: ['always', 'on_create']
version_added: "2.1"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author:
- "Elliott Foster (@elliotttf)"
- "Julien Thebault (@lujeni)"
'''
EXAMPLES = '''
# Create 'burgers' database user with name 'bob' and password '12345'.
- mongodb_user:
database: burgers
name: bob
password: 12345
state: present
# Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
- mongodb_user:
database: burgers
name: bob
password: 12345
state: present
ssl: True
# Delete 'burgers' database user with name 'bob'.
- mongodb_user:
database: burgers
name: bob
state: absent
# Define more users with various specific roles (if not defined, no roles is assigned, and the user will be added via pre mongo 2.2 style)
- mongodb_user:
database: burgers
name: ben
password: 12345
roles: read
state: present
- mongodb_user:
database: burgers
name: jim
password: 12345
roles: readWrite,dbAdmin,userAdmin
state: present
- mongodb_user:
database: burgers
name: joe
password: 12345
roles: readWriteAnyDatabase
state: present
# add a user to database in a replica set, the primary server is automatically discovered and written to
- mongodb_user:
database: burgers
name: bob
replica_set: belcher
password: 12345
roles: readWriteAnyDatabase
state: present
# add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
# note that the credentials must be added to the 'admin' database because the 'local' database is not synchronized and can't receive user credentials
# To login with such user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
# This syntax requires mongodb 2.6+ and pymongo 2.5+
- mongodb_user:
login_user: root
login_password: root_password
database: admin
user: oplog_reader
password: oplog_reader_password
state: present
replica_set: belcher
roles:
- db: local
role: read
'''
RETURN = '''
user:
description: The name of the user to add or remove.
returned: success
type: string
'''
import os
import ssl as ssl_lib
import traceback
from distutils.version import LooseVersion
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_native
# =========================================
# MongoDB module specific support methods.
#
def check_compatibility(module, client):
"""Check the compatibility between the driver and the database.
See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility
Args:
module: Ansible module.
client (cursor): Mongodb cursor on admin database.
"""
loose_srv_version = LooseVersion(client.server_info()['version'])
loose_driver_version = LooseVersion(PyMongoVersion)
if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'):
module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)')
elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'):
module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)')
elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'):
module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)')
elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)')
def user_find(client, user, db_name):
"""Check if the user exists.
Args:
client (cursor): Mongodb cursor on admin database.
user (str): User to check.
db_name (str): User's database.
Returns:
dict: when user exists, False otherwise.
"""
for mongo_user in client["admin"].system.users.find():
if mongo_user['user'] == user:
# NOTE: there is no 'db' field in mongo 2.4.
if 'db' not in mongo_user:
return mongo_user
if mongo_user["db"] == db_name:
return mongo_user
return False
def user_add(module, client, db_name, user, password, roles):
# pymongo's user_add is a _create_or_update_user so we won't know if it was changed or updated
# without reproducing a lot of the logic in database.py of pymongo
db = client[db_name]
if roles is None:
db.add_user(user, password, False)
else:
db.add_user(user, password, None, roles=roles)
def user_remove(module, client, db_name, user):
exists = user_find(client, user, db_name)
if exists:
if module.check_mode:
module.exit_json(changed=True, user=user)
db = client[db_name]
db.remove_user(user)
else:
module.exit_json(changed=False, user=user)
def load_mongocnf():
config = configparser.RawConfigParser()
mongocnf = os.path.expanduser('~/.mongodb.cnf')
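# A sketch of the expected ~/.mongodb.cnf layout (values are
# illustrative; only the [client] section with 'user' and 'pass' keys
# is read below):
#
#   [client]
#   user = admin
#   pass = secret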
try:
config.readfp(open(mongocnf))
creds = dict(
user=config.get('client', 'user'),
password=config.get('client', 'pass')
)
except (configparser.NoOptionError, IOError):
return False
return creds
def check_if_roles_changed(uinfo, roles, db_name):
# We must be aware of users which can read the oplog on a replicaset
# Such users must have access to the local DB, but since this DB does not store user credentials
# and is not synchronized among replica sets, the user must be stored on the admin db
# Therefore their structure is the following :
# {
# "_id" : "admin.oplog_reader",
# "user" : "oplog_reader",
# "db" : "admin", # <-- admin DB
# "roles" : [
# {
# "role" : "read",
# "db" : "local" # <-- local DB
# }
# ]
# }
def make_sure_roles_are_a_list_of_dict(roles, db_name):
output = list()
for role in roles:
if isinstance(role, (binary_type, text_type)):
new_role = {"role": role, "db": db_name}
output.append(new_role)
else:
output.append(role)
return output
roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name)
uinfo_roles = uinfo.get('roles', [])
# Sort with an explicit key: sorting a list of dicts directly raises
# TypeError on Python 3.
role_key = lambda role: (role.get('db', ''), role.get('role', ''))
if sorted(roles_as_list_of_dict, key=role_key) == sorted(uinfo_roles, key=role_key):
return False
return True
# =========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default='localhost'),
login_port=dict(default='27017'),
login_database=dict(default=None),
replica_set=dict(default=None),
database=dict(required=True, aliases=['db']),
name=dict(required=True, aliases=['user']),
password=dict(aliases=['pass'], no_log=True),
ssl=dict(default=False, type='bool'),
roles=dict(default=None, type='list'),
state=dict(default='present', choices=['absent', 'present']),
update_password=dict(default="always", choices=["always", "on_create"]),
ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
),
supports_check_mode=True
)
if not pymongo_found:
module.fail_json(msg='the python pymongo module is required')
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_database = module.params['login_database']
replica_set = module.params['replica_set']
db_name = module.params['database']
user = module.params['name']
password = module.params['password']
ssl = module.params['ssl']
roles = module.params['roles'] or []
state = module.params['state']
update_password = module.params['update_password']
try:
connection_params = {
"host": login_host,
"port": int(login_port),
}
if replica_set:
connection_params["replicaset"] = replica_set
if ssl:
connection_params["ssl"] = ssl
connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
client = MongoClient(**connection_params)
# NOTE: this check must be done ASAP.
# We don't need to be authenticated (this ability was lost in PyMongo 3.6)
if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'):
check_compatibility(module, client)
if login_user is None and login_password is None:
mongocnf_creds = load_mongocnf()
if mongocnf_creds is not False:
login_user = mongocnf_creds['user']
login_password = mongocnf_creds['password']
elif login_password is None or login_user is None:
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
if login_user is not None and login_password is not None:
client.admin.authenticate(login_user, login_password, source=login_database)
elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
if db_name != "admin":
module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
# else: this has to be the first admin user added
except Exception as e:
module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
if state == 'present':
if password is None and update_password == 'always':
module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
try:
if update_password != 'always':
uinfo = user_find(client, user, db_name)
if uinfo:
password = None
if not check_if_roles_changed(uinfo, roles, db_name):
module.exit_json(changed=False, user=user)
if module.check_mode:
module.exit_json(changed=True, user=user)
user_add(module, client, db_name, user, password, roles)
except Exception as e:
module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc())
# Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848
# newuinfo = user_find(client, user, db_name)
# if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
# module.exit_json(changed=False, user=user)
elif state == 'absent':
try:
user_remove(module, client, db_name, user)
except Exception as e:
module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=True, user=user)
if __name__ == '__main__':
main()
| gpl-3.0 |
codeforamerica/skillcamp | ENV/lib/python2.7/site-packages/werkzeug/datastructures.py | 314 | 86050 | # -*- coding: utf-8 -*-
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import codecs
import mimetypes
from itertools import repeat
from werkzeug._internal import _missing, _empty_stream
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
PY2, text_type, integer_types, string_types, make_literal_wrapper
_locale_delim_re = re.compile(r'[_-]')
def is_immutable(self):
raise TypeError('%r objects are immutable' % self.__class__.__name__)
def iter_multi_items(mapping):
"""Iterates over the items of a mapping yielding keys and values
without dropping any from more complex structures.
"""
if isinstance(mapping, MultiDict):
for item in iteritems(mapping, multi=True):
yield item
elif isinstance(mapping, dict):
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
for value in value:
yield key, value
else:
yield key, value
else:
for item in mapping:
yield item
def native_itermethods(names):
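# On Python 2 this decorator renames each named generator method to an
# ``iter*`` variant and installs a list-returning wrapper under the
# original name; on Python 3 it is a no-op. A hedged illustration
# (the class is hypothetical):
#
#   @native_itermethods(['values'])
#   class Bag(object):
#       def values(self):          # exposed as itervalues() on PY2
#           yield 1
#           yield 2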
if not PY2:
return lambda x: x
def setmethod(cls, name):
itermethod = getattr(cls, name)
setattr(cls, 'iter%s' % name, itermethod)
listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw))
listmethod.__doc__ = \
'Like :py:meth:`iter%s`, but returns a list.' % name
setattr(cls, name, listmethod)
def wrap(cls):
for name in names:
setmethod(cls, name)
return cls
return wrap
class ImmutableListMixin(object):
"""Makes a :class:`list` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(tuple(self))
return rv
def __reduce_ex__(self, protocol):
return type(self), (list(self),)
def __delitem__(self, key):
is_immutable(self)
def __delslice__(self, i, j):
is_immutable(self)
def __iadd__(self, other):
is_immutable(self)
__imul__ = __iadd__
def __setitem__(self, key, value):
is_immutable(self)
def __setslice__(self, i, j, value):
is_immutable(self)
def append(self, item):
is_immutable(self)
remove = append
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def reverse(self):
is_immutable(self)
def sort(self, cmp=None, key=None, reverse=None):
is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
"""An immutable :class:`list`.
.. versionadded:: 0.5
:private:
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
            list.__repr__(self),
)
class ImmutableDictMixin(object):
"""Makes a :class:`dict` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
@classmethod
def fromkeys(cls, keys, value=None):
instance = super(cls, cls).__new__(cls)
instance.__init__(zip(keys, repeat(value)))
return instance
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def _iter_hashitems(self):
return iteritems(self)
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
return rv
def setdefault(self, key, default=None):
is_immutable(self)
def update(self, *args, **kwargs):
is_immutable(self)
def pop(self, key, default=None):
is_immutable(self)
def popitem(self):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
def __delitem__(self, key):
is_immutable(self)
def clear(self):
is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
"""Makes a :class:`MultiDict` immutable.
.. versionadded:: 0.5
:private:
"""
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def _iter_hashitems(self):
return iteritems(self, multi=True)
def add(self, key, value):
is_immutable(self)
def popitemlist(self):
is_immutable(self)
def poplist(self, key):
is_immutable(self)
def setlist(self, key, new_list):
is_immutable(self)
def setlistdefault(self, key, default_list=None):
is_immutable(self)
class UpdateDictMixin(object):
"""Makes dicts call `self.on_update` on modifications.
.. versionadded:: 0.5
:private:
"""
on_update = None
def calls_update(name):
def oncall(self, *args, **kw):
rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
if self.on_update is not None:
self.on_update(self)
return rv
oncall.__name__ = name
return oncall
def setdefault(self, key, default=None):
modified = key not in self
rv = super(UpdateDictMixin, self).setdefault(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
def pop(self, key, default=_missing):
modified = key in self
if default is _missing:
rv = super(UpdateDictMixin, self).pop(key)
else:
rv = super(UpdateDictMixin, self).pop(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
__setitem__ = calls_update('__setitem__')
__delitem__ = calls_update('__delitem__')
clear = calls_update('clear')
popitem = calls_update('popitem')
update = calls_update('update')
del calls_update
class TypeConversionDict(dict):
"""Works like a regular dict but the :meth:`get` method can perform
type conversions. :class:`MultiDict` and :class:`CombinedMultiDict`
are subclasses of this class and provide the same feature.
.. versionadded:: 0.5
"""
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = TypeConversionDict(foo='42', bar='blub')
>>> d.get('foo', type=int)
42
>>> d.get('bar', -1, type=int)
-1
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self[key]
if type is not None:
rv = type(rv)
except (KeyError, ValueError):
rv = default
return rv
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
"""Works like a :class:`TypeConversionDict` but does not support
modifications.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
        like for any other Python immutable type (e.g. :class:`tuple`).
"""
return TypeConversionDict(self)
def __copy__(self):
return self
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class MultiDict(TypeConversionDict):
"""A :class:`MultiDict` is a dictionary subclass customized to deal with
multiple values for the same key which is for example used by the parsing
functions in the wrappers. This is necessary because some HTML form
elements pass multiple values for the same key.
:class:`MultiDict` implements all standard dictionary methods.
Internally, it saves all values for a key as a list, but the standard dict
access methods will only return the first value for a key. If you want to
gain access to the other values, too, you have to use the `list` methods as
explained below.
Basic Usage:
>>> d = MultiDict([('a', 'b'), ('a', 'c')])
>>> d
MultiDict([('a', 'b'), ('a', 'c')])
>>> d['a']
'b'
>>> d.getlist('a')
['b', 'c']
>>> 'a' in d
True
It behaves like a normal dict thus all dict functions will only return the
first value when multiple values for one key are found.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
A :class:`MultiDict` can be constructed from an iterable of
``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
onwards some keyword parameters.
:param mapping: the initial value for the :class:`MultiDict`. Either a
regular dict, an iterable of ``(key, value)`` tuples
or `None`.
"""
def __init__(self, mapping=None):
if isinstance(mapping, MultiDict):
dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
elif isinstance(mapping, dict):
tmp = {}
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
value = list(value)
else:
value = [value]
tmp[key] = value
dict.__init__(self, tmp)
else:
tmp = {}
for key, value in mapping or ():
tmp.setdefault(key, []).append(value)
dict.__init__(self, tmp)
def __getstate__(self):
return dict(self.lists())
def __setstate__(self, value):
dict.clear(self)
dict.update(self, value)
def __getitem__(self, key):
"""Return the first data value for this key;
raises KeyError if not found.
:param key: The key to be looked up.
:raise KeyError: if the key does not exist.
"""
if key in self:
return dict.__getitem__(self, key)[0]
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
"""Like :meth:`add` but removes an existing key first.
:param key: the key for the value.
:param value: the value to set.
"""
dict.__setitem__(self, key, [value])
def add(self, key, value):
"""Adds a new value for the key.
.. versionadded:: 0.6
:param key: the key for the value.
:param value: the value to add.
"""
dict.setdefault(self, key, []).append(value)
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just as `get`
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
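        A short example; items that fail the conversion are skipped:
        >>> d = MultiDict([('a', '1'), ('a', 'garbage'), ('a', '2')])
        >>> d.getlist('a', type=int)
        [1, 2]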
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result
def setlist(self, key, new_list):
"""Remove the old values for a key and add new ones. Note that the list
you pass the values in will be shallow-copied before it is inserted in
the dictionary.
>>> d = MultiDict()
>>> d.setlist('foo', ['1', '2'])
>>> d['foo']
'1'
>>> d.getlist('foo')
['1', '2']
:param key: The key for which the values are set.
:param new_list: An iterable with the new values for the key. Old values
are removed first.
"""
dict.__setitem__(self, key, list(new_list))
def setdefault(self, key, default=None):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key not in self:
self[key] = default
else:
default = self[key]
return default
def setlistdefault(self, key, default_list=None):
"""Like `setdefault` but sets multiple values. The list returned
is not a copy, but the list that is actually used internally. This
means that you can put new values into the dict by appending items
to the list:
>>> d = MultiDict({"foo": 1})
>>> d.setlistdefault("foo").extend([2, 3])
>>> d.getlist("foo")
[1, 2, 3]
:param key: The key to be looked up.
:param default: An iterable of default values. It is either copied
(in case it was a list) or converted into a list
before returned.
:return: a :class:`list`
"""
if key not in self:
default_list = list(default_list or ())
dict.__setitem__(self, key, default_list)
else:
default_list = dict.__getitem__(self, key)
return default_list
def items(self, multi=False):
"""Return an iterator of ``(key, value)`` pairs.
:param multi: If set to `True` the iterator returned will have a pair
for each value of each key. Otherwise it will only
contain pairs for the first value of each key.
"""
for key, values in iteritems(dict, self):
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
def lists(self):
"""Return a list of ``(key, values)`` pairs, where values is the list
of all values associated with the key."""
for key, values in iteritems(dict, self):
yield key, list(values)
def keys(self):
return iterkeys(dict, self)
__iter__ = keys
def values(self):
"""Returns an iterator of the first value on every key's value list."""
for values in itervalues(dict, self):
yield values[0]
def listvalues(self):
"""Return an iterator of all values associated with a key. Zipping
:meth:`keys` and this is the same as calling :meth:`lists`:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> zip(d.keys(), d.listvalues()) == d.lists()
True
"""
return itervalues(dict, self)
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self)
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
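        >>> d = MultiDict([('a', 'b'), ('a', 'c')])
        >>> d.to_dict()
        {'a': 'b'}
        >>> d.to_dict(flat=False)
        {'a': ['b', 'c']}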
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first value for each key.
:return: a :class:`dict`
"""
if flat:
return dict(iteritems(self))
return dict(self.lists())
def update(self, other_dict):
"""update() extends rather than replaces existing key lists."""
for key, value in iter_multi_items(other_dict):
MultiDict.add(self, key, value)
def pop(self, key, default=_missing):
"""Pop the first item for a list on the dict. Afterwards the
key is removed from the dict, so additional values are discarded:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> d.pop("foo")
1
>>> "foo" in d
False
:param key: the key to pop.
:param default: if provided the value to return if the key was
not in the dictionary.
"""
try:
return dict.pop(self, key)[0]
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
def popitem(self):
"""Pop an item from the dict."""
try:
item = dict.popitem(self)
return (item[0], item[1][0])
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def poplist(self, key):
"""Pop the list for a key from the dict. If the key is not in the dict
an empty list is returned.
.. versionchanged:: 0.5
If the key does no longer exist a list is returned instead of
raising an error.
"""
return dict.pop(self, key, [])
def popitemlist(self):
"""Pop a ``(key, list)`` tuple from the dict."""
try:
return dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def __copy__(self):
return self.copy()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True)))
class _omd_bucket(object):
"""Wraps values in the :class:`OrderedMultiDict`. This makes it
possible to keep an order over multiple different keys. It requires
a lot of extra memory and slows down access a lot, but makes it
possible to access elements in O(1) and iterate in O(n).
"""
__slots__ = ('prev', 'key', 'value', 'next')
def __init__(self, omd, key, value):
self.prev = omd._last_bucket
self.key = key
self.value = value
self.next = None
if omd._first_bucket is None:
omd._first_bucket = self
if omd._last_bucket is not None:
omd._last_bucket.next = self
omd._last_bucket = self
def unlink(self, omd):
if self.prev:
self.prev.next = self.next
if self.next:
self.next.prev = self.prev
if omd._first_bucket is self:
omd._first_bucket = self.next
if omd._last_bucket is self:
omd._last_bucket = self.prev
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class OrderedMultiDict(MultiDict):
"""Works like a regular :class:`MultiDict` but preserves the
order of the fields. To convert the ordered multi dict into a
list you can use the :meth:`items` method and pass it ``multi=True``.
In general an :class:`OrderedMultiDict` is an order of magnitude
slower than a :class:`MultiDict`.
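    A short example of the preserved ordering:
    >>> d = OrderedMultiDict()
    >>> d.add('foo', 1)
    >>> d.add('bar', 42)
    >>> d.add('foo', 2)
    >>> list(d.items(multi=True))
    [('foo', 1), ('bar', 42), ('foo', 2)]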
.. admonition:: note
Due to a limitation in Python you cannot convert an ordered
multi dict into a regular dict by using ``dict(multidict)``.
Instead you have to use the :meth:`to_dict` method, otherwise
the internal bucket objects are exposed.
"""
def __init__(self, mapping=None):
dict.__init__(self)
self._first_bucket = self._last_bucket = None
if mapping is not None:
OrderedMultiDict.update(self, mapping)
def __eq__(self, other):
if not isinstance(other, MultiDict):
return NotImplemented
if isinstance(other, OrderedMultiDict):
iter1 = iteritems(self, multi=True)
iter2 = iteritems(other, multi=True)
try:
for k1, v1 in iter1:
k2, v2 = next(iter2)
if k1 != k2 or v1 != v2:
return False
except StopIteration:
return False
try:
next(iter2)
except StopIteration:
return True
return False
if len(self) != len(other):
return False
for key, values in iterlists(self):
if other.getlist(key) != values:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def __getstate__(self):
return list(iteritems(self, multi=True))
def __setstate__(self, values):
dict.clear(self)
for key, value in values:
self.add(key, value)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)[0].value
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
self.poplist(key)
self.add(key, value)
def __delitem__(self, key):
self.pop(key)
def keys(self):
return (key for key, value in iteritems(self))
__iter__ = keys
def values(self):
return (value for key, value in iteritems(self))
def items(self, multi=False):
ptr = self._first_bucket
if multi:
while ptr is not None:
yield ptr.key, ptr.value
ptr = ptr.next
else:
returned_keys = set()
while ptr is not None:
if ptr.key not in returned_keys:
returned_keys.add(ptr.key)
yield ptr.key, ptr.value
ptr = ptr.next
def lists(self):
returned_keys = set()
ptr = self._first_bucket
while ptr is not None:
if ptr.key not in returned_keys:
yield ptr.key, self.getlist(ptr.key)
returned_keys.add(ptr.key)
ptr = ptr.next
def listvalues(self):
for key, values in iterlists(self):
yield values
def add(self, key, value):
dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
def getlist(self, key, type=None):
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return [x.value for x in rv]
result = []
for item in rv:
try:
result.append(type(item.value))
except ValueError:
pass
return result
def setlist(self, key, new_list):
self.poplist(key)
for value in new_list:
self.add(key, value)
def setlistdefault(self, key, default_list=None):
raise TypeError('setlistdefault is unsupported for '
'ordered multi dicts')
def update(self, mapping):
for key, value in iter_multi_items(mapping):
OrderedMultiDict.add(self, key, value)
def poplist(self, key):
buckets = dict.pop(self, key, ())
for bucket in buckets:
bucket.unlink(self)
return [x.value for x in buckets]
def pop(self, key, default=_missing):
try:
buckets = dict.pop(self, key)
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return buckets[0].value
def popitem(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, buckets[0].value
def popitemlist(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, [x.value for x in buckets]
def _options_header_vkw(value, kw):
return dump_options_header(value, dict((k.replace('_', '-'), v)
for k, v in kw.items()))
def _unicodify_header_value(value):
if isinstance(value, bytes):
value = value.decode('latin-1')
if not isinstance(value, text_type):
value = text_type(value)
return value
@native_itermethods(['keys', 'values', 'items'])
class Headers(object):
"""An object that stores some headers. It has a dict-like interface
but is ordered and can store the same keys multiple times.
This data structure is useful if you want a nicer way to handle WSGI
headers which are stored as tuples in a list.
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
and will render a page for a ``400 BAD REQUEST`` if caught in a
catch-all for HTTP exceptions.
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
To create a new :class:`Headers` object pass it a list or dict of headers
which are used as default values. This does not reuse the list passed
to the constructor for internal usage.
:param defaults: The list of default values for the :class:`Headers`.
.. versionchanged:: 0.9
This data structure now stores unicode values similar to how the
multi dicts do it. The main difference is that bytes can be set as
well which will automatically be latin1 decoded.
.. versionchanged:: 0.9
The :meth:`linked` function was removed without replacement as it
was an API that does not support the changes to the encoding model.
"""
def __init__(self, defaults=None):
self._list = []
if defaults is not None:
if isinstance(defaults, (list, Headers)):
self._list.extend(defaults)
else:
self.extend(defaults)
def __getitem__(self, key, _get_mode=False):
if not _get_mode:
if isinstance(key, integer_types):
return self._list[key]
elif isinstance(key, slice):
return self.__class__(self._list[key])
if not isinstance(key, string_types):
raise exceptions.BadRequestKeyError(key)
ikey = key.lower()
for k, v in self._list:
if k.lower() == ikey:
return v
# micro optimization: if we are in get mode we will catch that
# exception one stack level down so we can raise a standard
# key error instead of our special one.
if _get_mode:
raise KeyError()
raise exceptions.BadRequestKeyError(key)
def __eq__(self, other):
return other.__class__ is self.__class__ and \
set(other._list) == set(self._list)
def __ne__(self, other):
return not self.__eq__(other)
def get(self, key, default=None, type=None, as_bytes=False):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = Headers([('Content-Length', '42')])
>>> d.get('Content-Length', type=int)
42
If a headers object is bound you must not add unicode strings
because no encoding takes place.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
:param as_bytes: return bytes instead of unicode strings.
"""
try:
rv = self.__getitem__(key, _get_mode=True)
except KeyError:
return default
if as_bytes:
rv = rv.encode('latin1')
if type is None:
return rv
try:
return type(rv)
except ValueError:
return default
def getlist(self, key, type=None, as_bytes=False):
"""Return the list of items for a given key. If that key is not in the
:class:`Headers`, the return value will be an empty list. Just as
:meth:`get` :meth:`getlist` accepts a `type` parameter. All items will
be converted with the callable defined there.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
:param as_bytes: return bytes instead of unicode strings.
"""
ikey = key.lower()
result = []
for k, v in self:
if k.lower() == ikey:
if as_bytes:
v = v.encode('latin1')
if type is not None:
try:
v = type(v)
except ValueError:
continue
result.append(v)
return result
def get_all(self, name):
"""Return a list of all the values for the named field.
This method is compatible with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.get_all` method.
"""
return self.getlist(name)
def items(self, lower=False):
for key, value in self:
if lower:
key = key.lower()
yield key, value
def keys(self, lower=False):
for key, _ in iteritems(self, lower):
yield key
def values(self):
for _, value in iteritems(self):
yield value
def extend(self, iterable):
"""Extend the headers with a dict or an iterable yielding keys and
values.
"""
if isinstance(iterable, dict):
for key, value in iteritems(iterable):
if isinstance(value, (tuple, list)):
for v in value:
self.add(key, v)
else:
self.add(key, value)
else:
for key, value in iterable:
self.add(key, value)
def __delitem__(self, key, _index_operation=True):
if _index_operation and isinstance(key, (integer_types, slice)):
del self._list[key]
return
key = key.lower()
new = []
for k, v in self._list:
if k.lower() != key:
new.append((k, v))
self._list[:] = new
def remove(self, key):
"""Remove a key.
:param key: The key to be removed.
"""
return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
"""Removes and returns a key or index.
:param key: The key to be popped. If this is an integer the item at
that position is removed, if it's a string the value for
that key is. If the key is omitted or `None` the last
item is removed.
:return: an item.
"""
if key is None:
return self._list.pop()
if isinstance(key, integer_types):
return self._list.pop(key)
try:
rv = self[key]
self.remove(key)
except KeyError:
if default is not _missing:
return default
raise
return rv
def popitem(self):
"""Removes a key or index and returns a (key, value) item."""
return self.pop()
def __contains__(self, key):
"""Check if a key is present."""
try:
self.__getitem__(key, _get_mode=True)
except KeyError:
return False
return True
has_key = __contains__
def __iter__(self):
"""Yield ``(key, value)`` tuples."""
return iter(self._list)
def __len__(self):
return len(self._list)
def add(self, _key, _value, **kw):
"""Add a new header tuple to the list.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes::
>>> d = Headers()
>>> d.add('Content-Type', 'text/plain')
>>> d.add('Content-Disposition', 'attachment', filename='foo.png')
The keyword argument dumping uses :func:`dump_options_header`
behind the scenes.
.. versionadded:: 0.4.1
keyword arguments were added for :mod:`wsgiref` compatibility.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
self._list.append((_key, _value))
def _validate_value(self, value):
if not isinstance(value, text_type):
raise TypeError('Value should be unicode.')
if u'\n' in value or u'\r' in value:
raise ValueError('Detected newline in header value. This is '
'a potential security problem')
def add_header(self, _key, _value, **_kw):
"""Add a new header tuple to the list.
An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.add_header` method.
"""
self.add(_key, _value, **_kw)
def clear(self):
"""Clears all headers."""
del self._list[:]
def set(self, _key, _value, **kw):
"""Remove all header tuples for `key` and add a new one. The newly
added key either appears at the end of the list if there was no
entry or replaces the first one.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes. See :meth:`add` for
more information.
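        >>> d = Headers([('Content-Type', 'text/plain')])
        >>> d.set('Content-Type', 'text/html')
        >>> len(d)
        1
        >>> print d['Content-Type']
        text/html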
.. versionchanged:: 0.6.1
:meth:`set` now accepts the same arguments as :meth:`add`.
:param key: The key to be inserted.
:param value: The value to be inserted.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
if not self._list:
self._list.append((_key, _value))
return
listiter = iter(self._list)
ikey = _key.lower()
for idx, (old_key, old_value) in enumerate(listiter):
if old_key.lower() == ikey:
                # replace first occurrence
self._list[idx] = (_key, _value)
break
else:
self._list.append((_key, _value))
return
self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey]
def setdefault(self, key, value):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key in self:
return self[key]
self.set(key, value)
return value
def __setitem__(self, key, value):
"""Like :meth:`set` but also supports index/slice based setting."""
if isinstance(key, (slice, integer_types)):
if isinstance(key, integer_types):
value = [value]
value = [(k, _unicodify_header_value(v)) for (k, v) in value]
[self._validate_value(v) for (k, v) in value]
if isinstance(key, integer_types):
self._list[key] = value[0]
else:
self._list[key] = value
else:
self.set(key, value)
def to_list(self, charset='iso-8859-1'):
"""Convert the headers into a list suitable for WSGI."""
from warnings import warn
warn(DeprecationWarning('Method removed, use to_wsgi_list instead'),
stacklevel=2)
return self.to_wsgi_list()
def to_wsgi_list(self):
"""Convert the headers into a list suitable for WSGI.
The values are byte strings in Python 2 converted to latin1 and unicode
strings in Python 3 for the WSGI server to encode.
:return: list
"""
if PY2:
return [(k, v.encode('latin1')) for k, v in self]
return list(self)
def copy(self):
return self.__class__(self._list)
def __copy__(self):
return self.copy()
def __str__(self):
"""Returns formatted headers suitable for HTTP transmission."""
strs = []
for key, value in self.to_wsgi_list():
strs.append('%s: %s' % (key, value))
strs.append('\r\n')
return '\r\n'.join(strs)
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
list(self)
)
class ImmutableHeadersMixin(object):
"""Makes a :class:`Headers` immutable. We do not mark them as
    hashable though since the only use case for this data structure
in Werkzeug is a view on a mutable structure.
.. versionadded:: 0.5
:private:
"""
def __delitem__(self, key):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
set = __setitem__
def add(self, item):
is_immutable(self)
remove = add_header = add
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def popitem(self):
is_immutable(self)
def setdefault(self, key, default):
is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
"""Read only version of the headers from a WSGI environment. This
provides the same interface as `Headers` and is constructed from
a WSGI environment.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
HTTP exceptions.
"""
def __init__(self, environ):
self.environ = environ
def __eq__(self, other):
return self.environ is other.environ
def __getitem__(self, key, _get_mode=False):
# _get_mode is a no-op for this class as there is no index but
# used because get() calls it.
key = key.upper().replace('-', '_')
if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
return _unicodify_header_value(self.environ[key])
return _unicodify_header_value(self.environ['HTTP_' + key])
def __len__(self):
# the iter is necessary because otherwise list calls our
# len which would call list again and so forth.
return len(list(iter(self)))
def __iter__(self):
for key, value in iteritems(self.environ):
if key.startswith('HTTP_') and key not in \
('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
yield (key[5:].replace('_', '-').title(),
_unicodify_header_value(value))
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield (key.replace('_', '-').title(),
_unicodify_header_value(value))
def copy(self):
raise TypeError('cannot create %r copies' % self.__class__.__name__)
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
"""A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
instances as sequence and it will combine the return values of all wrapped
dicts:
>>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
>>> post = MultiDict([('foo', 'bar')])
>>> get = MultiDict([('blub', 'blah')])
>>> combined = CombinedMultiDict([get, post])
>>> combined['foo']
'bar'
>>> combined['blub']
'blah'
This works for all read operations and will raise a `TypeError` for
methods that usually change data which isn't possible.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
"""
def __reduce_ex__(self, protocol):
return type(self), (self.dicts,)
def __init__(self, dicts=None):
self.dicts = dicts or []
@classmethod
def fromkeys(cls):
raise TypeError('cannot create %r instances by fromkeys' %
cls.__name__)
def __getitem__(self, key):
for d in self.dicts:
if key in d:
return d[key]
raise exceptions.BadRequestKeyError(key)
def get(self, key, default=None, type=None):
for d in self.dicts:
if key in d:
if type is not None:
try:
return type(d[key])
except ValueError:
continue
return d[key]
return default
def getlist(self, key, type=None):
rv = []
for d in self.dicts:
rv.extend(d.getlist(key, type))
return rv
def keys(self):
rv = set()
for d in self.dicts:
rv.update(d.keys())
return iter(rv)
__iter__ = keys
def items(self, multi=False):
found = set()
for d in self.dicts:
for key, value in iteritems(d, multi):
if multi:
yield key, value
elif key not in found:
found.add(key)
yield key, value
def values(self):
for key, value in iteritems(self):
yield value
def lists(self):
rv = {}
for d in self.dicts:
for key, values in iterlists(d):
rv.setdefault(key, []).extend(values)
return iteritems(rv)
def listvalues(self):
return (x[1] for x in self.lists())
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self.dicts[:])
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first item for each key.
:return: a :class:`dict`
"""
rv = {}
for d in reversed(self.dicts):
rv.update(d.to_dict(flat))
return rv
def __len__(self):
        # keys() returns an iterator (a list only on Python 2), so
        # materialize it before taking the length
        return len(list(self.keys()))
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
has_key = __contains__
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.dicts)
class FileMultiDict(MultiDict):
"""A special :class:`MultiDict` that has convenience methods to add
files to it. This is used for :class:`EnvironBuilder` and generally
useful for unittesting.
.. versionadded:: 0.5
"""
def add_file(self, name, file, filename=None, content_type=None):
"""Adds a new file to the dict. `file` can be a file name or
a :class:`file`-like or a :class:`FileStorage` object.
:param name: the name of the field.
:param file: a filename or :class:`file`-like object
:param filename: an optional filename
:param content_type: an optional content type
"""
if isinstance(file, FileStorage):
value = file
else:
if isinstance(file, string_types):
if filename is None:
filename = file
file = open(file, 'rb')
if filename and content_type is None:
content_type = mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
value = FileStorage(file, filename, name, content_type)
self.add(name, value)
class ImmutableDict(ImmutableDictMixin, dict):
"""An immutable :class:`dict`.
.. versionadded:: 0.5
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
dict.__repr__(self),
)
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
        like for any other Python immutable type (e.g. :class:`tuple`).
"""
return dict(self)
def __copy__(self):
return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
"""An immutable :class:`MultiDict`.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
        like for any other Python immutable type (e.g. :class:`tuple`).
"""
return MultiDict(self)
def __copy__(self):
return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
"""An immutable :class:`OrderedMultiDict`.
.. versionadded:: 0.6
"""
def _iter_hashitems(self):
return enumerate(iteritems(self, multi=True))
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
        like for any other Python immutable type (e.g. :class:`tuple`).
"""
return OrderedMultiDict(self)
def __copy__(self):
return self
@native_itermethods(['values'])
class Accept(ImmutableList):
"""An :class:`Accept` object is just a list subclass for lists of
``(value, quality)`` tuples. It is automatically sorted by quality.
All :class:`Accept` objects work similar to a list but provide extra
functionality for working with the data. Containment checks are
normalized to the rules of that header:
>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
>>> a.best
'ISO-8859-1'
>>> 'iso-8859-1' in a
True
>>> 'UTF8' in a
True
>>> 'utf7' in a
False
To get the quality for an item you can use normal item lookup:
>>> print a['utf-8']
0.7
>>> a['utf7']
0
.. versionchanged:: 0.5
:class:`Accept` objects are forced immutable now.
"""
def __init__(self, values=()):
if values is None:
list.__init__(self)
self.provided = False
elif isinstance(values, Accept):
self.provided = values.provided
list.__init__(self, values)
else:
self.provided = True
values = [(a, b) for b, a in values]
values.sort()
values.reverse()
list.__init__(self, [(a, b) for b, a in values])
def _value_matches(self, value, item):
"""Check if a value matches a given accept item."""
return item == '*' or item.lower() == value.lower()
def __getitem__(self, key):
"""Besides index lookup (getting item n) you can also pass it a string
to get the quality for the item. If the item is not in the list, the
returned quality is ``0``.
"""
if isinstance(key, string_types):
return self.quality(key)
return list.__getitem__(self, key)
def quality(self, key):
"""Returns the quality of the key.
.. versionadded:: 0.6
In previous versions you had to use the item-lookup syntax
(eg: ``obj[key]`` instead of ``obj.quality(key)``)
"""
for item, quality in self:
if self._value_matches(key, item):
return quality
return 0
def __contains__(self, value):
for item, quality in self:
if self._value_matches(value, item):
return True
return False
def __repr__(self):
return '%s([%s])' % (
self.__class__.__name__,
', '.join('(%r, %s)' % (x, y) for x, y in self)
)
def index(self, key):
"""Get the position of an entry or raise :exc:`ValueError`.
:param key: The key to be looked up.
.. versionchanged:: 0.5
This used to raise :exc:`IndexError`, which was inconsistent
with the list API.
"""
if isinstance(key, string_types):
for idx, (item, quality) in enumerate(self):
if self._value_matches(key, item):
return idx
raise ValueError(key)
return list.index(self, key)
def find(self, key):
"""Get the position of an entry or return -1.
:param key: The key to be looked up.
"""
try:
return self.index(key)
except ValueError:
return -1
def values(self):
"""Iterate over all values."""
for item in self:
yield item[0]
def to_header(self):
"""Convert the header set into an HTTP header string."""
result = []
for value, quality in self:
if quality != 1:
value = '%s;q=%s' % (value, quality)
result.append(value)
return ','.join(result)
def __str__(self):
return self.to_header()
def best_match(self, matches, default=None):
"""Returns the best match from a list of possible matches based
on the quality of the client. If two items have the same quality,
the one is returned that comes first.
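        >>> accept = Accept([('ISO-8859-1', 1), ('utf-8', 0.7)])
        >>> accept.best_match(['utf-8', 'iso-8859-1'])
        'iso-8859-1'
        >>> accept.best_match(['utf-7', 'utf-16'], default='utf-8')
        'utf-8'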
:param matches: a list of matches to check for
:param default: the value that is returned if none match
"""
best_quality = -1
result = default
for server_item in matches:
for client_item, quality in self:
if quality <= best_quality:
break
if self._value_matches(server_item, client_item):
best_quality = quality
result = server_item
return result
@property
def best(self):
"""The best match as value."""
if self:
return self[0][0]
class MIMEAccept(Accept):
"""Like :class:`Accept` but with special methods and behavior for
mimetypes.
"""
def _value_matches(self, value, item):
def _normalize(x):
x = x.lower()
return x == '*' and ('*', '*') or x.split('/', 1)
# this is from the application which is trusted. to avoid developer
# frustration we actually check these for valid values
if '/' not in value:
raise ValueError('invalid mimetype %r' % value)
value_type, value_subtype = _normalize(value)
if value_type == '*' and value_subtype != '*':
raise ValueError('invalid mimetype %r' % value)
if '/' not in item:
return False
item_type, item_subtype = _normalize(item)
if item_type == '*' and item_subtype != '*':
return False
return (
(item_type == item_subtype == '*' or
value_type == value_subtype == '*') or
(item_type == value_type and (item_subtype == '*' or
value_subtype == '*' or
item_subtype == value_subtype))
)
@property
def accept_html(self):
"""True if this object accepts HTML."""
return (
'text/html' in self or
'application/xhtml+xml' in self or
self.accept_xhtml
)
@property
def accept_xhtml(self):
"""True if this object accepts XHTML."""
return (
'application/xhtml+xml' in self or
'application/xml' in self
)
@property
def accept_json(self):
"""True if this object accepts JSON."""
return 'application/json' in self
class LanguageAccept(Accept):
"""Like :class:`Accept` but with normalization for languages."""
def _value_matches(self, value, item):
def _normalize(language):
return _locale_delim_re.split(language.lower())
return item == '*' or _normalize(value) == _normalize(item)
class CharsetAccept(Accept):
"""Like :class:`Accept` but with normalization for charsets."""
def _value_matches(self, value, item):
def _normalize(name):
try:
return codecs.lookup(name).name
except LookupError:
return name.lower()
return item == '*' or _normalize(value) == _normalize(item)
def cache_property(key, empty, type):
"""Return a new property object for a cache header. Useful if you
    want to add support for a cache extension in a subclass.  For
    example, a subclass could add a (hypothetical) ``community``
    directive like this::
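        class CommunityCacheControl(ResponseCacheControl):
            community = cache_property('community', None, None)
    """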
return property(lambda x: x._get_cache_value(key, empty, type),
lambda x, v: x._set_cache_value(key, v, type),
lambda x: x._del_cache_value(key),
'accessor for %r' % key)
class _CacheControl(UpdateDictMixin, dict):
"""Subclass of a dict that stores values for a Cache-Control header. It
has accessors for all the cache-control directives specified in RFC 2616.
The class does not differentiate between request and response directives.
    Because the cache-control directives in the HTTP header use dashes,
    the Python descriptors use underscores instead.
To get a header of the :class:`CacheControl` object again you can convert
the object into a string or call the :meth:`to_header` method. If you plan
to subclass it and add your own items have a look at the sourcecode for
that class.
.. versionchanged:: 0.4
Setting `no_cache` or `private` to boolean `True` will set the implicit
none-value which is ``*``:
>>> cc = ResponseCacheControl()
>>> cc.no_cache = True
>>> cc
<ResponseCacheControl 'no-cache'>
>>> cc.no_cache
'*'
>>> cc.no_cache = None
>>> cc
<ResponseCacheControl ''>
In versions before 0.5 the behavior documented here affected the now
no longer existing `CacheControl` class.
"""
no_cache = cache_property('no-cache', '*', None)
no_store = cache_property('no-store', None, bool)
max_age = cache_property('max-age', -1, int)
no_transform = cache_property('no-transform', None, None)
def __init__(self, values=(), on_update=None):
dict.__init__(self, values or ())
self.on_update = on_update
self.provided = values is not None
def _get_cache_value(self, key, empty, type):
"""Used internally by the accessor properties."""
if type is bool:
return key in self
if key in self:
value = self[key]
if value is None:
return empty
elif type is not None:
try:
value = type(value)
except ValueError:
pass
return value
def _set_cache_value(self, key, value, type):
"""Used internally by the accessor properties."""
if type is bool:
if value:
self[key] = None
else:
self.pop(key, None)
else:
if value is None:
self.pop(key)
elif value is True:
self[key] = None
else:
self[key] = value
def _del_cache_value(self, key):
"""Used internally by the accessor properties."""
if key in self:
del self[key]
def to_header(self):
"""Convert the stored values into a cache control header."""
return dump_header(self)
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
"""A cache control for requests. This is immutable and gives access
to all the request-relevant cache control headers.
To get a header of the :class:`RequestCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
max_stale = cache_property('max-stale', '*', int)
min_fresh = cache_property('min-fresh', '*', int)
no_transform = cache_property('no-transform', None, None)
only_if_cached = cache_property('only-if-cached', None, bool)
class ResponseCacheControl(_CacheControl):
"""A cache control for responses. Unlike :class:`RequestCacheControl`
this is mutable and gives access to response-relevant cache control
headers.
To get a header of the :class:`ResponseCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
public = cache_property('public', None, bool)
private = cache_property('private', '*', None)
must_revalidate = cache_property('must-revalidate', None, bool)
proxy_revalidate = cache_property('proxy-revalidate', None, bool)
s_maxage = cache_property('s-maxage', None, None)
# attach cache_property to the _CacheControl as staticmethod
# so that others can reuse it.
_CacheControl.cache_property = staticmethod(cache_property)
class CallbackDict(UpdateDictMixin, dict):
"""A dict that calls a function passed every time something is changed.
The function is passed the dict instance.
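    A small usage sketch:
    >>> events = []
    >>> d = CallbackDict(on_update=events.append)
    >>> d['foo'] = 'bar'
    >>> len(events)
    1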
"""
def __init__(self, initial=None, on_update=None):
dict.__init__(self, initial or ())
self.on_update = on_update
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
dict.__repr__(self)
)
class HeaderSet(object):
"""Similar to the :class:`ETags` class this implements a set-like structure.
Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
content-language headers.
If not constructed using the :func:`parse_set_header` function the
instantiation works like this:
>>> hs = HeaderSet(['foo', 'bar', 'baz'])
>>> hs
HeaderSet(['foo', 'bar', 'baz'])
"""
def __init__(self, headers=None, on_update=None):
self._headers = list(headers or ())
self._set = set([x.lower() for x in self._headers])
self.on_update = on_update
def add(self, header):
"""Add a new header to the set."""
self.update((header,))
def remove(self, header):
"""Remove a header from the set. This raises an :exc:`KeyError` if the
header is not in the set.
.. versionchanged:: 0.5
           In older versions an :exc:`IndexError` was raised instead of a
:exc:`KeyError` if the object was missing.
:param header: the header to be removed.
"""
        key = header.lower()
        if key not in self._set:
            raise KeyError(header)
        self._set.remove(key)
        for idx, stored in enumerate(self._headers):
            # compare case insensitively, otherwise a mixed-case header
            # would stay in the ordered list after being removed from
            # the set
            if stored.lower() == key:
                del self._headers[idx]
                break
if self.on_update is not None:
self.on_update(self)
def update(self, iterable):
"""Add all the headers from the iterable to the set.
:param iterable: updates the set with the items from the iterable.
"""
inserted_any = False
for header in iterable:
key = header.lower()
if key not in self._set:
self._headers.append(header)
self._set.add(key)
inserted_any = True
if inserted_any and self.on_update is not None:
self.on_update(self)
def discard(self, header):
"""Like :meth:`remove` but ignores errors.
:param header: the header to be discarded.
"""
try:
return self.remove(header)
except KeyError:
pass
def find(self, header):
"""Return the index of the header in the set or return -1 if not found.
:param header: the header to be looked up.
"""
header = header.lower()
for idx, item in enumerate(self._headers):
if item.lower() == header:
return idx
return -1
def index(self, header):
"""Return the index of the header in the set or raise an
:exc:`IndexError`.
:param header: the header to be looked up.
"""
rv = self.find(header)
if rv < 0:
raise IndexError(header)
return rv
def clear(self):
"""Clear the set."""
self._set.clear()
del self._headers[:]
if self.on_update is not None:
self.on_update(self)
def as_set(self, preserve_casing=False):
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers)
return set(self._set)
def to_header(self):
"""Convert the header set into an HTTP header string."""
return ', '.join(map(quote_header_value, self._headers))
def __getitem__(self, idx):
return self._headers[idx]
def __delitem__(self, idx):
rv = self._headers.pop(idx)
self._set.remove(rv.lower())
if self.on_update is not None:
self.on_update(self)
def __setitem__(self, idx, value):
old = self._headers[idx]
self._set.remove(old.lower())
self._headers[idx] = value
self._set.add(value.lower())
if self.on_update is not None:
self.on_update(self)
def __contains__(self, header):
return header.lower() in self._set
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._headers)
def __nonzero__(self):
return bool(self._set)
def __str__(self):
return self.to_header()
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self._headers
)
class ETags(object):
"""A set that can be used to check if one etag is present in a collection
of etags.
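    >>> etags = ETags(['one', 'two'], weak_etags=['three'])
    >>> 'one' in etags
    True
    >>> etags.is_weak('three')
    True
    >>> 'three' in etags
    False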
"""
def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
self._strong = frozenset(not star_tag and strong_etags or ())
self._weak = frozenset(weak_etags or ())
self.star_tag = star_tag
def as_set(self, include_weak=False):
"""Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set."""
rv = set(self._strong)
if include_weak:
rv.update(self._weak)
return rv
def is_weak(self, etag):
"""Check if an etag is weak."""
return etag in self._weak
def contains_weak(self, etag):
"""Check if an etag is part of the set including weak and strong tags."""
return self.is_weak(etag) or self.contains(etag)
def contains(self, etag):
"""Check if an etag is part of the set ignoring weak tags.
It is also possible to use the ``in`` operator.
"""
if self.star_tag:
return True
return etag in self._strong
def contains_raw(self, etag):
"""When passed a quoted tag it will check if this tag is part of the
set. If the tag is weak it is checked against weak and strong tags,
otherwise strong only."""
etag, weak = unquote_etag(etag)
if weak:
return self.contains_weak(etag)
return self.contains(etag)
def to_header(self):
"""Convert the etags set into a HTTP header string."""
if self.star_tag:
return '*'
return ', '.join(
['"%s"' % x for x in self._strong] +
['w/"%s"' % x for x in self._weak]
)
def __call__(self, etag=None, data=None, include_weak=False):
if [etag, data].count(None) != 1:
            raise TypeError('exactly one of etag or data must be provided')
if etag is None:
etag = generate_etag(data)
if include_weak:
if etag in self._weak:
return True
return etag in self._strong
def __nonzero__(self):
return bool(self.star_tag or self._strong or self._weak)
def __str__(self):
return self.to_header()
def __iter__(self):
return iter(self._strong)
def __contains__(self, etag):
return self.contains(etag)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class IfRange(object):
"""Very simple object that represents the `If-Range` header in parsed
    form. It will have either an etag or a date, or neither of them,
    but never both.
.. versionadded:: 0.7
"""
def __init__(self, etag=None, date=None):
#: The etag parsed and unquoted. Ranges always operate on strong
#: etags so the weakness information is not necessary.
self.etag = etag
#: The date in parsed format or `None`.
self.date = date
def to_header(self):
"""Converts the object back into an HTTP header."""
if self.date is not None:
return http_date(self.date)
if self.etag is not None:
return quote_etag(self.etag)
return ''
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Range(object):
"""Represents a range header. All the methods are only supporting bytes
as unit. It does store multiple ranges but :meth:`range_for_length` will
only work if only one range is provided.
.. versionadded:: 0.7
"""
def __init__(self, units, ranges):
#: The units of this range. Usually "bytes".
self.units = units
#: A list of ``(begin, end)`` tuples for the range header provided.
#: The ranges are non-inclusive.
self.ranges = ranges
def range_for_length(self, length):
"""If the range is for bytes, the length is not None and there is
exactly one range and it is satisfiable it returns a ``(start, stop)``
tuple, otherwise `None`.
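        >>> Range('bytes', [(0, 500)]).range_for_length(1000)
        (0, 500)
        >>> Range('bytes', [(-500, None)]).range_for_length(1000)
        (500, 1000)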
"""
if self.units != 'bytes' or length is None or len(self.ranges) != 1:
return None
start, end = self.ranges[0]
if end is None:
end = length
if start < 0:
start += length
if is_byte_range_valid(start, end, length):
return start, min(end, length)
def make_content_range(self, length):
"""Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
"""
rng = self.range_for_length(length)
if rng is not None:
return ContentRange(self.units, rng[0], rng[1], length)
def to_header(self):
"""Converts the object back into an HTTP header."""
ranges = []
for begin, end in self.ranges:
if end is None:
ranges.append(begin >= 0 and '%s-' % begin or str(begin))
else:
ranges.append('%s-%s' % (begin, end - 1))
return '%s=%s' % (self.units, ','.join(ranges))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class ContentRange(object):
"""Represents the content range header.
.. versionadded:: 0.7
"""
def __init__(self, units, start, stop, length=None, on_update=None):
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self.on_update = on_update
self.set(start, stop, length, units)
def _callback_property(name):
def fget(self):
return getattr(self, name)
def fset(self, value):
setattr(self, name, value)
if self.on_update is not None:
self.on_update(self)
return property(fget, fset)
#: The units to use, usually "bytes"
units = _callback_property('_units')
#: The start point of the range or `None`.
start = _callback_property('_start')
#: The stop point of the range (non-inclusive) or `None`. Can only be
#: `None` if also start is `None`.
stop = _callback_property('_stop')
#: The length of the range or `None`.
length = _callback_property('_length')
def set(self, start, stop, length=None, units='bytes'):
"""Simple method to update the ranges."""
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self._units = units
self._start = start
self._stop = stop
self._length = length
if self.on_update is not None:
self.on_update(self)
def unset(self):
"""Sets the units to `None` which indicates that the header should
no longer be used.
"""
self.set(None, None, units=None)
def to_header(self):
if self.units is None:
return ''
if self.length is None:
length = '*'
else:
length = self.length
if self.start is None:
return '%s */%s' % (self.units, length)
return '%s %s-%s/%s' % (
self.units,
self.start,
self.stop - 1,
length
)
def __nonzero__(self):
return self.units is not None
__bool__ = __nonzero__
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Authorization(ImmutableDictMixin, dict):
"""Represents an `Authorization` header sent by the client. You should
not create this kind of object yourself but use it when it's returned by
the `parse_authorization_header` function.
This object is a dict subclass and can be altered by setting dict items
but it should be considered immutable as it's returned by the client and
not meant for modifications.
.. versionchanged:: 0.5
This object became immutable.
"""
def __init__(self, auth_type, data=None):
dict.__init__(self, data or {})
self.type = auth_type
username = property(lambda x: x.get('username'), doc='''
The username transmitted. This is set for both basic and digest
auth all the time.''')
password = property(lambda x: x.get('password'), doc='''
When the authentication type is basic this is the password
transmitted by the client, else `None`.''')
realm = property(lambda x: x.get('realm'), doc='''
This is the server realm sent back for HTTP digest auth.''')
nonce = property(lambda x: x.get('nonce'), doc='''
The nonce the server sent for digest auth, sent back by the client.
A nonce should be unique for every 401 response for HTTP digest
auth.''')
uri = property(lambda x: x.get('uri'), doc='''
The URI from Request-URI of the Request-Line; duplicated because
proxies are allowed to change the Request-Line in transit. HTTP
digest auth only.''')
nc = property(lambda x: x.get('nc'), doc='''
The nonce count value transmitted by clients if a qop-header is
also transmitted. HTTP digest auth only.''')
cnonce = property(lambda x: x.get('cnonce'), doc='''
If the server sent a qop-header in the ``WWW-Authenticate``
header, the client has to provide this value for HTTP digest auth.
See the RFC for more details.''')
response = property(lambda x: x.get('response'), doc='''
A string of 32 hex digits computed as defined in RFC 2617, which
proves that the user knows a password. Digest auth only.''')
opaque = property(lambda x: x.get('opaque'), doc='''
The opaque header from the server returned unchanged by the client.
It is recommended that this string be base64 or hexadecimal data.
Digest auth only.''')
@property
def qop(self):
"""Indicates what "quality of protection" the client has applied to
the message for HTTP digest auth."""
def on_update(header_set):
if not header_set and 'qop' in self:
del self['qop']
elif header_set:
self['qop'] = header_set.to_header()
return parse_set_header(self.get('qop'), on_update)
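# Illustrative usage sketch (not part of the original module; in practice an
# ``Authorization`` object comes from ``parse_authorization_header`` rather
# than being built by hand):
#
#   >>> auth = Authorization('basic', {'username': 'u', 'password': 'p'})
#   >>> auth.username, auth.password
#   ('u', 'p')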
class WWWAuthenticate(UpdateDictMixin, dict):
"""Provides simple access to `WWW-Authenticate` headers."""
#: list of keys that require quoting in the generated header
_require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm'])
def __init__(self, auth_type=None, values=None, on_update=None):
dict.__init__(self, values or ())
if auth_type:
self['__auth_type__'] = auth_type
self.on_update = on_update
def set_basic(self, realm='authentication required'):
"""Clear the auth info and enable basic auth."""
dict.clear(self)
dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
if self.on_update:
self.on_update(self)
def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
algorithm=None, stale=False):
"""Clear the auth info and enable digest auth."""
d = {
'__auth_type__': 'digest',
'realm': realm,
'nonce': nonce,
'qop': dump_header(qop)
}
if stale:
d['stale'] = 'TRUE'
if opaque is not None:
d['opaque'] = opaque
if algorithm is not None:
d['algorithm'] = algorithm
dict.clear(self)
dict.update(self, d)
if self.on_update:
self.on_update(self)
def to_header(self):
"""Convert the stored values into a WWW-Authenticate header."""
d = dict(self)
auth_type = d.pop('__auth_type__', None) or 'basic'
return '%s %s' % (auth_type.title(), ', '.join([
'%s=%s' % (key, quote_header_value(value,
allow_token=key not in self._require_quoting))
for key, value in iteritems(d)
]))
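    # Illustrative sketch (not part of the original module):
    #
    #   >>> wa = WWWAuthenticate()
    #   >>> wa.set_basic(realm='login required')
    #   >>> wa.to_header()
    #   'Basic realm="login required"'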
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
def auth_property(name, doc=None):
"""A static helper function for subclasses to add extra authentication
system properties onto a class::
class FooAuthenticate(WWWAuthenticate):
special_realm = auth_property('special_realm')
    For more information, have a look at the source code to see how the
regular properties (:attr:`realm` etc.) are implemented.
"""
def _set_value(self, value):
if value is None:
self.pop(name, None)
else:
self[name] = str(value)
return property(lambda x: x.get(name), _set_value, doc=doc)
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self:
del self[name]
elif header_set:
self[name] = header_set.to_header()
return parse_set_header(self.get(name), on_update)
return property(fget, doc=doc)
type = auth_property('__auth_type__', doc='''
The type of the auth mechanism. HTTP currently specifies
`Basic` and `Digest`.''')
realm = auth_property('realm', doc='''
A string to be displayed to users so they know which username and
password to use. This string should contain at least the name of
the host performing the authentication and might additionally
indicate the collection of users who might have access.''')
domain = _set_property('domain', doc='''
A list of URIs that define the protection space. If a URI is an
absolute path, it is relative to the canonical root URL of the
server being accessed.''')
nonce = auth_property('nonce', doc='''
A server-specified data string which should be uniquely generated
each time a 401 response is made. It is recommended that this
string be base64 or hexadecimal data.''')
opaque = auth_property('opaque', doc='''
A string of data, specified by the server, which should be returned
by the client unchanged in the Authorization header of subsequent
requests with URIs in the same protection space. It is recommended
that this string be base64 or hexadecimal data.''')
algorithm = auth_property('algorithm', doc='''
A string indicating a pair of algorithms used to produce the digest
and a checksum. If this is not present it is assumed to be "MD5".
If the algorithm is not understood, the challenge should be ignored
(and a different one used, if there is more than one).''')
qop = _set_property('qop', doc='''
        A set of quality-of-protection directives such as auth and auth-int.''')
def _get_stale(self):
val = self.get('stale')
if val is not None:
return val.lower() == 'true'
def _set_stale(self, value):
if value is None:
self.pop('stale', None)
else:
self['stale'] = value and 'TRUE' or 'FALSE'
stale = property(_get_stale, _set_stale, doc='''
A flag, indicating that the previous request from the client was
rejected because the nonce value was stale.''')
del _get_stale, _set_stale
# make auth_property a staticmethod so that subclasses of
# `WWWAuthenticate` can use it for new properties.
auth_property = staticmethod(auth_property)
del _set_property
class FileStorage(object):
"""The :class:`FileStorage` class is a thin wrapper over incoming files.
It is used by the request object to represent uploaded files. All the
attributes of the wrapper stream are proxied by the file storage so
it's possible to do ``storage.read()`` instead of the long form
``storage.stream.read()``.
"""
def __init__(self, stream=None, filename=None, name=None,
content_type=None, content_length=None,
headers=None):
self.name = name
self.stream = stream or _empty_stream
# if no filename is provided we can attempt to get the filename
# from the stream object passed. There we have to be careful to
# skip things like <fdopen>, <stderr> etc. Python marks these
        # special filenames with angle brackets.
if filename is None:
filename = getattr(stream, 'name', None)
s = make_literal_wrapper(filename)
if filename and filename[0] == s('<') and filename[-1] == s('>'):
filename = None
# On Python 3 we want to make sure the filename is always unicode.
# This might not be if the name attribute is bytes due to the
# file being opened from the bytes API.
if not PY2 and isinstance(filename, bytes):
filename = filename.decode(sys.getfilesystemencoding(),
'replace')
self.filename = filename
if headers is None:
headers = Headers()
self.headers = headers
if content_type is not None:
headers['Content-Type'] = content_type
if content_length is not None:
headers['Content-Length'] = str(content_length)
def _parse_content_type(self):
if not hasattr(self, '_parsed_content_type'):
self._parsed_content_type = \
parse_options_header(self.content_type)
@property
def content_type(self):
"""The content-type sent in the header. Usually not available"""
return self.headers.get('content-type')
@property
def content_length(self):
"""The content-length sent in the header. Usually not available"""
return int(self.headers.get('content-length') or 0)
@property
def mimetype(self):
"""Like :attr:`content_type` but without parameters (eg, without
charset, type etc.). For example if the content
type is ``text/html; charset=utf-8`` the mimetype would be
``'text/html'``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[0]
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[1]
def save(self, dst, buffer_size=16384):
"""Save the file to a destination path or file object. If the
destination is a file object you have to close it yourself after the
call. The buffer size is the number of bytes held in memory during
the copy process. It defaults to 16KB.
For secure file saving also have a look at :func:`secure_filename`.
:param dst: a filename or open file object the uploaded file
is saved to.
:param buffer_size: the size of the buffer. This works the same as
the `length` parameter of
:func:`shutil.copyfileobj`.
"""
from shutil import copyfileobj
close_dst = False
if isinstance(dst, string_types):
dst = open(dst, 'wb')
close_dst = True
try:
copyfileobj(self.stream, dst, buffer_size)
finally:
if close_dst:
dst.close()
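    # Illustrative sketch (not part of the original module; assumes a
    # request object exposing ``files`` and werkzeug's ``secure_filename``
    # helper):
    #
    #   fs = request.files['upload']
    #   fs.save('/tmp/' + secure_filename(fs.filename))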
def close(self):
"""Close the underlying file if possible."""
try:
self.stream.close()
except Exception:
pass
def __nonzero__(self):
return bool(self.filename)
def __getattr__(self, name):
return getattr(self.stream, name)
def __iter__(self):
return iter(self.readline, '')
def __repr__(self):
return '<%s: %r (%r)>' % (
self.__class__.__name__,
self.filename,
self.content_type
)
# circular dependencies
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
quote_header_value, parse_set_header, unquote_etag, quote_etag, \
parse_options_header, http_date, is_byte_range_valid
from werkzeug import exceptions
| mit |
xydinesh/flask-restful | werkzeug/testsuite/contrib/securecookie.py | 66 | 1764 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.securecookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the secure cookie.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.utils import parse_cookie
from werkzeug.wrappers import Request, Response
from werkzeug.contrib.securecookie import SecureCookie
class SecureCookieTestCase(WerkzeugTestCase):
def test_basic_support(self):
c = SecureCookie(secret_key='foo')
assert c.new
print c.modified, c.should_save
assert not c.modified
assert not c.should_save
c['x'] = 42
assert c.modified
assert c.should_save
s = c.serialize()
c2 = SecureCookie.unserialize(s, 'foo')
assert c is not c2
assert not c2.new
assert not c2.modified
assert not c2.should_save
assert c2 == c
c3 = SecureCookie.unserialize(s, 'wrong foo')
assert not c3.modified
assert not c3.new
assert c3 == {}
def test_wrapper_support(self):
req = Request.from_values()
resp = Response()
c = SecureCookie.load_cookie(req, secret_key='foo')
assert c.new
c['foo'] = 42
assert c.secret_key == 'foo'
c.save_cookie(resp)
req = Request.from_values(headers={
'Cookie': 'session="%s"' % parse_cookie(resp.headers['set-cookie'])['session']
})
c2 = SecureCookie.load_cookie(req, secret_key='foo')
assert not c2.new
assert c2 == c
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SecureCookieTestCase))
return suite
| apache-2.0 |
disruptek/boto | tests/unit/glacier/test_vault.py | 114 | 7480 | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import StringIO
from tests.compat import mock, unittest
ANY = mock.ANY
from boto.glacier import vault
from boto.glacier.job import Job
from boto.glacier.response import GlacierResponse
class TestVault(unittest.TestCase):
def setUp(self):
self.size_patch = mock.patch('os.path.getsize')
self.getsize = self.size_patch.start()
self.api = mock.Mock()
self.vault = vault.Vault(self.api, None)
self.vault.name = 'myvault'
self.mock_open = mock.mock_open()
stringio = StringIO('content')
self.mock_open.return_value.read = stringio.read
def tearDown(self):
self.size_patch.stop()
@mock.patch('boto.glacier.vault.compute_hashes_from_fileobj',
return_value=[b'abc', b'123'])
def test_upload_archive_small_file(self, compute_hashes):
self.getsize.return_value = 1
self.api.upload_archive.return_value = {'ArchiveId': 'archive_id'}
with mock.patch('boto.glacier.vault.open', self.mock_open,
create=True):
archive_id = self.vault.upload_archive(
'filename', 'my description')
self.assertEqual(archive_id, 'archive_id')
self.api.upload_archive.assert_called_with(
'myvault', self.mock_open.return_value,
mock.ANY, mock.ANY, 'my description')
def test_small_part_size_is_obeyed(self):
self.vault.DefaultPartSize = 2 * 1024 * 1024
self.vault.create_archive_writer = mock.Mock()
self.getsize.return_value = 1
with mock.patch('boto.glacier.vault.open', self.mock_open,
create=True):
self.vault.create_archive_from_file('myfile')
# The write should be created with the default part size of the
# instance (2 MB).
self.vault.create_archive_writer.assert_called_with(
description=mock.ANY, part_size=self.vault.DefaultPartSize)
def test_large_part_size_is_obeyed(self):
self.vault.DefaultPartSize = 8 * 1024 * 1024
self.vault.create_archive_writer = mock.Mock()
self.getsize.return_value = 1
with mock.patch('boto.glacier.vault.open', self.mock_open,
create=True):
self.vault.create_archive_from_file('myfile')
# The write should be created with the default part size of the
# instance (8 MB).
self.vault.create_archive_writer.assert_called_with(
description=mock.ANY, part_size=self.vault.DefaultPartSize)
def test_part_size_needs_to_be_adjusted(self):
# If we have a large file (400 GB)
self.getsize.return_value = 400 * 1024 * 1024 * 1024
self.vault.create_archive_writer = mock.Mock()
# When we try to upload the file.
with mock.patch('boto.glacier.vault.open', self.mock_open,
create=True):
self.vault.create_archive_from_file('myfile')
# We should automatically bump up the part size used to
# 64 MB.
expected_part_size = 64 * 1024 * 1024
self.vault.create_archive_writer.assert_called_with(
description=mock.ANY, part_size=expected_part_size)
def test_retrieve_inventory(self):
class FakeResponse(object):
status = 202
def getheader(self, key, default=None):
if key == 'x-amz-job-id':
return 'HkF9p6'
elif key == 'Content-Type':
return 'application/json'
return 'something'
def read(self, amt=None):
return b"""{
"Action": "ArchiveRetrieval",
"ArchiveId": "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-EXAMPLEArchiveId",
"ArchiveSizeInBytes": 16777216,
"ArchiveSHA256TreeHash": "beb0fe31a1c7ca8c6c04d574ea906e3f97",
"Completed": false,
"CreationDate": "2012-05-15T17:21:39.339Z",
"CompletionDate": "2012-05-15T17:21:43.561Z",
"InventorySizeInBytes": null,
"JobDescription": "My ArchiveRetrieval Job",
"JobId": "HkF9p6",
"RetrievalByteRange": "0-16777215",
"SHA256TreeHash": "beb0fe31a1c7ca8c6c04d574ea906e3f97b31fd",
"SNSTopic": "arn:aws:sns:us-east-1:012345678901:mytopic",
"StatusCode": "InProgress",
"StatusMessage": "Operation in progress.",
"VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault"
}"""
raw_resp = FakeResponse()
init_resp = GlacierResponse(raw_resp, [('x-amz-job-id', 'JobId')])
raw_resp_2 = FakeResponse()
desc_resp = GlacierResponse(raw_resp_2, [])
with mock.patch.object(self.vault.layer1, 'initiate_job',
return_value=init_resp):
with mock.patch.object(self.vault.layer1, 'describe_job',
return_value=desc_resp):
# The old/back-compat variant of the call.
self.assertEqual(self.vault.retrieve_inventory(), 'HkF9p6')
# The variant the returns a full ``Job`` object.
job = self.vault.retrieve_inventory_job()
self.assertTrue(isinstance(job, Job))
self.assertEqual(job.id, 'HkF9p6')
class TestConcurrentUploads(unittest.TestCase):
def test_concurrent_upload_file(self):
v = vault.Vault(None, None)
with mock.patch('boto.glacier.vault.ConcurrentUploader') as c:
c.return_value.upload.return_value = 'archive_id'
archive_id = v.concurrent_create_archive_from_file(
'filename', 'my description')
c.return_value.upload.assert_called_with('filename',
'my description')
self.assertEqual(archive_id, 'archive_id')
def test_concurrent_upload_forwards_kwargs(self):
v = vault.Vault(None, None)
with mock.patch('boto.glacier.vault.ConcurrentUploader') as c:
c.return_value.upload.return_value = 'archive_id'
archive_id = v.concurrent_create_archive_from_file(
'filename', 'my description', num_threads=10,
part_size=1024 * 1024 * 1024 * 8)
c.assert_called_with(None, None, num_threads=10,
part_size=1024 * 1024 * 1024 * 8)
if __name__ == '__main__':
unittest.main()
| mit |
chuckinator0/Projects | scripts/log_practice.py | 1 | 1730 | """
Parse a log file! See log_practice.log.
The log is a schedule of classes on different days.
Each line has the form:
HH:MM Topic
Days are separated by blank lines.
"""
def hours(string):
return int(string[0:2])
def minutes(string):
return int(string[3:5])
def duration(string1, string2):
minutes1 = 60 * hours(string1) + minutes(string1)
minutes2 = 60 * hours(string2) + minutes(string2)
difference = abs(minutes2 - minutes1)
return difference
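# For example (illustrative, not part of the original script):
#
#   >>> duration('09:00', '10:30')
#   90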
topic_dict = {} # {topic: duration of that topic}
total_duration = 0
last_event = []
with open("./log_practice.log", "r") as log_file:
for line in log_file:
# if we reach a blank line, it means we start a new day
if line == "\n":
print('\n')
last_event = []
continue
        # Turn the line into an event where index 0 is the start time and
# index 1 is the topic
event = line.rstrip('\n').split(" ", 1)
start = event[0]
topic = event[1]
# if this is the first event of the day, set the last_event variable
if last_event == []:
last_event = event
continue
dur = duration(last_event[0], start)
# increase total duration
total_duration += dur
output = last_event[0] + '-' + start + " " + last_event[1]
print(output)
# update the topic dictionary
if last_event[1] in topic_dict:
topic_dict[last_event[1]] += dur
else:
topic_dict[last_event[1]] = dur
last_event = event
for topic in topic_dict:
print(topic + ' ' + str(topic_dict[topic]) + ' minutes' + ' ' + str(100*topic_dict[topic]/total_duration) + '%')
print(topic_dict)
| gpl-3.0 |
iohannez/gnuradio | gr-qtgui/examples/pyqt_time_raster_f.py | 7 | 2823 | #!/usr/bin/env python
#
# Copyright 2012,2013,2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt5 import QtWidgets, Qt
import sip
except ImportError:
print("Error: Program requires PyQt5 and gr-qtgui.")
sys.exit(1)
class dialog_box(QtWidgets.QWidget):
def __init__(self, display):
QtWidgets.QWidget.__init__(self, None)
self.setWindowTitle('PyQt Test GUI')
self.boxlayout = QtWidgets.QBoxLayout(QtWidgets.QBoxLayout.LeftToRight, self)
self.boxlayout.addWidget(display, 1)
self.resize(800, 500)
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self.qapp = QtWidgets.QApplication(sys.argv)
data0 = 10*[0,] + 40*[1,0] + 10*[0,]
data0 += 10*[0,] + 40*[0,1] + 10*[0,]
data1 = 20*[0,] + [0,0,0,1,1,1,0,0,0,0] + 70*[0,]
# Adjust these to change the layout of the plot.
# Can be set to fractions.
ncols = 100.25
nrows = 100
fs = 200
src0 = blocks.vector_source_f(data0, True)
src1 = blocks.vector_source_f(data1, True)
thr = blocks.throttle(gr.sizeof_float, 50000)
hed = blocks.head(gr.sizeof_float, 10000000)
self.snk1 = qtgui.time_raster_sink_f(fs, nrows, ncols, [], [],
"Float Time Raster Example", 2)
self.connect(src0, thr, (self.snk1, 0))
self.connect(src1, (self.snk1, 1))
# Get the reference pointer to the SpectrumDisplayForm QWidget
pyQt = self.snk1.pyqwidget()
# Wrap the pointer as a PyQt SIP object
# This can now be manipulated as a PyQt5.QtWidgets.QWidget
pyWin = sip.wrapinstance(pyQt, QtWidgets.QWidget)
self.main_box = dialog_box(pyWin)
self.main_box.show()
if __name__ == "__main__":
tb = my_top_block();
tb.start()
tb.qapp.exec_()
tb.stop()
| gpl-3.0 |
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | 1 | 9361 | # -*- coding: utf-8 -*-
# git+git://github.com/Sheeprider/BitBucket-api.git
__all__ = ['Bitbucket', ]
try:
from urlparse import parse_qs
except ImportError:
from urllib.parse import parse_qs
import json
import re
from requests import Request, Session
from requests_oauthlib import OAuth1
import requests
from .issue import Issue
from .repository import Repository
from .service import Service
from .ssh import SSH
from .deploy_key import DeployKey
# ========
# = URLs =
# ========
URLS = {
'BASE': 'https://bitbucket.org/!api/1.0/%s',
# Get user profile and repos
'GET_USER': 'users/%(username)s/',
'GET_USER_PRIVILEGES': 'user/privileges',
# Search repo
# 'SEARCH_REPO': 'repositories/?name=%(search)s',
# Get tags & branches
'GET_TAGS': 'repositories/%(username)s/%(repo_slug)s/tags/',
'GET_BRANCHES': 'repositories/%(username)s/%(repo_slug)s/branches/',
'REQUEST_TOKEN': 'oauth/request_token/',
'AUTHENTICATE': 'oauth/authenticate?oauth_token=%(token)s',
'ACCESS_TOKEN': 'oauth/access_token/'
}
class Bitbucket(object):
""" This class lets you interact with the bitbucket public API. """
def __init__(self, username='', password='', repo_name_or_slug=''):
self.username = username
self.password = password
self.repo_slug = repo_name_or_slug
self.repo_tree = {}
self.URLS = URLS
self.repository = Repository(self)
self.service = Service(self)
self.ssh = SSH(self)
self.issue = Issue(self)
self.deploy_key = DeployKey(self)
self.access_token = None
self.access_token_secret = None
self.consumer_key = None
self.consumer_secret = None
self.oauth = None
# ===================
# = Getters/Setters =
# ===================
@property
def auth(self):
""" Return credentials for current Bitbucket user. """
if self.oauth:
return self.oauth
return (self.username, self.password)
@property
def username(self):
"""Return your repository's username."""
return self._username
@username.setter
def username(self, value):
try:
if isinstance(value, basestring):
self._username = unicode(value)
except NameError:
self._username = value
if value is None:
self._username = None
@username.deleter
def username(self):
del self._username
@property
def password(self):
"""Return your repository's password."""
return self._password
@password.setter
def password(self, value):
try:
if isinstance(value, basestring):
self._password = unicode(value)
except NameError:
self._password = value
if value is None:
self._password = None
@password.deleter
def password(self):
del self._password
@property
def repo_slug(self):
"""Return your repository's slug name."""
return self._repo_slug
@repo_slug.setter
def repo_slug(self, value):
if value is None:
self._repo_slug = None
else:
try:
if isinstance(value, basestring):
value = unicode(value)
except NameError:
pass
value = value.lower()
self._repo_slug = re.sub(r'[^a-z0-9_-]+', '-', value)
@repo_slug.deleter
def repo_slug(self):
del self._repo_slug
# ========================
# = Oauth authentication =
# ========================
def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
"""
Call this with your consumer key, secret and callback URL, to
generate a token for verification.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None)
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
        After converting the token into a verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None)
def finalize_oauth(self, access_token, access_token_secret):
""" Called internally once auth process is complete. """
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret)
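    # Illustrative three-legged OAuth flow (a sketch, not part of the
    # original module; key, secret and the verifier are placeholders
    # obtained from Bitbucket):
    #
    #   bb = Bitbucket()
    #   ok, err = bb.authorize('key', 'secret', 'http://localhost/callback')
    #   # redirect the user to bb.url('AUTHENTICATE', token=bb.access_token),
    #   # then finish with the verifier returned to the callback:
    #   ok, err = bb.verify('verifier-from-callback')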
# ======================
# = High lvl functions =
# ======================
def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error)
def url(self, action, **kwargs):
""" Construct and return the URL for a specific API service. """
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs
# =====================
# = General functions =
# =====================
def get_user(self, username=None):
""" Returns user informations.
If username is not defined, tries to return own informations.
"""
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_branches(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its branches."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth)
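# Minimal usage sketch with HTTP basic auth (illustrative, not part of the
# original module; the credentials are placeholders):
#
#   bb = Bitbucket('myuser', 'mypassword', 'myrepo')
#   success, user = bb.get_user()
#   success, tags = bb.get_tags()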
| isc |
noba3/KoTos | addons/script.module.turtlex/lib/snapvideo/StageVU.py | 3 | 1285 | '''
Created on Jan 3, 2012
@author: ajju
'''
from common import HttpUtils
from common.DataObjects import VideoHostingInfo, VideoInfo, VIDEO_QUAL_SD
import re
def getVideoHostingInfo():
video_hosting_info = VideoHostingInfo()
video_hosting_info.set_video_hosting_image('http://userlogos.org/files/logos/jumpordie/stagevu-iphone.png')
video_hosting_info.set_video_hosting_name('StageVU')
return video_hosting_info
def retrieveVideoInfo(video_id):
video_info = VideoInfo()
video_info.set_video_hosting_info(getVideoHostingInfo())
video_info.set_video_id(video_id)
try:
video_info_link = 'http://stagevu.com/video/' + str(video_id)
html = HttpUtils.HttpClient().getHtmlContent(url=video_info_link)
html = ''.join(html.splitlines()).replace('\t', '').replace('\'', '"')
match = re.compile('<param name="src" value="(.+?)"(.+?)<param name="movieTitle" value="(.+?)"(.+?)<param name="previewImage" value="(.+?)"').findall(html)
video_info.add_video_link(VIDEO_QUAL_SD, match[0][0])
video_info.set_video_name(match[0][2])
video_info.set_video_image(match[0][4])
video_info.set_video_stopped(False)
except:
video_info.set_video_stopped(True)
return video_info
| gpl-2.0 |
chokepoint/Ataraxpy | plugins/commands.py | 1 | 2504 | #!/usr/bin/env python
from plugins import ValidationError
class CommandTemplate:
def __init__(self, name, conn=None):
self.name = name
self.conn = conn
self.cmds = {
"help": {"args": [], "func": self.help},
"die": {"args": [], "func": self.die}
}
def help(self, src, args):
"""Generic help command sends a list of commands as well as
argument types that it accepts.
"""
self.send(src, "Bot name: %s" % self.name)
for cmd in self.cmds:
self.send(src, "Command: %s Args: (%d) %s" % (cmd,
len(self.cmds[cmd]["args"]),
[str(arg) for arg in self.cmds[cmd]["args"]]))
def die(self, src, args):
"""Generic command to shut down the bot."""
from sys import exit
exit(0)
def validate(self, cmd, args):
"""Validate a given command based off the definitions in self.cmds.
        This is kind of hacky, and needs some additional cases for things
        like optional arguments.
Arguments:
cmd - (string) command to be called
args - (list) of arguments being passed
Return:
True on success
Exception:
Raises ValidationError on any issues with a description.
"""
if cmd not in self.cmds:
raise ValidationError("Invalid command")
if not self.cmds[cmd]["args"]:
if not args:
return True
raise ValidationError("Invalid number of arguments")
if "*" in self.cmds[cmd]["args"]:
return True
if len(args) != len(self.cmds[cmd]["args"]):
raise ValidationError("Invalid number of arguments")
for arg in xrange(len(args)):
if not isinstance(args[arg], self.cmds[cmd]["args"][arg]):
try:
self.cmds[cmd]["args"][arg](args[arg])
except ValueError:
raise ValidationError("Invalid argument type")
return True
def send(self, dst, msg):
"""conn.privmsg wrapper splits msg into single lines and sends them.
Arguments:
dst - (string) nick or channel that should receive the msg.
msg - (string castable) message to be delivered
Return:
None
"""
for line in str(msg).split('\n'):
if self.conn:
self.conn.privmsg(dst, line)
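# Illustrative subclass sketch (not part of the original plugin; the conn
# object is assumed to provide ``privmsg`` as used by ``send`` above):
#
#   class EchoCommands(CommandTemplate):
#       def __init__(self, conn=None):
#           CommandTemplate.__init__(self, 'echo-bot', conn)
#           self.cmds['echo'] = {'args': [str], 'func': self.echo}
#
#       def echo(self, src, args):
#           self.send(src, args[0])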
| mit |
mdavid/horizon | tools/install_venv_common.py | 166 | 5958 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
def __init__(self, root, venv, requirements,
test_requirements, py_version,
project):
self.root = root
self.venv = venv
self.requirements = requirements
self.test_requirements = test_requirements
self.py_version = py_version
self.project = project
def die(self, message, *args):
print(message % args, file=sys.stderr)
sys.exit(1)
def check_python_version(self):
if sys.version_info < (2, 6):
self.die("Need Python Version >= 2.6")
def run_command_with_code(self, cmd, redirect_output=True,
check_exit_code=True):
"""Runs a command in an out-of-process shell.
Returns the output of that command. Working directory is self.root.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return (output, proc.returncode)
def run_command(self, cmd, redirect_output=True, check_exit_code=True):
return self.run_command_with_code(cmd, redirect_output,
check_exit_code)[0]
def get_distro(self):
if (os.path.exists('/etc/fedora-release') or
os.path.exists('/etc/redhat-release')):
return Fedora(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
else:
return Distro(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
def check_dependencies(self):
self.get_distro().install_virtualenv()
def create_virtualenv(self, no_site_packages=True):
"""Creates the virtual environment and installs PIP.
Creates the virtual environment and installs PIP only into the
virtual environment.
"""
if not os.path.isdir(self.venv):
print('Creating venv...', end=' ')
if no_site_packages:
self.run_command(['virtualenv', '-q', '--no-site-packages',
self.venv])
else:
self.run_command(['virtualenv', '-q', self.venv])
print('done.')
else:
print("venv already exists...")
pass
def pip_install(self, *args):
self.run_command(['tools/with_venv.sh',
'pip', 'install', '--upgrade'] + list(args),
redirect_output=False)
def install_dependencies(self):
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
# setuptools and pbr
self.pip_install('pip>=1.4')
self.pip_install('setuptools')
self.pip_install('pbr')
self.pip_install('-r', self.requirements, '-r', self.test_requirements)
def parse_args(self, argv):
"""Parses command-line arguments."""
parser = optparse.OptionParser()
parser.add_option('-n', '--no-site-packages',
action='store_true',
help="Do not inherit packages from global Python "
"install")
return parser.parse_args(argv[1:])[0]
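# Illustrative driver sketch (not part of the original module; the paths and
# project name are placeholders an install_venv.py wrapper would pass in):
#
#   install = InstallVenv(root='.', venv='.venv',
#                         requirements='requirements.txt',
#                         test_requirements='test-requirements.txt',
#                         py_version='python2.7', project='horizon')
#   options = install.parse_args(sys.argv)
#   install.check_python_version()
#   install.check_dependencies()
#   install.create_virtualenv(no_site_packages=options.no_site_packages)
#   install.install_dependencies()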
class Distro(InstallVenv):
def check_cmd(self, cmd):
return bool(self.run_command(['which', cmd],
check_exit_code=False).strip())
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if self.check_cmd('easy_install'):
print('Installing virtualenv via easy_install...', end=' ')
if self.run_command(['easy_install', 'virtualenv']):
print('Succeeded')
return
else:
print('Failed')
self.die('ERROR: virtualenv not found.\n\n%s development'
' requires virtualenv, please install it using your'
' favorite package management tool' % self.project)
class Fedora(Distro):
"""This covers all Fedora-based distributions.
Includes: Fedora, RHEL, CentOS, Scientific Linux
"""
def check_pkg(self, pkg):
return self.run_command_with_code(['rpm', '-q', pkg],
check_exit_code=False)[1] == 0
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if not self.check_pkg('python-virtualenv'):
self.die("Please install 'python-virtualenv'.")
super(Fedora, self).install_virtualenv()
| apache-2.0 |
xiangel/hue | desktop/core/ext-py/boto-2.38.0/boto/sdb/db/blob.py | 153 | 2437 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.compat import six
class Blob(object):
"""Blob object"""
def __init__(self, value=None, file=None, id=None):
self._file = file
self.id = id
self.value = value
@property
def file(self):
        from boto.compat import StringIO
if self._file:
f = self._file
else:
f = StringIO(self.value)
return f
def __str__(self):
return six.text_type(self).encode('utf-8')
def __unicode__(self):
if hasattr(self.file, "get_contents_as_string"):
value = self.file.get_contents_as_string()
else:
value = self.file.getvalue()
if isinstance(value, six.text_type):
return value
else:
return value.decode('utf-8')
def read(self):
if hasattr(self.file, "get_contents_as_string"):
return self.file.get_contents_as_string()
else:
return self.file.read()
def readline(self):
return self.file.readline()
def next(self):
return next(self.file)
def __iter__(self):
return iter(self.file)
@property
def size(self):
if self._file:
return self._file.size
elif self.value:
return len(self.value)
else:
return 0
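# Illustrative sketch (not part of the original module): a Blob wrapping a
# plain string behaves like a small read-only file:
#
#   >>> b = Blob(value='hello')
#   >>> b.read()
#   'hello'
#   >>> b.size
#   5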
| apache-2.0 |
dmeulen/home-assistant | homeassistant/components/http/auth.py | 17 | 1998 | """Authentication for HTTP component."""
import asyncio
import hmac
import logging
from homeassistant.const import HTTP_HEADER_HA_AUTH
from .util import get_real_ip
from .const import KEY_TRUSTED_NETWORKS, KEY_AUTHENTICATED
DATA_API_PASSWORD = 'api_password'
_LOGGER = logging.getLogger(__name__)
@asyncio.coroutine
def auth_middleware(app, handler):
"""Authentication middleware."""
# If no password set, just always set authenticated=True
if app['hass'].http.api_password is None:
@asyncio.coroutine
def no_auth_middleware_handler(request):
"""Auth middleware to approve all requests."""
request[KEY_AUTHENTICATED] = True
return handler(request)
return no_auth_middleware_handler
@asyncio.coroutine
def auth_middleware_handler(request):
"""Auth middleware to check authentication."""
# Auth code verbose on purpose
authenticated = False
if (HTTP_HEADER_HA_AUTH in request.headers and
validate_password(request,
request.headers[HTTP_HEADER_HA_AUTH])):
# A valid auth header has been set
authenticated = True
elif (DATA_API_PASSWORD in request.GET and
validate_password(request, request.GET[DATA_API_PASSWORD])):
authenticated = True
elif is_trusted_ip(request):
authenticated = True
request[KEY_AUTHENTICATED] = authenticated
return handler(request)
return auth_middleware_handler
def is_trusted_ip(request):
"""Test if request is from a trusted ip."""
ip_addr = get_real_ip(request)
return ip_addr and any(
ip_addr in trusted_network for trusted_network
in request.app[KEY_TRUSTED_NETWORKS])
def validate_password(request, api_password):
"""Test if password is valid."""
return hmac.compare_digest(api_password,
request.app['hass'].http.api_password)
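# Note (illustrative, not part of the original module): hmac.compare_digest
# takes time independent of where the inputs first differ, which avoids
# leaking password prefixes through response timing:
#
#   >>> import hmac
#   >>> hmac.compare_digest('secret', 'secret')
#   True
#   >>> hmac.compare_digest('secret', 'guess!')
#   False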
| mit |
ingokegel/intellij-community | python/testData/highlighting/declarations.py | 22 | 1221 | # bg is always black.
# effect is white
# func decl: red bold
# class decl: blue bold
# predefined decl: green bold
def <info descr="PY.FUNC_DEFINITION" type="INFORMATION" foreground="0xff0000" background="0x000000" effectcolor="0xffffff" effecttype="BOXED" fonttype="1">foo</info>():
pass
class <info descr="PY.CLASS_DEFINITION" type="INFORMATION" foreground="0x0000ff" background="0x000000" effectcolor="0xffffff" effecttype="BOXED" fonttype="1">Moo</info>:
def <info descr="PY.PREDEFINED_DEFINITION" type="INFORMATION" foreground="0x00ff00" background="0x000000" effectcolor="0xffffff" effecttype="BOXED" fonttype="1">__init__</info>(<info descr="PY.SELF_PARAMETER">self</info>):
pass
def <info descr="PY.FUNC_DEFINITION" type="INFORMATION" foreground="0xff0000" background="0x000000" effectcolor="0xffffff" effecttype="BOXED" fonttype="1">doodle</info>(<info descr="PY.SELF_PARAMETER">self</info>):
pass
def <info descr="PY.FUNC_DEFINITION" type="INFORMATION" foreground="0xff0000" background="0x000000" effectcolor="0xffffff" effecttype="BOXED" fonttype="1">__made_up__</info>(<info descr="PY.SELF_PARAMETER">self</info>):
return <info descr="PY.BUILTIN_NAME" type="INFORMATION">None</info>
| apache-2.0 |
CoDEmanX/ArangoDB | 3rdParty/V8-4.3.61/third_party/python_26/Lib/user.py | 313 | 1627 | """Hook to allow user-specified customization code to run.
As a policy, Python doesn't run user-specified code on startup of
Python programs (interactive sessions execute the script specified in
the PYTHONSTARTUP environment variable if it exists).
However, some programs or sites may find it convenient to allow users
to have a standard customization file, which gets run when a program
requests it. This module implements such a mechanism. A program
that wishes to use the mechanism must execute the statement
import user
The user module looks for a file .pythonrc.py in the user's home
directory and if it can be opened, execfile()s it in its own global
namespace. Errors during this phase are not caught; that's up to the
program that imports the user module, if it wishes.
The user's .pythonrc.py could conceivably test for sys.version if it
wishes to do different things depending on the Python version.
"""
from warnings import warnpy3k
warnpy3k("the user module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import os
home = os.curdir # Default
if 'HOME' in os.environ:
home = os.environ['HOME']
elif os.name == 'posix':
home = os.path.expanduser("~/")
elif os.name == 'nt': # Contributed by Jeff Bauer
if 'HOMEPATH' in os.environ:
if 'HOMEDRIVE' in os.environ:
home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
else:
home = os.environ['HOMEPATH']
pythonrc = os.path.join(home, ".pythonrc.py")
try:
f = open(pythonrc)
except IOError:
pass
else:
f.close()
execfile(pythonrc)
| apache-2.0 |
ntuecon/server | pyenv/Lib/site-packages/django/template/loaders/filesystem.py | 418 | 2158 | """
Wrapper for loading templates from the filesystem.
"""
import errno
import io
import warnings
from django.core.exceptions import SuspiciousFileOperation
from django.template import Origin, TemplateDoesNotExist
from django.utils._os import safe_join
from django.utils.deprecation import RemovedInDjango20Warning
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def get_dirs(self):
return self.engine.dirs
def get_contents(self, origin):
try:
with io.open(origin.name, encoding=self.engine.file_charset) as fp:
return fp.read()
except IOError as e:
if e.errno == errno.ENOENT:
raise TemplateDoesNotExist(origin)
raise
def get_template_sources(self, template_name, template_dirs=None):
"""
Return an Origin object pointing to an absolute path in each directory
in template_dirs. For security reasons, if a path doesn't lie inside
one of the template_dirs it is excluded from the result set.
"""
if not template_dirs:
template_dirs = self.get_dirs()
for template_dir in template_dirs:
try:
name = safe_join(template_dir, template_name)
except SuspiciousFileOperation:
# The joined path was located outside of this template_dir
# (it might be inside another one, so this isn't fatal).
continue
yield Origin(
name=name,
template_name=template_name,
loader=self,
)
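    # Illustrative sketch (not part of the original module; assumes an
    # engine instance configured for this loader):
    #
    #   >>> loader = Loader(engine)
    #   >>> [o.name for o in loader.get_template_sources(
    #   ...     'index.html', ['/srv/app/templates'])]
    #   ['/srv/app/templates/index.html']
    #
    # A traversal attempt such as '../../etc/passwd' makes safe_join raise
    # SuspiciousFileOperation and is silently skipped.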
def load_template_source(self, template_name, template_dirs=None):
warnings.warn(
'The load_template_sources() method is deprecated. Use '
'get_template() or get_contents() instead.',
RemovedInDjango20Warning,
)
for origin in self.get_template_sources(template_name, template_dirs):
try:
return self.get_contents(origin), origin.name
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(template_name)
| bsd-3-clause |
pjsports/kernel-2.6.39.4-A500-OC1.5G | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
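# Typical invocation (illustrative, not part of the original script):
#
#   perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex -a
#   perf script -s futex-contention.py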
| gpl-2.0 |
sudheesh001/oh-mainline | vendor/packages/kombu/kombu/tests/transport/virtual/test_exchange.py | 24 | 4859 | from __future__ import absolute_import
from kombu import Connection
from kombu.transport.virtual import exchange
from kombu.tests.case import Case, Mock
from kombu.tests.mocks import Transport
class ExchangeCase(Case):
type = None
def setUp(self):
if self.type:
self.e = self.type(Connection(transport=Transport).channel())
class test_Direct(ExchangeCase):
type = exchange.DirectExchange
table = [('rFoo', None, 'qFoo'),
('rFoo', None, 'qFox'),
('rBar', None, 'qBar'),
('rBaz', None, 'qBaz')]
def test_lookup(self):
self.assertListEqual(
self.e.lookup(self.table, 'eFoo', 'rFoo', None),
['qFoo', 'qFox'],
)
self.assertListEqual(
self.e.lookup(self.table, 'eMoz', 'rMoz', 'DEFAULT'),
[],
)
self.assertListEqual(
self.e.lookup(self.table, 'eBar', 'rBar', None),
['qBar'],
)
class test_Fanout(ExchangeCase):
type = exchange.FanoutExchange
table = [(None, None, 'qFoo'),
(None, None, 'qFox'),
(None, None, 'qBar')]
def test_lookup(self):
self.assertListEqual(
self.e.lookup(self.table, 'eFoo', 'rFoo', None),
['qFoo', 'qFox', 'qBar'],
)
def test_deliver_when_fanout_supported(self):
self.e.channel = Mock()
self.e.channel.supports_fanout = True
message = Mock()
self.e.deliver(message, 'exchange', 'rkey')
self.e.channel._put_fanout.assert_called_with(
'exchange', message, 'rkey',
)
def test_deliver_when_fanout_unsupported(self):
self.e.channel = Mock()
self.e.channel.supports_fanout = False
self.e.deliver(Mock(), 'exchange', None)
self.assertFalse(self.e.channel._put_fanout.called)
class test_Topic(ExchangeCase):
type = exchange.TopicExchange
table = [
('stock.#', None, 'rFoo'),
('stock.us.*', None, 'rBar'),
]
def setUp(self):
super(test_Topic, self).setUp()
self.table = [(rkey, self.e.key_to_pattern(rkey), queue)
for rkey, _, queue in self.table]
def test_prepare_bind(self):
x = self.e.prepare_bind('qFoo', 'eFoo', 'stock.#', {})
self.assertTupleEqual(x, ('stock.#', r'^stock\..*?$', 'qFoo'))
def test_lookup(self):
self.assertListEqual(
self.e.lookup(self.table, 'eFoo', 'stock.us.nasdaq', None),
['rFoo', 'rBar'],
)
self.assertTrue(self.e._compiled)
self.assertListEqual(
self.e.lookup(self.table, 'eFoo', 'stock.europe.OSE', None),
['rFoo'],
)
self.assertListEqual(
self.e.lookup(self.table, 'eFoo', 'stockxeuropexOSE', None),
[],
)
self.assertListEqual(
self.e.lookup(self.table, 'eFoo',
'candy.schleckpulver.snap_crackle', None),
[],
)
def test_deliver(self):
self.e.channel = Mock()
self.e.channel._lookup.return_value = ('a', 'b')
message = Mock()
self.e.deliver(message, 'exchange', 'rkey')
expected = [(('a', message), {}),
(('b', message), {})]
self.assertListEqual(self.e.channel._put.call_args_list, expected)
class test_ExchangeType(ExchangeCase):
type = exchange.ExchangeType
def test_lookup(self):
with self.assertRaises(NotImplementedError):
self.e.lookup([], 'eFoo', 'rFoo', None)
def test_prepare_bind(self):
self.assertTupleEqual(
self.e.prepare_bind('qFoo', 'eFoo', 'rFoo', {}),
('rFoo', None, 'qFoo'),
)
def test_equivalent(self):
e1 = dict(
type='direct',
durable=True,
auto_delete=True,
arguments={},
)
self.assertTrue(
self.e.equivalent(e1, 'eFoo', 'direct', True, True, {}),
)
self.assertFalse(
self.e.equivalent(e1, 'eFoo', 'topic', True, True, {}),
)
self.assertFalse(
self.e.equivalent(e1, 'eFoo', 'direct', False, True, {}),
)
self.assertFalse(
self.e.equivalent(e1, 'eFoo', 'direct', True, False, {}),
)
self.assertFalse(
self.e.equivalent(e1, 'eFoo', 'direct', True, True,
{'expires': 3000}),
)
e2 = dict(e1, arguments={'expires': 3000})
self.assertTrue(
self.e.equivalent(e2, 'eFoo', 'direct', True, True,
{'expires': 3000}),
)
self.assertFalse(
self.e.equivalent(e2, 'eFoo', 'direct', True, True,
{'expires': 6000}),
)
| agpl-3.0 |
saukrIppl/seahub | thirdpart/django_constance-1.0.1-py2.6.egg/constance/admin.py | 6 | 6854 | from datetime import datetime, date, time
from decimal import Decimal
import hashlib
from operator import itemgetter
from django import forms
from django.contrib import admin, messages
from django.contrib.admin import widgets
from django.contrib.admin.options import csrf_protect_m
from django.core.exceptions import PermissionDenied, ImproperlyConfigured
from django.forms import fields
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils import six
from django.utils.formats import localize
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.encoding import smart_bytes
except ImportError:
from django.utils.encoding import smart_str as smart_bytes
try:
from django.conf.urls import patterns, url
except ImportError: # Django < 1.4
from django.conf.urls.defaults import patterns, url
from . import LazyConfig, settings
config = LazyConfig()
NUMERIC_WIDGET = forms.TextInput(attrs={'size': 10})
INTEGER_LIKE = (fields.IntegerField, {'widget': NUMERIC_WIDGET})
STRING_LIKE = (fields.CharField, {
'widget': forms.Textarea(attrs={'rows': 3}),
'required': False,
})
FIELDS = {
bool: (fields.BooleanField, {'required': False}),
int: INTEGER_LIKE,
Decimal: (fields.DecimalField, {'widget': NUMERIC_WIDGET}),
str: STRING_LIKE,
datetime: (fields.DateTimeField, {'widget': widgets.AdminSplitDateTime}),
date: (fields.DateField, {'widget': widgets.AdminDateWidget}),
time: (fields.TimeField, {'widget': widgets.AdminTimeWidget}),
float: (fields.FloatField, {'widget': NUMERIC_WIDGET}),
}
if not six.PY3:
FIELDS.update({
long: INTEGER_LIKE,
unicode: STRING_LIKE,
})
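# Illustrative settings sketch (not part of the original module): each
# CONSTANCE_CONFIG entry maps a name to a (default, help_text) pair, and the
# type of the default selects the form field from FIELDS above:
#
#   CONSTANCE_CONFIG = {
#       'SITE_NAME': ('My Site', 'Rendered in the page header'),
#       'MAINTENANCE_MODE': (False, 'Disable public access'),
#   }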
class ConstanceForm(forms.Form):
version = forms.CharField(widget=forms.HiddenInput)
def __init__(self, initial, *args, **kwargs):
super(ConstanceForm, self).__init__(*args, initial=initial, **kwargs)
version_hash = hashlib.md5()
for name, (default, help_text) in settings.CONFIG.items():
config_type = type(default)
if config_type not in FIELDS:
raise ImproperlyConfigured(_("Constance doesn't support "
"config values of the type "
"%(config_type)s. Please fix "
"the value of '%(name)s'.")
% {'config_type': config_type,
'name': name})
field_class, kwargs = FIELDS[config_type]
self.fields[name] = field_class(label=name, **kwargs)
version_hash.update(smart_bytes(initial.get(name, '')))
self.initial['version'] = version_hash.hexdigest()
def save(self):
for name in settings.CONFIG:
setattr(config, name, self.cleaned_data[name])
def clean_version(self):
value = self.cleaned_data['version']
if value != self.initial['version']:
raise forms.ValidationError(_('The settings have been modified '
'by someone else. Please reload the '
'form and resubmit your changes.'))
return value
class ConstanceAdmin(admin.ModelAdmin):
def get_urls(self):
info = self.model._meta.app_label, self.model._meta.module_name
return patterns('',
url(r'^$',
self.admin_site.admin_view(self.changelist_view),
name='%s_%s_changelist' % info),
url(r'^$',
self.admin_site.admin_view(self.changelist_view),
name='%s_%s_add' % info),
)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
# First load a mapping between config name and default value
if not self.has_change_permission(request, None):
raise PermissionDenied
default_initial = ((name, default)
for name, (default, help_text) in settings.CONFIG.items())
# Then update the mapping with actually values from the backend
initial = dict(default_initial,
**dict(config._backend.mget(settings.CONFIG.keys())))
form = ConstanceForm(initial=initial)
if request.method == 'POST':
form = ConstanceForm(data=request.POST, initial=initial)
if form.is_valid():
form.save()
# In django 1.5 this can be replaced with self.message_user
messages.add_message(
request,
messages.SUCCESS,
_('Live settings updated successfully.'),
)
return HttpResponseRedirect('.')
context = {
'config': [],
'title': _('Constance config'),
'app_label': 'constance',
'opts': Config._meta,
'form': form,
'media': self.media + form.media,
}
for name, (default, help_text) in settings.CONFIG.items():
# First try to load the value from the actual backend
value = initial.get(name)
# Then if the returned value is None, get the default
if value is None:
value = getattr(config, name)
context['config'].append({
'name': name,
'default': localize(default),
'help_text': _(help_text),
'value': localize(value),
'modified': value != default,
'form_field': form[name],
})
context['config'].sort(key=itemgetter('name'))
context_instance = RequestContext(request,
current_app=self.admin_site.name)
return render_to_response('admin/constance/change_list.html',
context, context_instance=context_instance)
def has_add_permission(self, *args, **kwargs):
return False
def has_delete_permission(self, *args, **kwargs):
return False
def has_change_permission(self, request, obj=None):
if settings.SUPERUSER_ONLY:
return request.user.is_superuser
return super(ConstanceAdmin, self).has_change_permission(request, obj)
class Config(object):
class Meta(object):
app_label = 'constance'
object_name = 'Config'
model_name = module_name = 'config'
verbose_name_plural = _('config')
get_ordered_objects = lambda x: False
abstract = False
swapped = False
def get_change_permission(self):
return 'change_%s' % self.model_name
_meta = Meta()
admin.site.register([Config], ConstanceAdmin)
| apache-2.0 |
ecolitan/fatics | test/variant/test_chess960.py | 1 | 9385 | # Copyright (C) 2010 Wil Mahan <[email protected]>
#
# This file is part of FatICS.
#
# FatICS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FatICS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with FatICS. If not, see <http://www.gnu.org/licenses/>.
#
from test.test import *
import random
from pgn import Pgn
from db import db
class TestChess960(Test):
def test_bad_idn(self):
t = self.connect_as_guest('GuestABCD')
t2 = self.connect_as_admin()
t.write('match admin white 1 0 fr idn=960\n')
self.expect('idn must be between', t)
self.close(t)
self.close(t2)
def test_checkmate(self):
moves = ['b4', 'b6', 'Bb2', 'Bb7', 'Bxg7', 'Bxg2', 'Bxh8',
'Bxh1', 'Qg7#' ]
self._assert_game_is_legal(moves, 100, 'admin checkmated} 1-0')
def test_stalemate(self):
moves = ['h3', 'Nb6', 'Bh2', 'e6', 'e4', 'Bd6', 'e5', 'Be7', 'd4',
'd6', 'Bb5', 'a6', 'Bxe8', 'Qxe8', 'exd6', 'Bxd6', 'Bxd6',
'cxd6', 'Qg4', 'g6', 'Ned3', 'f5', 'Qg3', 'O-O-O', 'Nb3', 'Bf7',
'O-O-O', 'Qc6', 'Nb4', 'Qc7', 'Rd3', 'Kb8', 'Rc3', 'Qe7', 'Re1',
'Ka8', 'd5', 'e5', 'Qe3', 'Nxd5', 'Nxd5', 'Bxd5', 'Qb6', 'Rc8',
'f3', 'Rxc3', 'bxc3', 'Rc8', 'Rd1', 'Rc6', 'Qe3', 'Bxb3', 'cxb3',
'Qc7', 'Kb2', 'b5', 'Rd5', 'Kb7', 'g4', 'Rc5', 'Rxc5', 'Qxc5',
'gxf5', 'Qxe3', 'fxg6', 'hxg6', 'Ka3', 'Qxf3', 'h4', 'Qxc3',
'h5', 'gxh5']
self._assert_game_is_legal(moves, 734, 'drawn by stalemate} 1/2-1/2')
def test_examine(self):
moves = ['b4', 'b6', 'Bb2', 'Bb7', 'Bxg7', 'Bxg2', 'Bxh8',
'Bxh1', 'Qg7#' ]
self._assert_game_is_legal(moves, 100, 'admin checkmated} 1-0',
clear_hist=False)
t = self.connect_as_admin()
self.set_style_12(t)
t.write('exl\n')
self.expect('<12> qbbnrnkr pppppppp -------- -------- -------- -------- PPPPPPPP QBBNRNKR W -1 1 1 1 1 0 1 admin admin 2 0 0 39 39 0 0 1 none (0:00) none 0 0 0', t)
t.write('forward 9999\n')
self.expect('admin goes forward 9999', t)
self.expect('admin checkmated 1-0', t)
t.write('back\n')
self.expect('admin backs up 1 move.', t)
t.write('Qg7\n')
self.expect('admin checkmated 1-0', t)
t.write('unex\n')
self.expect('You are no longer examining', t)
t.write('aclearhist admin\n')
self.expect('History of admin cleared.', t)
self.close(t)
def test_rematch_same_idn(self):
t = self.connect_as_guest('GuestABCD')
t2 = self.connect_as_admin()
t.write('set style 12\n')
t2.write('set style 12\n')
t.write('match admin white 1 0 fr idn=404\n')
self.expect('Challenge:', t2)
t2.write('a\n')
self.expect('<12> rbbqnnkr pppppppp -------- -------- -------- -------- PPPPPPPP RBBQNNKR W -1 1 1 1 1 0 1 GuestABCD admin 1 1 0 39 39 60 60 1 none (0:00) none 0 0 0', t)
self.expect('<12> rbbqnnkr pppppppp -------- -------- -------- -------- PPPPPPPP RBBQNNKR W -1 1 1 1 1 0 1 GuestABCD admin -1 1 0 39 39 60 60 1 none (0:00) none 1 0 0', t2)
t.write('res\n')
self.expect('GuestABCD resigns} 0-1', t)
self.expect('GuestABCD resigns} 0-1', t2)
t.write('rem\n')
self.expect('Challenge:', t2)
t2.write('a\n')
self.expect('<12> rbbqnnkr pppppppp -------- -------- -------- -------- PPPPPPPP RBBQNNKR W -1 1 1 1 1 0 1 admin GuestABCD -1 1 0 39 39 60 60 1 none (0:00) none 1 0 0', t)
self.expect('<12> rbbqnnkr pppppppp -------- -------- -------- -------- PPPPPPPP RBBQNNKR W -1 1 1 1 1 0 1 admin GuestABCD 1 1 0 39 39 60 60 1 none (0:00) none 0 0 0', t2)
t.write('res\n')
self.expect('GuestABCD resigns} 1-0', t)
self.expect('GuestABCD resigns} 1-0', t2)
# this test is expected to fail 1 out of every 960 runs :)
"""
t.write('rem\n')
self.expect('Challenge:', t2)
t2.write('a\n')
self.expect_not('<12> rbbqnnkr ', t)
t.write('abort\n')
self.expect('aborted on move 1', t)
self.expect('aborted on move 1', t2)
"""
t2.write('aclearhist admin\n')
self.expect('History of admin cleared.', t2)
self.close(t)
self.close(t2)
def _assert_game_is_legal(self, moves, idn, result=None, clear_hist=True):
t = self.connect_as_guest('GuestABCD')
t2 = self.connect_as_admin()
t.write('set style 12\n')
t2.write('set style 12\n')
t.write('match admin white 1 0 fr idn=%d\n' % idn)
self.expect('Issuing:', t)
self.expect('Challenge:', t2)
t2.write('accept\n')
self.expect('<12> ', t)
self.expect('<12> ', t2)
wtm = True
for mv in moves:
if wtm:
t.write('%s\n' % mv)
else:
t2.write('%s\n' % mv)
self.expect('<12> ', t)
self.expect('<12> ', t2)
wtm = not wtm
if result is not None:
if 'by repetition' in result:
t2.write('draw\n')
self.expect(result, t)
self.expect(result, t2)
if clear_hist:
t2.write('aclearhist admin\n')
self.expect('History of admin cleared.', t2)
else:
t.write('abort\n')
t2.write('abort\n')
self.close(t)
self.close(t2)
class TestPgn(Test):
def test_pgn(self):
t = self.connect_as_guest('GuestABCD')
t2 = self.connect_as_guest('GuestEFGH')
t.write('set style 12\n')
t2.write('set style 12\n')
f = open('../data/chess960.pgn', 'r')
pgn = Pgn(f)
for g in pgn:
print 'game %s' % g
assert(g.tags['FEN'])
idn = db.idn_from_fen(g.tags['FEN'])
if idn is None:
print('could not get idn for fen %s' % g.tags['FEN'])
assert(False)
t.write('match GuestEFGH white 5 0 chess960 idn=%d\n' % idn)
self.expect('Issuing:', t)
self.expect('Challenge:', t2)
t2.write('accept\n')
self.expect('<12> ', t)
self.expect('<12> ', t2)
wtm = True
for mv in g.moves:
if wtm:
#print 'sending %s to white' % mv.text
t.write('%s%s\n' % (mv.text, mv.decorator))
else:
#print 'sending %s to black' % mv.text
t2.write('%s%s\n' % (mv.text, mv.decorator))
self.expect('<12> ', t)
self.expect('<12> ', t2)
wtm = not wtm
if g.result == '1-0' and g.is_checkmate:
self.expect('GuestEFGH checkmated} 1-0', t)
self.expect('GuestEFGH checkmated} 1-0', t2)
elif g.result == '0-1' and g.is_checkmate:
self.expect('GuestABCD checkmated} 0-1', t)
self.expect('GuestABCD checkmated} 0-1', t2)
elif g.result == '1/2-1/2' and g.is_stalemate:
self.expect('drawn by stalemate} 1/2-1/2', t)
self.expect('drawn by stalemate} 1/2-1/2', t2)
elif g.result == '1/2-1/2' and g.is_draw_nomaterial:
self.expect('Neither player has mating material} 1/2-1/2', t)
self.expect('Neither player has mating material} 1/2-1/2', t2)
elif g.result == '1/2-1/2' and g.is_repetition:
""" Old FICS does not consider holding when detecting
repetitions, so a FICS draw by repetition won't necessarily
be a draw by our rules. """
if wtm:
t.write('draw\n')
else:
t2.write('draw\n')
self.expect('drawn by repetition} 1/2-1/2', t)
self.expect('drawn by repetition} 1/2-1/2', t2)
"""t.write('abort\n')
t2.write('abort\n')
self.expect('Game aborted', t)
self.expect('Game aborted', t2)"""
elif g.result == '1/2-1/2' and g.is_fifty:
random.choice([t, t2]).write('draw\n')
self.expect('drawn by the 50 move rule} 1/2-1/2', t)
self.expect('drawn by the 50 move rule} 1/2-1/2', t2)
else:
t.write('abort\n')
t2.write('abort\n')
# don't depend on the abort message, in case the PGN
# omits the comment explaining why the game was drawn
#self.expect('Game aborted', t)
#self.expect('Game aborted', t2)
self.close(t)
self.close(t2)
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 smarttab autoindent
| agpl-3.0 |
ClearCorp/odoo-clearcorp | TODO-9.0/product_invoice_report/wizard/product_invoice_wizard.py | 3 | 3713 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp import models, fields, api, _
from openerp.exceptions import Warning
class ProductInvoiceWizard(models.TransientModel):
_name = 'product.invoice.report.wizard'
@api.one
@api.constrains('date_from','date_to')
def _check_filter_date(self):
if self.filter=='filter_date':
if self.date_from>self.date_to:
raise Warning(_('Start Date must be less than End Date'))
    @api.one
    @api.constrains('period_from', 'period_to')
def _check_filter_period(self):
if self.filter=='filter_period':
if self.period_from.date_start>self.period_to.date_stop:
raise Warning(_('Start Period must be less than End Period'))
@api.multi
def print_report(self):
#Get all customers if no one is selected
if not self.partner_ids:
self.partner_ids = self.env['res.partner'].search([('customer','=',True)])
data = {
'form': {
'sortby': self.sortby,
'filter': self.filter,
'date_from': self.date_from,
'date_to': self.date_to,
'fiscalyear_id': self.fiscalyear_id.id,
'period_to': self.period_to.id,
'period_from':self.period_from.id,
}
}
if self.out_format=='qweb-PDF':
res = self.env['report'].get_action(self.partner_ids,
'product_invoice_report.report_product_invoice_pdf', data=data)
return res
elif self.out_format=='qweb-XLS':
res = self.env['report'].get_action(self.partner_ids,
'product_invoice_report.report_product_invoice_xls', data=data)
return res
    out_format = fields.Selection(
        [('qweb-PDF', 'Portable Document Format (.pdf)'),
         ('qweb-XLS', 'Microsoft Excel 97/2000/XP/2003 (.xls)')],
        string="Print Format", required=True)
    sortby = fields.Selection(
        [('sort_date', 'Date'), ('sort_period', 'Period'),
         ('sort_partner', 'Partner'), ('sort_product', 'Product'),
         ('sort_product_category', 'Product Category')],
        string="Sort by", required=True)
    filter = fields.Selection(
        [('filter_no', 'No Filter'), ('filter_date', 'Date'),
         ('filter_period', 'Period')],
        string="Filter", required=True, default='filter_no')
    date_from = fields.Date(string="Start Date")
    date_to = fields.Date(string="End Date")
    fiscalyear_id = fields.Many2one('account.fiscalyear', string="Fiscal Year")
    period_to = fields.Many2one('account.period', string="End Period")
    period_from = fields.Many2one('account.period', string="Start Period")
    partner_ids = fields.Many2many('res.partner', string="Customer") | agpl-3.0 |
spxtr/bazel | tools/build_defs/docker/rewrite_json.py | 22 | 9758 | # Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package manipulates Docker image layer metadata."""
from collections import namedtuple
import copy
import json
import os
import os.path
import sys
from tools.build_defs.docker import utils
from third_party.py import gflags
gflags.DEFINE_string(
'name', None, 'The name of the current layer')
gflags.DEFINE_string(
'base', None, 'The parent image')
gflags.DEFINE_string(
'output', None, 'The output file to generate')
gflags.DEFINE_string(
'layer', None, 'The current layer tar')
gflags.DEFINE_list(
'entrypoint', None,
'Override the "Entrypoint" of the previous layer')
gflags.DEFINE_list(
'command', None,
'Override the "Cmd" of the previous layer')
gflags.DEFINE_string(
'user', None, 'The username to run commands under')
gflags.DEFINE_list('labels', None, 'Augment the "Label" of the previous layer')
gflags.DEFINE_list(
'ports', None,
'Augment the "ExposedPorts" of the previous layer')
gflags.DEFINE_list(
'volumes', None,
'Augment the "Volumes" of the previous layer')
gflags.DEFINE_string(
'workdir', None,
'Set the working directory for the layer')
gflags.DEFINE_list(
'env', None,
'Augment the "Env" of the previous layer')
FLAGS = gflags.FLAGS
_MetadataOptionsT = namedtuple('MetadataOptionsT',
['name', 'parent', 'size', 'entrypoint', 'cmd',
'env', 'labels', 'ports', 'volumes', 'workdir',
'user'])
class MetadataOptions(_MetadataOptionsT):
"""Docker image layer metadata options."""
def __new__(cls,
name=None,
parent=None,
size=None,
entrypoint=None,
cmd=None,
user=None,
labels=None,
env=None,
ports=None,
volumes=None,
workdir=None):
"""Constructor."""
return super(MetadataOptions, cls).__new__(cls,
name=name,
parent=parent,
size=size,
entrypoint=entrypoint,
cmd=cmd,
user=user,
labels=labels,
env=env,
ports=ports,
volumes=volumes,
workdir=workdir)
_DOCKER_VERSION = '1.5.0'
_PROCESSOR_ARCHITECTURE = 'amd64'
_OPERATING_SYSTEM = 'linux'
def Resolve(value, environment):
"""Resolves environment variables embedded in the given value."""
outer_env = os.environ
try:
os.environ = environment
return os.path.expandvars(value)
finally:
os.environ = outer_env
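# Illustrative example (not in the original source): expansion happens against
# the supplied environment rather than the host's, e.g.
#   Resolve('$PATH:/app/bin', {'PATH': '/usr/bin'}) == '/usr/bin:/app/bin'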
def DeepCopySkipNull(data):
"""Do a deep copy, skipping null entry."""
  if type(data) is dict:
return dict((DeepCopySkipNull(k), DeepCopySkipNull(v))
for k, v in data.iteritems() if v is not None)
return copy.deepcopy(data)
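# Illustrative example (not in the original source): None entries are dropped
# at every nesting level, e.g.
#   DeepCopySkipNull({'a': 1, 'b': None, 'c': {'d': None}}) == {'a': 1, 'c': {}}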
def KeyValueToDict(pair):
"""Converts an iterable object of key=value pairs to dictionary."""
d = dict()
for kv in pair:
(k, v) = kv.split('=', 1)
d[k] = v
return d
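# Illustrative example (not in the original source): only the first '=' splits,
# so values may themselves contain '=', e.g.
#   KeyValueToDict(['PATH=/bin', 'FOO=a=b']) == {'PATH': '/bin', 'FOO': 'a=b'}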
def RewriteMetadata(data, options):
"""Rewrite and return a copy of the input data according to options.
Args:
data: The dict of Docker image layer metadata we're copying and rewriting.
options: The changes this layer makes to the overall image's metadata, which
first appears in this layer's version of the metadata
Returns:
A deep copy of data, which has been updated to reflect the metadata
additions of this layer.
Raises:
Exception: a required option was missing.
"""
output = DeepCopySkipNull(data)
if not options.name:
raise Exception('Missing required option: name')
output['id'] = options.name
if options.parent:
output['parent'] = options.parent
elif data:
raise Exception('Expected empty input object when parent is omitted')
if options.size:
output['Size'] = options.size
elif 'Size' in output:
del output['Size']
if 'config' not in output:
output['config'] = {}
if options.entrypoint:
output['config']['Entrypoint'] = options.entrypoint
if options.cmd:
output['config']['Cmd'] = options.cmd
if options.user:
output['config']['User'] = options.user
output['docker_version'] = _DOCKER_VERSION
output['architecture'] = _PROCESSOR_ARCHITECTURE
output['os'] = _OPERATING_SYSTEM
def Dict2ConfigValue(d):
return ['%s=%s' % (k, d[k]) for k in sorted(d.keys())]
if options.env:
# Build a dictionary of existing environment variables (used by Resolve).
environ_dict = KeyValueToDict(output['config'].get('Env', []))
# Merge in new environment variables, resolving references.
for k, v in options.env.iteritems():
# Resolve handles scenarios like "PATH=$PATH:...".
environ_dict[k] = Resolve(v, environ_dict)
output['config']['Env'] = Dict2ConfigValue(environ_dict)
if options.labels:
label_dict = KeyValueToDict(output['config'].get('Label', []))
for k, v in options.labels.iteritems():
label_dict[k] = v
output['config']['Label'] = Dict2ConfigValue(label_dict)
if options.ports:
if 'ExposedPorts' not in output['config']:
output['config']['ExposedPorts'] = {}
for p in options.ports:
if '/' in p:
# The port spec has the form 80/tcp, 1234/udp
# so we simply use it as the key.
output['config']['ExposedPorts'][p] = {}
else:
# Assume tcp
output['config']['ExposedPorts'][p + '/tcp'] = {}
if options.volumes:
if 'Volumes' not in output['config']:
output['config']['Volumes'] = {}
for p in options.volumes:
output['config']['Volumes'][p] = {}
if options.workdir:
output['config']['WorkingDir'] = options.workdir
# TODO(mattmoor): comment, created, container_config
# container_config contains information about the container
# that was used to create this layer, so it shouldn't
# propagate from the parent to child. This is where we would
# annotate information that can be extract by tools like Blubber
# or Quay.io's UI to gain insight into the source that generated
# the layer. A Dockerfile might produce something like:
# # (nop) /bin/sh -c "apt-get update"
# We might consider encoding the fully-qualified bazel build target:
# //tools/build_defs/docker:image
# However, we should be sensitive to leaking data through this field.
if 'container_config' in output:
del output['container_config']
return output
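# Minimal illustrative call (the layer name and size are made up):
#   RewriteMetadata({}, MetadataOptions(name='abc123', size=42))
# returns {'id': 'abc123', 'Size': 42, 'config': {}, 'docker_version': ...},
# and raises if a non-empty dict is passed without options.parent set.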
def GetParentIdentifier(f):
"""Try to look at the parent identifier from a docker image.
The identifier is expected to be in the 'top' file for our rule so we look at
it first ('./top', 'top'). If it's not found, then we use the 'repositories'
file and tries to parse it to get the first declared repository (so we can
actually parse a file generated by 'docker save').
Args:
f: the input tar file.
Returns:
The identifier of the docker image, or None if no identifier was found.
"""
# TODO(dmarting): Maybe we could drop the 'top' file all together?
top = utils.GetTarFile(f, 'top')
if top:
return top.strip()
repositories = utils.GetTarFile(f, 'repositories')
if repositories:
data = json.loads(repositories)
for k1 in data:
for k2 in data[k1]:
# Returns the first found key
return data[k1][k2].strip()
return None
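# For reference (illustrative): 'docker save' writes a 'repositories' file of
# the form {"repo/name": {"tag": "<layer-id>"}}, so the fallback above returns
# the first layer id it finds when the rule-generated 'top' file is absent.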
def main(unused_argv):
parent = ''
base_json = '{}'
if FLAGS.base:
parent = GetParentIdentifier(FLAGS.base)
if parent:
base_json = utils.GetTarFile(FLAGS.base, '%s/json' % parent)
data = json.loads(base_json)
name = FLAGS.name
if name.startswith('@'):
with open(name[1:], 'r') as f:
name = f.read()
labels = KeyValueToDict(FLAGS.labels)
for label, value in labels.iteritems():
if value.startswith('@'):
with open(value[1:], 'r') as f:
labels[label] = f.read()
output = RewriteMetadata(data,
MetadataOptions(name=name,
parent=parent,
size=os.path.getsize(FLAGS.layer),
entrypoint=FLAGS.entrypoint,
cmd=FLAGS.command,
user=FLAGS.user,
labels=labels,
env=KeyValueToDict(FLAGS.env),
ports=FLAGS.ports,
volumes=FLAGS.volumes,
workdir=FLAGS.workdir))
with open(FLAGS.output, 'w') as fp:
json.dump(output, fp, sort_keys=True)
fp.write('\n')
if __name__ == '__main__':
main(FLAGS(sys.argv))
| apache-2.0 |
annarev/tensorflow | tensorflow/python/saved_model/utils_impl.py | 7 | 10433 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel utility functions implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.lib.io import file_io
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TensorInfo helpers.
@tf_export(v1=["saved_model.build_tensor_info",
"saved_model.utils.build_tensor_info"])
@deprecation.deprecated(
None,
"This function will only be available through the v1 compatibility "
"library as tf.compat.v1.saved_model.utils.build_tensor_info or "
"tf.compat.v1.saved_model.build_tensor_info.")
def build_tensor_info(tensor):
"""Utility function to build TensorInfo proto from a Tensor.
Args:
tensor: Tensor or SparseTensor whose name, dtype and shape are used to
build the TensorInfo. For SparseTensors, the names of the three
constituent Tensors are used.
Returns:
A TensorInfo protocol buffer constructed based on the supplied argument.
Raises:
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError("build_tensor_info is not supported in Eager mode.")
return build_tensor_info_internal(tensor)
def build_tensor_info_internal(tensor):
"""Utility function to build TensorInfo proto from a Tensor."""
if (isinstance(tensor, composite_tensor.CompositeTensor) and
not isinstance(tensor, sparse_tensor.SparseTensor)):
return _build_composite_tensor_info_internal(tensor)
tensor_info = meta_graph_pb2.TensorInfo(
dtype=dtypes.as_dtype(tensor.dtype).as_datatype_enum,
tensor_shape=tensor.get_shape().as_proto())
if isinstance(tensor, sparse_tensor.SparseTensor):
tensor_info.coo_sparse.values_tensor_name = tensor.values.name
tensor_info.coo_sparse.indices_tensor_name = tensor.indices.name
tensor_info.coo_sparse.dense_shape_tensor_name = tensor.dense_shape.name
else:
tensor_info.name = tensor.name
return tensor_info
def _build_composite_tensor_info_internal(tensor):
"""Utility function to build TensorInfo proto from a CompositeTensor."""
spec = tensor._type_spec # pylint: disable=protected-access
tensor_info = meta_graph_pb2.TensorInfo()
struct_coder = nested_structure_coder.StructureCoder()
spec_proto = struct_coder.encode_structure(spec)
tensor_info.composite_tensor.type_spec.CopyFrom(spec_proto.type_spec_value)
for component in nest.flatten(tensor, expand_composites=True):
tensor_info.composite_tensor.components.add().CopyFrom(
build_tensor_info_internal(component))
return tensor_info
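# Illustrative note: for a CompositeTensor such as a RaggedTensor, the proto
# built above carries the encoded TypeSpec plus one component TensorInfo per
# flattened component (e.g. values and row_splits).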
def build_tensor_info_from_op(op):
"""Utility function to build TensorInfo proto from an Op.
Note that this function should be used with caution. It is strictly restricted
to TensorFlow internal use-cases only. Please make sure you do need it before
using it.
This utility function overloads the TensorInfo proto by setting the name to
  the Op's name, dtype to DT_INVALID and tensor_shape to unknown. One typical usage
is for the Op of the call site for the defunned function:
```python
@function.defun
def some_variable_initialization_fn(value_a, value_b):
a = value_a
b = value_b
value_a = constant_op.constant(1, name="a")
value_b = constant_op.constant(2, name="b")
  op_info = utils.build_tensor_info_from_op(
some_variable_initialization_fn(value_a, value_b))
```
Args:
op: An Op whose name is used to build the TensorInfo. The name that points
to the Op could be fetched at run time in the Loader session.
Returns:
A TensorInfo protocol buffer constructed based on the supplied argument.
Raises:
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError(
"build_tensor_info_from_op is not supported in Eager mode.")
return meta_graph_pb2.TensorInfo(
dtype=types_pb2.DT_INVALID,
tensor_shape=tensor_shape.unknown_shape().as_proto(),
name=op.name)
@tf_export(v1=["saved_model.get_tensor_from_tensor_info",
"saved_model.utils.get_tensor_from_tensor_info"])
@deprecation.deprecated(
None,
"This function will only be available through the v1 compatibility "
"library as tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info or "
"tf.compat.v1.saved_model.get_tensor_from_tensor_info.")
def get_tensor_from_tensor_info(tensor_info, graph=None, import_scope=None):
"""Returns the Tensor or CompositeTensor described by a TensorInfo proto.
Args:
tensor_info: A TensorInfo proto describing a Tensor or SparseTensor or
CompositeTensor.
graph: The tf.Graph in which tensors are looked up. If None, the
current default graph is used.
import_scope: If not None, names in `tensor_info` are prefixed with this
string before lookup.
Returns:
The Tensor or SparseTensor or CompositeTensor in `graph` described by
`tensor_info`.
Raises:
KeyError: If `tensor_info` does not correspond to a tensor in `graph`.
ValueError: If `tensor_info` is malformed.
"""
graph = graph or ops.get_default_graph()
def _get_tensor(name):
return graph.get_tensor_by_name(
ops.prepend_name_scope(name, import_scope=import_scope))
encoding = tensor_info.WhichOneof("encoding")
if encoding == "name":
return _get_tensor(tensor_info.name)
elif encoding == "coo_sparse":
return sparse_tensor.SparseTensor(
_get_tensor(tensor_info.coo_sparse.indices_tensor_name),
_get_tensor(tensor_info.coo_sparse.values_tensor_name),
_get_tensor(tensor_info.coo_sparse.dense_shape_tensor_name))
elif encoding == "composite_tensor":
struct_coder = nested_structure_coder.StructureCoder()
spec_proto = struct_pb2.StructuredValue(
type_spec_value=tensor_info.composite_tensor.type_spec)
spec = struct_coder.decode_proto(spec_proto)
components = [_get_tensor(component.name) for component in
tensor_info.composite_tensor.components]
return nest.pack_sequence_as(spec, components, expand_composites=True)
else:
raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)
def get_element_from_tensor_info(tensor_info, graph=None, import_scope=None):
"""Returns the element in the graph described by a TensorInfo proto.
Args:
tensor_info: A TensorInfo proto describing an Op or Tensor by name.
graph: The tf.Graph in which tensors are looked up. If None, the current
default graph is used.
import_scope: If not None, names in `tensor_info` are prefixed with this
string before lookup.
Returns:
Op or tensor in `graph` described by `tensor_info`.
Raises:
KeyError: If `tensor_info` does not correspond to an op or tensor in `graph`
"""
graph = graph or ops.get_default_graph()
return graph.as_graph_element(
ops.prepend_name_scope(tensor_info.name, import_scope=import_scope))
# Path helpers.
def get_or_create_variables_dir(export_dir):
"""Return variables sub-directory, or create one if it doesn't exist."""
variables_dir = get_variables_dir(export_dir)
if not file_io.file_exists(variables_dir):
file_io.recursive_create_dir(variables_dir)
return variables_dir
def get_variables_dir(export_dir):
"""Return variables sub-directory in the SavedModel."""
return os.path.join(
compat.as_text(export_dir),
compat.as_text(constants.VARIABLES_DIRECTORY))
def get_variables_path(export_dir):
"""Return the variables path, used as the prefix for checkpoint files."""
return os.path.join(
compat.as_text(get_variables_dir(export_dir)),
compat.as_text(constants.VARIABLES_FILENAME))
def get_or_create_assets_dir(export_dir):
"""Return assets sub-directory, or create one if it doesn't exist."""
assets_destination_dir = get_assets_dir(export_dir)
if not file_io.file_exists(assets_destination_dir):
file_io.recursive_create_dir(assets_destination_dir)
return assets_destination_dir
def get_assets_dir(export_dir):
"""Return path to asset directory in the SavedModel."""
return os.path.join(
compat.as_text(export_dir),
compat.as_text(constants.ASSETS_DIRECTORY))
def get_or_create_debug_dir(export_dir):
"""Returns path to the debug sub-directory, creating if it does not exist."""
debug_dir = get_debug_dir(export_dir)
if not file_io.file_exists(debug_dir):
file_io.recursive_create_dir(debug_dir)
return debug_dir
def get_saved_model_pbtxt_path(export_dir):
return os.path.join(
compat.as_bytes(compat.path_to_str(export_dir)),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
def get_saved_model_pb_path(export_dir):
return os.path.join(
compat.as_bytes(compat.path_to_str(export_dir)),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
def get_debug_dir(export_dir):
"""Returns path to the debug sub-directory in the SavedModel."""
return os.path.join(
compat.as_text(export_dir), compat.as_text(constants.DEBUG_DIRECTORY))
| apache-2.0 |
savanu/servo | tests/wpt/web-platform-tests/websockets/handlers/stash_responder_wsh.py | 206 | 1673 | #!/usr/bin/python
import urlparse, json
from mod_pywebsocket import common, msgutil, util
from mod_pywebsocket.handshake import hybi
from wptserve import stash
address, authkey = stash.load_env_config()
stash = stash.Stash("/stash_responder", address=address, authkey=authkey)
def web_socket_do_extra_handshake(request):
return
def web_socket_transfer_data(request):
while True:
line = request.ws_stream.receive_message()
if line == "echo":
query = request.unparsed_uri.split('?')[1]
GET = dict(urlparse.parse_qsl(query))
# TODO(kristijanburnik): This code should be reused from
# /mixed-content/generic/expect.py or implemented more generally
# for other tests.
path = GET.get("path", request.unparsed_uri.split('?')[0])
key = GET["key"]
action = GET["action"]
if action == "put":
value = GET["value"]
stash.take(key=key, path=path)
stash.put(key=key, value=value, path=path)
response_data = json.dumps({"status": "success", "result": key})
elif action == "purge":
value = stash.take(key=key, path=path)
response_data = json.dumps({"status": "success", "result": value})
elif action == "take":
value = stash.take(key=key, path=path)
if value is None:
status = "allowed"
else:
status = "blocked"
response_data = json.dumps({"status": status, "result": value})
msgutil.send_message(request, response_data)
return
| mpl-2.0 |
swarna-k/MyDiary | flask/lib/python2.7/site-packages/sqlalchemy/util/__init__.py | 47 | 2514 | # util/__init__.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .compat import callable, cmp, reduce, \
threading, py3k, py33, py2k, jython, pypy, cpython, win32, \
pickle, dottedgetter, parse_qsl, namedtuple, next, reraise, \
raise_from_cause, text_type, safe_kwarg, string_types, int_types, \
binary_type, nested, \
quote_plus, with_metaclass, print_, itertools_filterfalse, u, ue, b,\
unquote_plus, unquote, b64decode, b64encode, byte_buffer, itertools_filter,\
iterbytes, StringIO, inspect_getargspec, zip_longest
from ._collections import KeyedTuple, ImmutableContainer, immutabledict, \
Properties, OrderedProperties, ImmutableProperties, OrderedDict, \
OrderedSet, IdentitySet, OrderedIdentitySet, column_set, \
column_dict, ordered_column_set, populate_column_dict, unique_list, \
UniqueAppender, PopulateDict, EMPTY_SET, to_list, to_set, \
to_column_set, update_copy, flatten_iterator, has_intersection, \
LRUCache, ScopedRegistry, ThreadLocalRegistry, WeakSequence, \
coerce_generator_arg, lightweight_named_tuple
from .langhelpers import iterate_attributes, class_hierarchy, \
portable_instancemethod, unbound_method_to_callable, \
getargspec_init, format_argspec_init, format_argspec_plus, \
get_func_kwargs, get_cls_kwargs, decorator, as_interface, \
memoized_property, memoized_instancemethod, md5_hex, \
group_expirable_memoized_property, dependencies, decode_slice, \
monkeypatch_proxied_specials, asbool, bool_or_str, coerce_kw_type,\
duck_type_collection, assert_arg_type, symbol, dictlike_iteritems,\
classproperty, set_creation_order, warn_exception, warn, NoneType,\
constructor_copy, methods_equivalent, chop_traceback, asint,\
generic_repr, counter, PluginLoader, hybridproperty, hybridmethod, \
safe_reraise,\
get_callable_argspec, only_once, attrsetter, ellipses_string, \
warn_limited, map_bits, MemoizedSlots, EnsureKWArgType
from .deprecations import warn_deprecated, warn_pending_deprecation, \
deprecated, pending_deprecation, inject_docstring_text
# things that used to be not always available,
# but are now as of current support Python versions
from collections import defaultdict
from functools import partial
from functools import update_wrapper
from contextlib import contextmanager
| bsd-3-clause |
adlnet-archive/edx-platform | common/lib/xmodule/xmodule/split_test_module.py | 6 | 28192 | """
Module for running content split tests
"""
import logging
import json
from webob import Response
from uuid import uuid4
from operator import itemgetter
from xmodule.progress import Progress
from xmodule.seq_module import SequenceDescriptor
from xmodule.studio_editable import StudioEditableModule, StudioEditableDescriptor
from xmodule.x_module import XModule, module_attr, STUDENT_VIEW
from xmodule.modulestore.inheritance import UserPartitionList
from lxml import etree
from xblock.core import XBlock
from xblock.fields import Scope, Integer, String, ReferenceValueDict
from xblock.fragment import Fragment
log = logging.getLogger('edx.' + __name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
DEFAULT_GROUP_NAME = _(u'Group ID {group_id}')
class ValidationMessageType(object):
"""
The type for a validation message -- currently 'information', 'warning' or 'error'.
"""
information = 'information'
warning = 'warning'
error = 'error'
@staticmethod
def display_name(message_type):
"""
Returns the display name for the specified validation message type.
"""
if message_type == ValidationMessageType.warning:
# Translators: This message will be added to the front of messages of type warning,
# e.g. "Warning: this component has not been configured yet".
return _(u"Warning")
elif message_type == ValidationMessageType.error:
# Translators: This message will be added to the front of messages of type error,
# e.g. "Error: required field is missing".
return _(u"Error")
else:
return None
# TODO: move this into the xblock repo once it has a formal validation contract
class ValidationMessage(object):
"""
Represents a single validation message for an xblock.
"""
def __init__(self, xblock, message_text, message_type, action_class=None, action_label=None):
assert isinstance(message_text, unicode)
self.xblock = xblock
self.message_text = message_text
self.message_type = message_type
self.action_class = action_class
self.action_label = action_label
def __unicode__(self):
return self.message_text
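# Illustrative usage (not part of the original module; the message text is
# made up):
#   ValidationMessage(block, u"The experiment is not configured.",
#                     ValidationMessageType.warning,
#                     action_class='edit-button', action_label=u"Select")
# Note that the constructor asserts message_text is unicode.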
class SplitTestFields(object):
"""Fields needed for split test module"""
has_children = True
# All available user partitions (with value and display name). This is updated each time
# editable_metadata_fields is called.
user_partition_values = []
# Default value used for user_partition_id
no_partition_selected = {'display_name': _("Not Selected"), 'value': -1}
@staticmethod
def build_partition_values(all_user_partitions, selected_user_partition):
"""
This helper method builds up the user_partition values that will
be passed to the Studio editor
"""
SplitTestFields.user_partition_values = []
# Add "No selection" value if there is not a valid selected user partition.
if not selected_user_partition:
SplitTestFields.user_partition_values.append(SplitTestFields.no_partition_selected)
for user_partition in all_user_partitions:
SplitTestFields.user_partition_values.append({"display_name": user_partition.name, "value": user_partition.id})
return SplitTestFields.user_partition_values
display_name = String(
display_name=_("Display Name"),
help=_("This name is used for organizing your course content, but is not shown to students."),
scope=Scope.settings,
default=_("Content Experiment")
)
# Specified here so we can see what the value set at the course-level is.
user_partitions = UserPartitionList(
help=_("The list of group configurations for partitioning students in content experiments."),
default=[],
scope=Scope.settings
)
user_partition_id = Integer(
help=_("The configuration defines how users are grouped for this content experiment. Caution: Changing the group configuration of a student-visible experiment will impact the experiment data."),
scope=Scope.content,
display_name=_("Group Configuration"),
default=no_partition_selected["value"],
values=lambda: SplitTestFields.user_partition_values # Will be populated before the Studio editor is shown.
)
# group_id is an int
# child is a serialized UsageId (aka Location). This child
# location needs to actually match one of the children of this
# Block. (expected invariant that we'll need to test, and handle
# authoring tools that mess this up)
# TODO: is there a way to add some validation around this, to
# be run on course load or in studio or ....
group_id_to_child = ReferenceValueDict(
help=_("Which child module students in a particular group_id should see"),
scope=Scope.content
)
@XBlock.needs('user_tags') # pylint: disable=abstract-method
@XBlock.wants('partitions')
class SplitTestModule(SplitTestFields, XModule, StudioEditableModule):
"""
Show the user the appropriate child. Uses the ExperimentState
API to figure out which child to show.
Course staff still get put in an experimental condition, but have the option
to see the other conditions. The only thing that counts toward their
grade/progress is the condition they are actually in.
Technical notes:
- There is more dark magic in this code than I'd like. The whole varying-children +
grading interaction is a tangle between super and subclasses of descriptors and
modules.
"""
def __init__(self, *args, **kwargs):
super(SplitTestModule, self).__init__(*args, **kwargs)
self.child_descriptor = None
child_descriptors = self.get_child_descriptors()
if len(child_descriptors) >= 1:
self.child_descriptor = child_descriptors[0]
if self.child_descriptor is not None:
self.child = self.system.get_module(self.child_descriptor)
else:
self.child = None
def get_child_descriptor_by_location(self, location):
"""
Look through the children and look for one with the given location.
Returns the descriptor.
If none match, return None
"""
# NOTE: calling self.get_children() creates a circular reference--
# it calls get_child_descriptors() internally, but that doesn't work until
# we've picked a choice. Use self.descriptor.get_children() instead.
for child in self.descriptor.get_children():
if child.location == location:
return child
return None
def get_content_titles(self):
"""
Returns list of content titles for split_test's child.
This overwrites the get_content_titles method included in x_module by default.
WHY THIS OVERWRITE IS NECESSARY: If we fetch *all* of split_test's children,
we'll end up getting all of the possible conditions users could ever see.
Ex: If split_test shows a video to group A and HTML to group B, the
regular get_content_titles in x_module will get the title of BOTH the video
AND the HTML.
We only want the content titles that should actually be displayed to the user.
split_test's .child property contains *only* the child that should actually
be shown to the user, so we call get_content_titles() on only that child.
"""
return self.child.get_content_titles()
def get_child_descriptors(self):
"""
For grading--return just the chosen child.
"""
group_id = self.get_group_id()
if group_id is None:
return []
# group_id_to_child comes from json, so it has to have string keys
str_group_id = str(group_id)
if str_group_id in self.group_id_to_child:
child_location = self.group_id_to_child[str_group_id]
child_descriptor = self.get_child_descriptor_by_location(child_location)
else:
# Oops. Config error.
log.debug("configuration error in split test module: invalid group_id %r (not one of %r). Showing error", str_group_id, self.group_id_to_child.keys())
if child_descriptor is None:
# Peak confusion is great. Now that we set child_descriptor,
# get_children() should return a list with one element--the
# xmodule for the child
log.debug("configuration error in split test module: no such child")
return []
return [child_descriptor]
def get_group_id(self):
"""
Returns the group ID, or None if none is available.
"""
partitions_service = self.runtime.service(self, 'partitions')
if not partitions_service:
return None
return partitions_service.get_user_group_for_partition(self.user_partition_id)
def _staff_view(self, context):
"""
Render the staff view for a split test module.
"""
fragment = Fragment()
active_contents = []
inactive_contents = []
for child_location in self.children: # pylint: disable=no-member
child_descriptor = self.get_child_descriptor_by_location(child_location)
child = self.system.get_module(child_descriptor)
rendered_child = child.render(STUDENT_VIEW, context)
fragment.add_frag_resources(rendered_child)
group_name, updated_group_id = self.get_data_for_vertical(child)
if updated_group_id is None: # inactive group
group_name = child.display_name
updated_group_id = [g_id for g_id, loc in self.group_id_to_child.items() if loc == child_location][0]
inactive_contents.append({
'group_name': _(u'{group_name} (inactive)').format(group_name=group_name),
'id': child.location.to_deprecated_string(),
'content': rendered_child.content,
'group_id': updated_group_id,
})
continue
active_contents.append({
'group_name': group_name,
'id': child.location.to_deprecated_string(),
'content': rendered_child.content,
'group_id': updated_group_id,
})
# Sort active and inactive contents by group name.
sorted_active_contents = sorted(active_contents, key=itemgetter('group_name'))
sorted_inactive_contents = sorted(inactive_contents, key=itemgetter('group_name'))
# Use the new template
fragment.add_content(self.system.render_template('split_test_staff_view.html', {
'items': sorted_active_contents + sorted_inactive_contents,
}))
fragment.add_css('.split-test-child { display: none; }')
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/split_test_staff.js'))
fragment.initialize_js('ABTestSelector')
return fragment
def author_view(self, context):
"""
Renders the Studio preview by rendering each child so that they can all be seen and edited.
"""
fragment = Fragment()
root_xblock = context.get('root_xblock')
is_configured = not self.user_partition_id == SplitTestFields.no_partition_selected['value']
is_root = root_xblock and root_xblock.location == self.location
active_groups_preview = None
inactive_groups_preview = None
if is_root:
[active_children, inactive_children] = self.descriptor.active_and_inactive_children()
active_groups_preview = self.studio_render_children(
fragment, active_children, context
)
inactive_groups_preview = self.studio_render_children(
fragment, inactive_children, context
)
fragment.add_content(self.system.render_template('split_test_author_view.html', {
'split_test': self,
'is_root': is_root,
'is_configured': is_configured,
'active_groups_preview': active_groups_preview,
'inactive_groups_preview': inactive_groups_preview,
'group_configuration_url': self.descriptor.group_configuration_url,
}))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/split_test_author_view.js'))
fragment.initialize_js('SplitTestAuthorView')
return fragment
def studio_render_children(self, fragment, children, context):
"""
Renders the specified children and returns it as an HTML string. In addition, any
dependencies are added to the specified fragment.
"""
html = ""
for active_child_descriptor in children:
active_child = self.system.get_module(active_child_descriptor)
rendered_child = active_child.render(StudioEditableModule.get_preview_view_name(active_child), context)
if active_child.category == 'vertical':
group_name, group_id = self.get_data_for_vertical(active_child)
if group_name:
rendered_child.content = rendered_child.content.replace(
DEFAULT_GROUP_NAME.format(group_id=group_id),
group_name
)
fragment.add_frag_resources(rendered_child)
html = html + rendered_child.content
return html
def student_view(self, context):
"""
Renders the contents of the chosen condition for students, and all the
conditions for staff.
"""
if self.child is None:
# raise error instead? In fact, could complain on descriptor load...
return Fragment(content=u"<div>Nothing here. Move along.</div>")
if self.system.user_is_staff:
return self._staff_view(context)
else:
child_fragment = self.child.render(STUDENT_VIEW, context)
fragment = Fragment(self.system.render_template('split_test_student_view.html', {
'child_content': child_fragment.content,
'child_id': self.child.scope_ids.usage_id,
}))
fragment.add_frag_resources(child_fragment)
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/split_test_student.js'))
fragment.initialize_js('SplitTestStudentView')
return fragment
@XBlock.handler
def log_child_render(self, request, suffix=''): # pylint: disable=unused-argument
"""
Record in the tracking logs which child was rendered
"""
# TODO: use publish instead, when publish is wired to the tracking logs
self.system.track_function('xblock.split_test.child_render', {'child_id': self.child.scope_ids.usage_id.to_deprecated_string()})
return Response()
def get_icon_class(self):
return self.child.get_icon_class() if self.child else 'other'
def get_progress(self):
children = self.get_children()
progresses = [child.get_progress() for child in children]
progress = reduce(Progress.add_counts, progresses, None)
return progress
def get_data_for_vertical(self, vertical):
"""
Return name and id of a group corresponding to `vertical`.
"""
user_partition = self.descriptor.get_selected_partition()
if user_partition:
for group in user_partition.groups:
group_id = unicode(group.id)
child_location = self.group_id_to_child.get(group_id, None)
if child_location == vertical.location:
return (group.name, group.id)
return (None, None)
@XBlock.needs('user_tags') # pylint: disable=abstract-method
@XBlock.wants('partitions')
@XBlock.wants('user')
class SplitTestDescriptor(SplitTestFields, SequenceDescriptor, StudioEditableDescriptor):
# the editing interface can be the same as for sequences -- just a container
module_class = SplitTestModule
filename_extension = "xml"
mako_template = "widgets/metadata-only-edit.html"
child_descriptor = module_attr('child_descriptor')
log_child_render = module_attr('log_child_render')
get_content_titles = module_attr('get_content_titles')
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('split_test')
renderable_groups = {}
# json.dumps doesn't know how to handle Location objects
for group in self.group_id_to_child:
renderable_groups[group] = self.group_id_to_child[group].to_deprecated_string()
xml_object.set('group_id_to_child', json.dumps(renderable_groups))
xml_object.set('user_partition_id', str(self.user_partition_id))
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
raw_group_id_to_child = xml_object.attrib.get('group_id_to_child', None)
user_partition_id = xml_object.attrib.get('user_partition_id', None)
try:
group_id_to_child = json.loads(raw_group_id_to_child)
except ValueError:
msg = "group_id_to_child is not valid json"
log.exception(msg)
            system.error_tracker(msg)
            group_id_to_child = {}
for child in xml_object:
try:
descriptor = system.process_xml(etree.tostring(child))
children.append(descriptor.scope_ids.usage_id)
except Exception:
msg = "Unable to load child when parsing split_test module."
log.exception(msg)
system.error_tracker(msg)
return ({
'group_id_to_child': group_id_to_child,
'user_partition_id': user_partition_id
}, children)
def get_context(self):
_context = super(SplitTestDescriptor, self).get_context()
_context.update({
'selected_partition': self.get_selected_partition()
})
return _context
def has_dynamic_children(self):
"""
Grading needs to know that only one of the children is actually "real". This
makes it use module.get_child_descriptors().
"""
return True
def editor_saved(self, user, old_metadata, old_content):
"""
Used to create default verticals for the groups.
Assumes that a mutable modulestore is being used.
"""
# Any existing value of user_partition_id will be in "old_content" instead of "old_metadata"
# because it is Scope.content.
if 'user_partition_id' not in old_content or old_content['user_partition_id'] != self.user_partition_id:
selected_partition = self.get_selected_partition()
if selected_partition is not None:
self.group_id_mapping = {} # pylint: disable=attribute-defined-outside-init
for group in selected_partition.groups:
self._create_vertical_for_group(group, user.id)
# Don't need to call update_item in the modulestore because the caller of this method will do it.
else:
# If children referenced in group_id_to_child have been deleted, remove them from the map.
for str_group_id, usage_key in self.group_id_to_child.items():
if usage_key not in self.children: # pylint: disable=no-member
del self.group_id_to_child[str_group_id]
@property
def editable_metadata_fields(self):
# Update the list of partitions based on the currently available user_partitions.
SplitTestFields.build_partition_values(self.user_partitions, self.get_selected_partition())
editable_fields = super(SplitTestDescriptor, self).editable_metadata_fields
# Explicitly add user_partition_id, which does not automatically get picked up because it is Scope.content.
# Note that this means it will be saved by the Studio editor as "metadata", but the field will
# still update correctly.
editable_fields[SplitTestFields.user_partition_id.name] = self._create_metadata_editor_info(
SplitTestFields.user_partition_id
)
return editable_fields
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(SplitTestDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
SplitTestDescriptor.due,
SplitTestDescriptor.user_partitions
])
return non_editable_fields
def get_selected_partition(self):
"""
Returns the partition that this split module is currently using, or None
if the currently selected partition ID does not match any of the defined partitions.
"""
for user_partition in self.user_partitions:
if user_partition.id == self.user_partition_id:
return user_partition
return None
def active_and_inactive_children(self):
"""
Returns two values:
1. The active children of this split test, in the order of the groups.
2. The remaining (inactive) children, in the order they were added to the split test.
"""
children = self.get_children()
user_partition = self.get_selected_partition()
if not user_partition:
return [], children
def get_child_descriptor(location):
"""
Returns the child descriptor which matches the specified location, or None if one is not found.
"""
for child in children:
if child.location == location:
return child
return None
# Compute the active children in the order specified by the user partition
active_children = []
for group in user_partition.groups:
group_id = unicode(group.id)
child_location = self.group_id_to_child.get(group_id, None)
child = get_child_descriptor(child_location)
if child:
active_children.append(child)
# Compute the inactive children in the order they were added to the split test
inactive_children = [child for child in children if child not in active_children]
return active_children, inactive_children
def validation_messages(self):
"""
Returns a list of validation messages describing the current state of the block. Each message
includes a message type indicating whether the message represents information, a warning or an error.
"""
_ = self.runtime.service(self, "i18n").ugettext # pylint: disable=redefined-outer-name
messages = []
if self.user_partition_id < 0:
messages.append(ValidationMessage(
self,
_(u"The experiment is not associated with a group configuration."),
ValidationMessageType.warning,
'edit-button',
_(u"Select a Group Configuration")
))
else:
user_partition = self.get_selected_partition()
if not user_partition:
messages.append(ValidationMessage(
self,
_(u"The experiment uses a deleted group configuration. Select a valid group configuration or delete this experiment."),
ValidationMessageType.error
))
else:
[active_children, inactive_children] = self.active_and_inactive_children()
if len(active_children) < len(user_partition.groups):
messages.append(ValidationMessage(
self,
_(u"The experiment does not contain all of the groups in the configuration."),
ValidationMessageType.error,
'add-missing-groups-button',
_(u"Add Missing Groups")
))
if len(inactive_children) > 0:
messages.append(ValidationMessage(
self,
_(u"The experiment has an inactive group. Move content into active groups, then delete the inactive group."),
ValidationMessageType.warning
))
return messages
@XBlock.handler
def add_missing_groups(self, request, suffix=''): # pylint: disable=unused-argument
"""
Create verticals for any missing groups in the split test instance.
Called from Studio view.
"""
user_partition = self.get_selected_partition()
changed = False
for group in user_partition.groups:
str_group_id = unicode(group.id)
if str_group_id not in self.group_id_to_child:
user_id = self.runtime.service(self, 'user').user_id
self._create_vertical_for_group(group, user_id)
changed = True
if changed:
# TODO user.id - to be fixed by Publishing team
self.system.modulestore.update_item(self, None)
return Response()
@property
def group_configuration_url(self):
assert hasattr(self.system, 'modulestore') and hasattr(self.system.modulestore, 'get_course'), \
"modulestore has to be available"
course_module = self.system.modulestore.get_course(self.location.course_key)
group_configuration_url = None
if 'split_test' in course_module.advanced_modules:
user_partition = self.get_selected_partition()
if user_partition:
group_configuration_url = "{url}#{configuration_id}".format(
url='/group_configurations/' + unicode(self.location.course_key),
configuration_id=str(user_partition.id)
)
return group_configuration_url
def _create_vertical_for_group(self, group, user_id):
"""
Creates a vertical to associate with the group.
This appends the new vertical to the end of children, and updates group_id_to_child.
A mutable modulestore is needed to call this method (will need to update after mixed
modulestore work, currently relies on mongo's create_item method).
"""
assert hasattr(self.system, 'modulestore') and hasattr(self.system.modulestore, 'create_item'), \
"editor_saved should only be called when a mutable modulestore is available"
modulestore = self.system.modulestore
dest_usage_key = self.location.replace(category="vertical", name=uuid4().hex)
metadata = {'display_name': DEFAULT_GROUP_NAME.format(group_id=group.id)}
modulestore.create_item(
user_id,
self.location.course_key,
dest_usage_key.block_type,
block_id=dest_usage_key.block_id,
definition_data=None,
metadata=metadata,
runtime=self.system,
)
self.children.append(dest_usage_key) # pylint: disable=no-member
self.group_id_to_child[unicode(group.id)] = dest_usage_key
@property
def general_validation_message(self):
"""
Message for either error or warning validation message/s.
Returns message and type. Priority given to error type message.
"""
validation_messages = self.validation_messages()
if validation_messages:
has_error = any(message.message_type == ValidationMessageType.error for message in validation_messages)
return {
'message': _(u"This content experiment has issues that affect content visibility."),
'type': ValidationMessageType.error if has_error else ValidationMessageType.warning,
}
return None
| agpl-3.0 |
mkhutornenko/incubator-aurora | src/test/python/apache/thermos/monitoring/test_disk.py | 1 | 2175 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
from tempfile import mkstemp
import pytest
from twitter.common.dirutil import safe_mkdtemp
from twitter.common.quantity import Amount, Data, Time
from apache.thermos.monitoring.disk import DiskCollector, InotifyDiskCollector
TEST_AMOUNT_1 = Amount(100, Data.MB)
TEST_AMOUNT_2 = Amount(10, Data.MB)
TEST_AMOUNT_SUM = TEST_AMOUNT_1 + TEST_AMOUNT_2
def make_file(size, dir):
_, filename = mkstemp(dir=dir)
with open(filename, 'w') as f:
f.write('0' * int(size.as_(Data.BYTES)))
return filename
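# Illustrative (names made up): make_file(Amount(1, Data.MB), dir=some_dir)
# writes a file of exactly 1 MB of '0' bytes into some_dir and returns its
# path, giving the collector tests below deterministic disk usage to observe.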
def _run_collector_tests(collector, target, wait):
assert collector.value == 0
collector.sample()
wait()
assert collector.value == 0
f1 = make_file(TEST_AMOUNT_1, dir=target)
wait()
assert collector.value >= TEST_AMOUNT_1.as_(Data.BYTES)
make_file(TEST_AMOUNT_2, dir=target)
wait()
assert collector.value >= TEST_AMOUNT_SUM.as_(Data.BYTES)
os.unlink(f1)
wait()
assert TEST_AMOUNT_SUM.as_(Data.BYTES) > collector.value >= TEST_AMOUNT_2.as_(Data.BYTES)
def test_du_diskcollector():
target = safe_mkdtemp()
collector = DiskCollector(target)
def wait():
collector.sample()
if collector._thread is not None:
collector._thread.event.wait()
_run_collector_tests(collector, target, wait)
@pytest.mark.skipif("sys.platform == 'darwin'")
def test_inotify_diskcollector():
target = safe_mkdtemp()
INTERVAL = Amount(50, Time.MILLISECONDS)
collector = InotifyDiskCollector(target)
collector._thread.COLLECTION_INTERVAL = INTERVAL
def wait():
time.sleep((2 * INTERVAL).as_(Time.SECONDS))
_run_collector_tests(collector, target, wait)
| apache-2.0 |
wetneb/django | tests/forms_tests/tests/test_regressions.py | 14 | 8031 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import (
CharField, ChoiceField, Form, HiddenInput, IntegerField, ModelForm,
ModelMultipleChoiceField, MultipleChoiceField, RadioSelect, Select,
TextInput,
)
from django.test import TestCase, ignore_warnings
from django.utils import translation
from django.utils.translation import gettext_lazy, ugettext_lazy
from ..models import Cheese
class FormsRegressionsTestCase(TestCase):
def test_class(self):
        # Tests to prevent recurrences of earlier bugs.
extra_attrs = {'class': 'special'}
class TestForm(Form):
f1 = CharField(max_length=10, widget=TextInput(attrs=extra_attrs))
f2 = CharField(widget=TextInput(attrs=extra_attrs))
self.assertHTMLEqual(TestForm(auto_id=False).as_p(), '<p>F1: <input type="text" class="special" name="f1" maxlength="10" /></p>\n<p>F2: <input type="text" class="special" name="f2" /></p>')
def test_regression_3600(self):
# Tests for form i18n #
# There were some problems with form translations in #3600
class SomeForm(Form):
username = CharField(max_length=10, label=ugettext_lazy('Username'))
f = SomeForm()
self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
        # Translations are done at rendering time, so multi-lingual apps can define forms.
with translation.override('de'):
self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Benutzername:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
with translation.override('pl'):
self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Nazwa u\u017cytkownika:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
def test_regression_5216(self):
        # There were some problems with form translations in #5216
class SomeForm(Form):
field_1 = CharField(max_length=10, label=ugettext_lazy('field_1'))
field_2 = CharField(max_length=10, label=ugettext_lazy('field_2'), widget=TextInput(attrs={'id': 'field_2_id'}))
f = SomeForm()
self.assertHTMLEqual(f['field_1'].label_tag(), '<label for="id_field_1">field_1:</label>')
self.assertHTMLEqual(f['field_2'].label_tag(), '<label for="field_2_id">field_2:</label>')
# Unicode decoding problems...
GENDERS = (('\xc5', 'En tied\xe4'), ('\xf8', 'Mies'), ('\xdf', 'Nainen'))
class SomeForm(Form):
somechoice = ChoiceField(choices=GENDERS, widget=RadioSelect(), label='\xc5\xf8\xdf')
f = SomeForm()
self.assertHTMLEqual(f.as_p(), '<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul id="id_somechoice">\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')
# Translated error messages used to be buggy.
with translation.override('ru'):
f = SomeForm({})
self.assertHTMLEqual(f.as_p(), '<ul class="errorlist"><li>\u041e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u043f\u043e\u043b\u0435.</li></ul>\n<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul id="id_somechoice">\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')
        # Deep copying translated text shouldn't raise an error
class CopyForm(Form):
degree = IntegerField(widget=Select(choices=((1, gettext_lazy('test')),)))
f = CopyForm()
@ignore_warnings(category=UnicodeWarning)
def test_regression_5216_b(self):
        # Testing choice validation with UTF-8 bytestrings as input (these are the
        # Russian abbreviations "мес." and "шт.").
UNITS = ((b'\xd0\xbc\xd0\xb5\xd1\x81.', b'\xd0\xbc\xd0\xb5\xd1\x81.'),
(b'\xd1\x88\xd1\x82.', b'\xd1\x88\xd1\x82.'))
f = ChoiceField(choices=UNITS)
self.assertEqual(f.clean('\u0448\u0442.'), '\u0448\u0442.')
self.assertEqual(f.clean(b'\xd1\x88\xd1\x82.'), '\u0448\u0442.')
def test_misc(self):
# There once was a problem with Form fields called "data". Let's make sure that
# doesn't come back.
class DataForm(Form):
data = CharField(max_length=10)
f = DataForm({'data': 'xyzzy'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data, {'data': 'xyzzy'})
# A form with *only* hidden fields that has errors is going to be very unusual.
class HiddenForm(Form):
data = IntegerField(widget=HiddenInput)
f = HiddenForm({})
self.assertHTMLEqual(f.as_p(), '<ul class="errorlist nonfield"><li>(Hidden field data) This field is required.</li></ul>\n<p> <input type="hidden" name="data" id="id_data" /></p>')
self.assertHTMLEqual(f.as_table(), '<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field data) This field is required.</li></ul><input type="hidden" name="data" id="id_data" /></td></tr>')
def test_xss_error_messages(self):
###################################################
# Tests for XSS vulnerabilities in error messages #
###################################################
# The forms layer doesn't escape input values directly because error messages
# might be presented in non-HTML contexts. Instead, the message is just marked
# for escaping by the template engine. So we'll need to construct a little
# silly template to trigger the escaping.
from django.template import Template, Context
t = Template('{{ form.errors }}')
class SomeForm(Form):
field = ChoiceField(choices=[('one', 'One')])
f = SomeForm({'field': '<script>'})
self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')
class SomeForm(Form):
field = MultipleChoiceField(choices=[('one', 'One')])
f = SomeForm({'field': ['<script>']})
self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')
from forms_tests.models import ChoiceModel
class SomeForm(Form):
field = ModelMultipleChoiceField(ChoiceModel.objects.all())
f = SomeForm({'field': ['<script>']})
self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>"<script>" is not a valid value for a primary key.</li></ul></li></ul>')
def test_regression_14234(self):
"""
Re-cleaning an instance that was added via a ModelForm should not raise
a pk uniqueness error.
"""
class CheeseForm(ModelForm):
class Meta:
model = Cheese
fields = '__all__'
form = CheeseForm({
'name': 'Brie',
})
self.assertTrue(form.is_valid())
obj = form.save()
obj.name = 'Camembert'
obj.full_clean()
| bsd-3-clause |
stanxii/wr1004sjl | linux-3.4.6/tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
	except ValueError:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
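# Note: syscalls is an autodict, so the first increment for a new
# (comm, pid, id, ret) key hits a freshly created autodict and the '+= 1'
# raises TypeError, which the handler above catches to initialise the count to 1.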
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| lgpl-2.1 |
wakatime/sketch-wakatime | WakaTime.sketchplugin/Contents/Resources/wakatime/packages/pygments/filters/__init__.py | 31 | 11573 | # -*- coding: utf-8 -*-
"""
pygments.filters
~~~~~~~~~~~~~~~~
Module containing filter lookup functions and default
filters.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
get_choice_opt, ClassNotFound, OptionError, text_type, string_types
from pygments.plugin import find_plugin_filters
def find_filter_class(filtername):
"""Lookup a filter by name. Return None if not found."""
if filtername in FILTERS:
return FILTERS[filtername]
for name, cls in find_plugin_filters():
if name == filtername:
return cls
return None
def get_filter_by_name(filtername, **options):
"""Return an instantiated filter.
Options are passed to the filter initializer if wanted.
Raise a ClassNotFound if not found.
"""
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
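# Illustrative usage (a sketch; assumes Pygments is importable and uses the
# built-in 'keywordcase' filter registered in FILTERS at the bottom of this
# module):
#     from pygments.lexers import PythonLexer
#     lexer = PythonLexer()
#     lexer.add_filter(get_filter_by_name('keywordcase', case='upper'))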
def get_all_filters():
"""Return a generator of all filter names."""
for name in FILTERS:
yield name
for name, _ in find_plugin_filters():
yield name
def _replace_special(ttype, value, regex, specialttype,
replacefunc=lambda x: x):
last = 0
for match in regex.finditer(value):
start, end = match.start(), match.end()
if start != last:
yield ttype, value[last:start]
yield specialttype, replacefunc(value[start:end])
last = end
if last != len(value):
yield ttype, value[last:]
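# Illustrative behaviour (sketch): with a regex matching 'TODO',
# _replace_special(Comment, 'x TODO y', regex, Comment.Special) yields
# (Comment, 'x '), (Comment.Special, 'TODO'), (Comment, ' y').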
class CodeTagFilter(Filter):
"""Highlight special code tags in comments and docstrings.
Options accepted:
`codetags` : list of strings
A list of strings that are flagged as code tags. The default is to
highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
tags = get_list_opt(options, 'codetags',
['XXX', 'TODO', 'BUG', 'NOTE'])
self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
re.escape(tag) for tag in tags if tag
]))
def filter(self, lexer, stream):
regex = self.tag_re
for ttype, value in stream:
if ttype in String.Doc or \
ttype in Comment and \
ttype not in Comment.Preproc:
for sttype, svalue in _replace_special(ttype, value, regex,
Comment.Special):
yield sttype, svalue
else:
yield ttype, value
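# Illustrative usage (sketch): CodeTagFilter(codetags=['FIXME', 'HACK'])
# re-emits those words inside comments and docstrings as Comment.Special
# tokens so a style can highlight them.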
class KeywordCaseFilter(Filter):
"""Convert keywords to lowercase or uppercase or capitalize them, which
means first letter uppercase, rest lowercase.
This can be useful e.g. if you highlight Pascal code and want to adapt the
code to your styleguide.
Options accepted:
`case` : string
The casing to convert keywords to. Must be one of ``'lower'``,
``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case',
['lower', 'upper', 'capitalize'], 'lower')
self.convert = getattr(text_type, case)
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Keyword:
yield ttype, self.convert(value)
else:
yield ttype, value
class NameHighlightFilter(Filter):
"""Highlight a normal Name (and Name.*) token with a different token type.
Example::
filter = NameHighlightFilter(
names=['foo', 'bar', 'baz'],
tokentype=Name.Function,
)
This would highlight the names "foo", "bar" and "baz"
as functions. `Name.Function` is the default token type.
Options accepted:
`names` : list of strings
A list of names that should be given the different token type.
There is no default.
`tokentype` : TokenType or string
A token type or a string containing a token type name that is
used for highlighting the strings in `names`. The default is
`Name.Function`.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.names = set(get_list_opt(options, 'names', []))
tokentype = options.get('tokentype')
if tokentype:
self.tokentype = string_to_tokentype(tokentype)
else:
self.tokentype = Name.Function
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Name and value in self.names:
yield self.tokentype, value
else:
yield ttype, value
class ErrorToken(Exception):
pass
class RaiseOnErrorTokenFilter(Filter):
"""Raise an exception when the lexer generates an error token.
Options accepted:
`excclass` : Exception class
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.exception = options.get('excclass', ErrorToken)
try:
# issubclass() will raise TypeError if first argument is not a class
if not issubclass(self.exception, Exception):
raise TypeError
except TypeError:
raise OptionError('excclass option is not an exception class')
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype is Error:
raise self.exception(value)
yield ttype, value
class VisibleWhitespaceFilter(Filter):
"""Convert tabs, newlines and/or spaces to visible characters.
Options accepted:
`spaces` : string or bool
        If this is a one-character string, spaces will be replaced by this string.
If it is another true value, spaces will be replaced by ``·`` (unicode
MIDDLE DOT). If it is a false value, spaces will not be replaced. The
default is ``False``.
`tabs` : string or bool
The same as for `spaces`, but the default replacement character is ``»``
(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
is ``False``. Note: this will not work if the `tabsize` option for the
lexer is nonzero, as tabs will already have been expanded then.
`tabsize` : int
If tabs are to be replaced by this filter (see the `tabs` option), this
is the total number of characters that a tab should be expanded to.
The default is ``8``.
`newlines` : string or bool
The same as for `spaces`, but the default replacement character is ``¶``
(unicode PILCROW SIGN). The default value is ``False``.
`wstokentype` : bool
If true, give whitespace the special `Whitespace` token type. This allows
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
for name, default in [('spaces', u'·'),
('tabs', u'»'),
('newlines', u'¶')]:
opt = options.get(name, False)
if isinstance(opt, string_types) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
tabsize = get_int_opt(options, 'tabsize', 8)
if self.tabs:
self.tabs += ' ' * (tabsize - 1)
if self.newlines:
self.newlines += '\n'
self.wstt = get_bool_opt(options, 'wstokentype', True)
def filter(self, lexer, stream):
if self.wstt:
spaces = self.spaces or u' '
tabs = self.tabs or u'\t'
newlines = self.newlines or u'\n'
regex = re.compile(r'\s')
def replacefunc(wschar):
if wschar == ' ':
return spaces
elif wschar == '\t':
return tabs
elif wschar == '\n':
return newlines
return wschar
for ttype, value in stream:
for sttype, svalue in _replace_special(ttype, value, regex,
Whitespace, replacefunc):
yield sttype, svalue
else:
spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
# simpler processing
for ttype, value in stream:
if spaces:
value = value.replace(' ', spaces)
if tabs:
value = value.replace('\t', tabs)
if newlines:
value = value.replace('\n', newlines)
yield ttype, value
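# Illustrative usage (sketch; assumes a lexer instance): adding
# VisibleWhitespaceFilter(spaces=True, tabs=True) to a lexer renders ' ' as
# '·' and '\t' as '»' padded with spaces to the configured tabsize.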
class GobbleFilter(Filter):
"""Gobbles source code lines (eats initial characters).
This filter drops the first ``n`` characters off every line of code. This
may be useful when the source code fed to the lexer is indented by a fixed
amount of space that isn't desired in the output.
Options accepted:
`n` : int
The number of characters to gobble.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.n = get_int_opt(options, 'n', 0)
def gobble(self, value, left):
if left < len(value):
return value[left:], 0
else:
return u'', left - len(value)
def filter(self, lexer, stream):
n = self.n
left = n # How many characters left to gobble.
for ttype, value in stream:
# Remove ``left`` tokens from first line, ``n`` from all others.
parts = value.split('\n')
(parts[0], left) = self.gobble(parts[0], left)
for i in range(1, len(parts)):
(parts[i], left) = self.gobble(parts[i], n)
value = u'\n'.join(parts)
if value != '':
yield ttype, value
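# e.g. (illustrative): GobbleFilter(n=4) drops the first four characters of
# every line, so a stream produced from '    code' yields 'code'.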
class TokenMergeFilter(Filter):
"""Merges consecutive tokens with the same token type in the output
stream of a lexer.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
def filter(self, lexer, stream):
current_type = None
current_value = None
for ttype, value in stream:
if ttype is current_type:
current_value += value
else:
if current_type is not None:
yield current_type, current_value
current_type = ttype
current_value = value
if current_type is not None:
yield current_type, current_value
FILTERS = {
'codetagify': CodeTagFilter,
'keywordcase': KeywordCaseFilter,
'highlight': NameHighlightFilter,
'raiseonerror': RaiseOnErrorTokenFilter,
'whitespace': VisibleWhitespaceFilter,
'gobble': GobbleFilter,
'tokenmerge': TokenMergeFilter,
}
| bsd-3-clause |
inspirehep/harvesting-kit | harvestingkit/tests/elsevier_package_tests.py | 1 | 11162 | # -*- coding: utf-8 -*-
#
# This file is part of Harvesting Kit.
# Copyright (C) 2014, 2015 CERN.
#
# Harvesting Kit is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Harvesting Kit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Harvesting Kit; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Tests for Elsevier."""
import os
import unittest
import pkg_resources
from harvestingkit.elsevier_package import ElsevierPackage
from xml.dom.minidom import parse, parseString, Element
from harvestingkit.tests import journal_mappings
class ElsevierPackageTests(unittest.TestCase):
"""Test extraction of Elsevier records."""
def setUp(self):
"""Setup initial document."""
self.els = ElsevierPackage(CONSYN=True,
journal_mappings=journal_mappings)
self.document = parse(pkg_resources.resource_filename(
'harvestingkit.tests',
os.path.join('data', 'sample_consyn_record.xml')
))
def test_doi(self):
"""Test that doi is good."""
self.assertEqual(self.els._get_doi(self.document), '10.1016/0370-2693(88)91603-6')
def test_title(self):
"""Test that title is good."""
self.assertEqual(self.els.get_title(self.document), 'Toward classification of conformal theories')
def test_doctype(self):
"""Test that doctype is good."""
self.assertEqual(self.els.get_doctype(self.document), 'fla')
def test_abstract(self):
"""Test that abstract is good."""
abstract = 'By studying the representations of the mapping class groups '\
'which arise in 2D conformal theories we derive some restrictions '\
'on the value of the conformal dimension h i of operators and the '\
'central charge c of the Virasoro algebra. As a simple application '\
'we show that when there are a finite number of operators in the '\
'conformal algebra, the h i and c are all rational.'
self.assertEqual(self.els.get_abstract(self.document), abstract)
def test_keywords(self):
"""Test that keywords are good."""
keywords = ['Heavy quarkonia', 'Quark gluon plasma', 'Mott effect', 'X(3872)']
self.assertEqual(self.els.get_keywords(self.document), keywords)
def test_add_orcids(self):
"""Test that orcids are good.
According to "Tag by Tag The Elsevier DTD 5 Family of XML DTDs" orcids will be
distributed as an attribute in the ce:author tag.
"""
xml_author = Element('ce:author')
xml_author.setAttribute('orcid', '1234-5678-4321-8765')
authors = [{}]
# _add_orcids will alter the authors list
self.els._add_orcids(authors, [xml_author])
self.assertEqual(authors, [{'orcid': 'ORCID:1234-5678-4321-8765'}])
def test_authors(self):
"""Test that authors are good."""
authors = [{'affiliation': ['Lyman Laboratory of Physics, Harvard University, Cambridge, MA 02138, USA'], 'surname': 'Vafa', 'given_name': 'Cumrun', 'orcid': 'ORCID:1234-5678-4321-8765'}]
self.assertEqual(self.els.get_authors(self.document), authors)
def test_copyright(self):
"""Test that copyright is good."""
self.assertEqual(self.els.get_copyright(self.document), 'Copyright unknown. Published by Elsevier B.V.')
def test_publication_information(self):
"""Test that pubinfo is good."""
publication_information = ('Phys.Lett.',
'0370-2693',
'B206',
'3',
'421',
'426',
'1988',
'1988-05-26',
'10.1016/0370-2693(88)91603-6')
self.assertEqual(self.els.get_publication_information(self.document), publication_information)
def test_publication_date_oa(self):
"""Test that date is good from openAccessEffective."""
data = """
<doc xmlns:oa="http://vtw.elsevier.com/data/ns/properties/OpenAccess-1/">
<oa:openAccessInformation>
<oa:openAccessStatus xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
http://vtw.elsevier.com/data/voc/oa/OpenAccessStatus#Full
</oa:openAccessStatus>
<oa:openAccessEffective xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">2014-11-11T08:38:44Z</oa:openAccessEffective>
<oa:sponsor xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<oa:sponsorName>SCOAP³ - Sponsoring Consortium for Open Access Publishing in Particle Physics</oa:sponsorName>
<oa:sponsorType>http://vtw.elsevier.com/data/voc/oa/SponsorType#FundingBody</oa:sponsorType>
</oa:sponsor>
<oa:userLicense xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">http://creativecommons.org/licenses/by/3.0/</oa:userLicense>
</oa:openAccessInformation>
</doc>"""
doc = parseString(data)
self.assertEqual(
self.els.get_publication_date(doc),
"2014-11-11"
)
def test_publication_date_cover_display(self):
"""Test that date is good from coverDisplayDate."""
data = """
<doc xmlns:prism="http://vtw.elsevier.com/data/ns/properties/OpenAccess-1/">
<prism:coverDisplayDate>December 2014</prism:coverDisplayDate>
</doc>"""
doc = parseString(data)
self.assertEqual(
self.els.get_publication_date(doc),
"2014-12"
)
def test_publication_date_cover_display_full(self):
"""Test that date is good from coverDisplayDate."""
data = """
<doc xmlns:prism="http://vtw.elsevier.com/data/ns/properties/OpenAccess-1/">
<prism:coverDisplayDate>1 December 2014</prism:coverDisplayDate>
</doc>"""
doc = parseString(data)
self.assertEqual(
self.els.get_publication_date(doc),
"2014-12-01"
)
def test_publication_date_cover(self):
"""Test that date is good."""
data = """
<doc xmlns:prism="http://vtw.elsevier.com/data/ns/properties/OpenAccess-1/">
<prism:coverDisplayDate>April 2011</prism:coverDisplayDate>
<prism:coverDate>2011-04-01</prism:coverDate>
</doc>"""
doc = parseString(data)
self.assertEqual(
self.els.get_publication_date(doc),
"2011-04-01"
)
def test_references(self):
"""Test that references is good."""
references = [('[1]', ['Belavin, A.A.', 'Polyakov, A.M.', 'Zamolodchikov, A.B.'], '', 'Nucl. Phys. B 241 1984', '333', '', '241', '1984', [], None, True, '', 'Nucl. Phys. B', '', [], '', []),
('[2]', ['Friedan, D.', 'Qiu, Z.', 'Shenker, S.H.'], '', 'Phys. Rev. Lett. 52 1984', '1575', '', '52', '1984', [], None, True, '', 'Phys. Rev. Lett.', '', [], '', []),
('[3]', ['Cardy, J.L.'], '', 'Nucl. Phys. B 270 1986', '186', '', '270', '1986', [], None, True, '[FS16]', 'Nucl. Phys. B', '', [], '', []),
('[3]', ['Capelli, A.', 'Itzykson, C.', 'Zuber, J.-B.'], '', 'Nucl. Phys. B 280 1987', '445', '', '280', '1987', [], None, True, '[FS 18]', 'Nucl. Phys. B', '', [], '', []),
('[3]', ['Capelli, A.', 'Itzykson, C.', 'Zuber, J.-B.'], '', 'Commun. Math. Phys. 113 1987', '1', '', '113', '1987', [], None, True, '', 'Commun. Math. Phys.', '', [], '', []),
('[3]', ['Gepner, D.'], '', 'Nucl. Phys. B 287 1987', '111', '', '287', '1987', [], None, True, '', 'Nucl. Phys. B', '', [], '', []),
('[4]', [], '', '', '', '', '', '', 'G. Anderson and G. Moore, IAS preprint IASSNS-HEP-87/69.', None, [], '', '', '', [], '', []),
('[5]', ['Friedan, D.', 'Shenker, S.'], '', 'Phys. Lett. B 175 1986', '287', '', '175', '1986', [], None, True, '', 'Phys. Lett. B', '', [], '', []),
('[5]', ['Friedan, D.', 'Shenker, S.'], '', 'Nucl. Phys. B 281 1987', '509', '', '281', '1987', [], None, True, '', 'Nucl. Phys. B', '', [], '', []),
('[6]', [], '', '', '', '', '', '', 'E. Martinec and S. Shenker, unpublished.', None, [], '', '', '', [], '', []),
('[7]', ['Vafa, C.'], '', 'Phys. Lett. B 199 1987', '195', '', '199', '1987', [], None, True, '', 'Phys. Lett. B', '', [], '', []),
('[8]', ['Harer, J.'], '', 'Inv. Math. 72 1983', '221', '', '72', '1983', [], None, True, '', 'Inv. Math.', '', [], '', []),
('[9]', ['Tsuchiya, A.', 'Kanie, Y.'], '', 'Lett. Math. Phys. 13 1987', '303', '', '13', '1987', [], None, True, '', 'Lett. Math. Phys.', '', [], '', []),
('[10]', [], '', '', '', '', '', '', 'E. Verlinde, to be published.', None, [], '', '', '', [], '', []),
('[11]', ['Dehn, M.'], '', 'Acta Math. 69 1938', '135', '', '69', '1938', [], None, True, '', 'Acta Math.', '', [], '', []),
('[12]', [], '', '', '', '', '', '', 'D. Friedan and S. Shenker, unpublished.', None, [], '', '', '', [], '', []),
('[13]', [], '', '', '', '', '', '', 'J. Harvey, G. Moore, and C. Vafa, Nucl. Phys. B, to be published', None, [], '', '', '', [], '', []),
('[14]', [], '', '', '', '', '', '', 'D. Kastor, E. Martinec and Z. Qiu, E. Fermi Institute preprint EFI-87-58.', None, [], '', '', '', [], '', []),
('[15]', ['Adeva, B.'], '', 'Phys. Rev. D 58 1998', '112001', '', '58', '1998', [], None, True, '', 'Phys. Rev. D', '', [], '', [])]
for ref in self.els.get_references(self.document):
self.assertTrue(ref in references)
def test_get_record(self):
"""Test that the whole record is correct."""
source_file = pkg_resources.resource_filename(
'harvestingkit.tests',
os.path.join('data', 'sample_consyn_record.xml')
)
marc_file = pkg_resources.resource_filename(
'harvestingkit.tests',
os.path.join('data', 'sample_consyn_output.xml')
)
xml = self.els.get_record(source_file, test=True)
with open(marc_file) as marc:
result = marc.read()
self.assertEqual(xml.strip(), result.strip())
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(ElsevierPackageTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-2.0 |
ivano666/tensorflow | tensorflow/python/kernel_tests/gradient_correctness_test.py | 15 | 1413 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class GradientCorrectnessTest(tf.test.TestCase):
def testMultipleOutputChainedGradients(self):
with self.test_session() as sess:
x = tf.constant(1.0, dtype=tf.float32)
yexp = tf.exp(x)
yexplog = tf.log(yexp)
grads = tf.gradients([yexp, yexplog], [x])
grad_vals = sess.run(grads)
exp1_plus_one = (1.0 + np.exp(1.0)).astype(np.float32)
# [dexp(x)/dx + d(log(exp(x)))/dx] @ x=1 == exp(1) + 1
self.assertAllClose(grad_vals[0], exp1_plus_one)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
orgito/ansible | lib/ansible/modules/database/misc/kibana_plugin.py | 52 | 7252 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Thierno IB. BARRY @barryib
# Sponsored by Polyconseil http://polyconseil.fr.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: kibana_plugin
short_description: Manage Kibana plugins
description:
- This module can be used to manage Kibana plugins.
version_added: "2.2"
author: Thierno IB. BARRY (@barryib)
options:
name:
description:
- Name of the plugin to install.
required: True
state:
description:
- Desired state of a plugin.
choices: ["present", "absent"]
default: present
url:
description:
- Set exact URL to download the plugin from.
- For local file, prefix its absolute path with file://
timeout:
description:
- "Timeout setting: 30s, 1m, 1h etc."
default: 1m
plugin_bin:
description:
- Location of the Kibana binary.
default: /opt/kibana/bin/kibana
plugin_dir:
description:
- Your configured plugin directory specified in Kibana.
default: /opt/kibana/installedPlugins/
version:
description:
- Version of the plugin to be installed.
- If plugin exists with previous version, plugin will NOT be updated unless C(force) is set to yes.
force:
description:
- Delete and re-install the plugin. Can be useful for plugins update.
type: bool
default: 'no'
'''
EXAMPLES = '''
- name: Install Elasticsearch head plugin
kibana_plugin:
state: present
name: elasticsearch/marvel
- name: Install specific version of a plugin
kibana_plugin:
state: present
name: elasticsearch/marvel
version: '2.3.3'
- name: Uninstall Elasticsearch head plugin
kibana_plugin:
state: absent
name: elasticsearch/marvel
'''
RETURN = '''
cmd:
description: the launched command during plugin management (install / remove)
returned: success
type: str
name:
description: the plugin name to install or remove
returned: success
type: str
url:
description: the url from where the plugin is installed from
returned: success
type: str
timeout:
description: the timeout for plugin download
returned: success
type: str
stdout:
description: the command stdout
returned: success
type: str
stderr:
description: the command stderr
returned: success
type: str
state:
description: the state for the managed plugin
returned: success
type: str
'''
import os
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
PACKAGE_STATE_MAP = dict(
present="--install",
absent="--remove"
)
def parse_plugin_repo(string):
elements = string.split("/")
# We first consider the simplest form: pluginname
repo = elements[0]
# We consider the form: username/pluginname
if len(elements) > 1:
repo = elements[1]
# remove elasticsearch- prefix
# remove es- prefix
for string in ("elasticsearch-", "es-"):
if repo.startswith(string):
return repo[len(string):]
return repo
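# Illustrative results (sketch):
#   parse_plugin_repo('elasticsearch/marvel')  -> 'marvel'
#   parse_plugin_repo('es-head')               -> 'head'
#   parse_plugin_repo('elasticsearch-head')    -> 'head'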
def is_plugin_present(plugin_dir, working_dir):
return os.path.isdir(os.path.join(working_dir, plugin_dir))
def parse_error(string):
reason = "reason: "
try:
return string[string.index(reason) + len(reason):].strip()
except ValueError:
return string
def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version='4.6'):
if LooseVersion(kibana_version) > LooseVersion('4.6'):
kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
cmd_args = [kibana_plugin_bin, "install"]
if url:
cmd_args.append(url)
else:
cmd_args.append(plugin_name)
else:
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
if url:
cmd_args.append("--url %s" % url)
if timeout:
cmd_args.append("--timeout %s" % timeout)
cmd = " ".join(cmd_args)
if module.check_mode:
return True, cmd, "check mode", ""
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
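# Illustrative resulting commands (sketch; the binary path is the module
# default and the plugin name is hypothetical):
#   Kibana <= 4.6: /opt/kibana/bin/kibana plugin --install elasticsearch/marvel --timeout 1m
#   Kibana >  4.6: /opt/kibana/bin/kibana-plugin install elasticsearch/marvel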
def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'):
if LooseVersion(kibana_version) > LooseVersion('4.6'):
kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
cmd_args = [kibana_plugin_bin, "remove", plugin_name]
else:
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
cmd = " ".join(cmd_args)
if module.check_mode:
return True, cmd, "check mode", ""
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def get_kibana_version(module, plugin_bin):
cmd_args = [plugin_bin, '--version']
cmd = " ".join(cmd_args)
rc, out, err = module.run_command(cmd)
if rc != 0:
module.fail_json(msg="Failed to get Kibana version : %s" % err)
return out.strip()
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
url=dict(default=None),
timeout=dict(default="1m"),
plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
version=dict(default=None),
force=dict(default="no", type="bool")
),
supports_check_mode=True,
)
name = module.params["name"]
state = module.params["state"]
url = module.params["url"]
timeout = module.params["timeout"]
plugin_bin = module.params["plugin_bin"]
plugin_dir = module.params["plugin_dir"]
version = module.params["version"]
force = module.params["force"]
changed, cmd, out, err = False, '', '', ''
kibana_version = get_kibana_version(module, plugin_bin)
present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
# skip if the state is correct
if (present and state == "present" and not force) or (state == "absent" and not present and not force):
module.exit_json(changed=False, name=name, state=state)
if version:
name = name + '/' + version
if state == "present":
if force:
remove_plugin(module, plugin_bin, name)
changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, kibana_version)
elif state == "absent":
changed, cmd, out, err = remove_plugin(module, plugin_bin, name, kibana_version)
module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
if __name__ == '__main__':
main()
| gpl-3.0 |
guorendong/iridium-browser-ubuntu | tools/telemetry/telemetry/unittest_util/gtest_progress_reporter.py | 20 | 3115 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
import unittest
from telemetry.unittest_util import progress_reporter
from telemetry.util import exception_formatter
def _FormatTestName(test):
chunks = test.id().split('.')[2:]
return '.'.join(chunks)
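# e.g. (illustrative): a test id of 'telemetry.foo.BarTest.testBaz' is
# shortened to 'BarTest.testBaz'.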
class GTestProgressReporter(progress_reporter.ProgressReporter):
def __init__(self, output_stream):
super(GTestProgressReporter, self).__init__(output_stream)
self._suite_start_time = None
self._test_start_time = None
def _Print(self, *args):
print >> self._output_stream, ' '.join(map(str, args))
self._output_stream.flush()
def _TestTimeMs(self):
return (time.time() - self._test_start_time) * 1000
def StartTest(self, test):
self._Print('[ RUN ]', _FormatTestName(test))
self._test_start_time = time.time()
def StartTestSuite(self, suite):
contains_test_suites = any(isinstance(test, unittest.TestSuite)
for test in suite)
if not contains_test_suites:
test_count = len([test for test in suite])
unit = 'test' if test_count == 1 else 'tests'
self._Print('[----------]', test_count, unit)
self._suite_start_time = time.time()
def StopTestSuite(self, suite):
contains_test_suites = any(isinstance(test, unittest.TestSuite)
for test in suite)
if not contains_test_suites:
test_count = len([test for test in suite])
unit = 'test' if test_count == 1 else 'tests'
elapsed_ms = (time.time() - self._suite_start_time) * 1000
self._Print('[----------]', test_count, unit,
'(%d ms total)' % elapsed_ms)
self._Print()
def StopTestRun(self, result):
unit = 'test' if len(result.successes) == 1 else 'tests'
self._Print('[ PASSED ]', len(result.successes), '%s.' % unit)
if result.errors or result.failures:
all_errors = result.errors[:]
all_errors.extend(result.failures)
unit = 'test' if len(all_errors) == 1 else 'tests'
self._Print('[ FAILED ]', len(all_errors), '%s, listed below:' % unit)
for test, _ in all_errors:
self._Print('[ FAILED ] ', _FormatTestName(test))
if not result.wasSuccessful():
self._Print()
count = len(result.errors) + len(result.failures)
unit = 'TEST' if count == 1 else 'TESTS'
self._Print(count, 'FAILED', unit)
self._Print()
def Error(self, test, err):
self.Failure(test, err)
def Failure(self, test, err):
exception_formatter.PrintFormattedException(*err)
test_name = _FormatTestName(test)
self._Print('[ FAILED ]', test_name, '(%0.f ms)' % self._TestTimeMs())
def Success(self, test):
test_name = _FormatTestName(test)
self._Print('[ OK ]', test_name, '(%0.f ms)' % self._TestTimeMs())
def Skip(self, test, reason):
test_name = _FormatTestName(test)
logging.warning('===== SKIPPING TEST %s: %s =====', test_name, reason)
self.Success(test)
| bsd-3-clause |
mbatchkarov/dc_evaluation | eval/pipeline/feature_handlers.py | 1 | 6374 | from discoutils.tokens import DocumentFeature
from discoutils.thesaurus_loader import Thesaurus
from eval.pipeline.thesauri import DummyThesaurus
from eval.utils.reflection_utils import get_named_object
def get_token_handler(handler_name, k, transformer_name, thesaurus):
"""
:param handler_name: fully qualified name of the handler class. Must implement the BaseFeatureHandler interface
    :param k: if the handler makes replacements, how many neighbours to insert for each replaced feature
    :param transformer_name: fully qualified name of the function (float -> float) that transforms the
similarity score between a feature and its replacements.
:param thesaurus: source of vectors or neighbours used to make replacements
:return:
"""
handler = get_named_object(handler_name)
transformer = get_named_object(transformer_name)
return handler(k, transformer, thesaurus)
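# Illustrative usage (a sketch; the thesaurus object is assumed to exist and
# 'builtins.float' merely stands in for a float -> float transformer):
#     handler = get_token_handler(
#         'eval.pipeline.feature_handlers.SignifiedOnlyFeatureHandler',
#         k=3,
#         transformer_name='builtins.float',
#         thesaurus=some_thesaurus)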
class BaseFeatureHandler():
"""
Base class for all feature handlers. This is used during document vectorisation and decides what to do with each
newly encountered document features. Currently the options are:
- ignore it. This is the standard test-time behaviour of `CountVectorizer` for out-of-vocabulary features.
- enter it into the vocabulary and increment the corresponding column in the document vector. This is the default
train-time behaviour of `CountVectorizer`
- replace it with other features according to a distributional model.
The decision is based on whether the feature is in the current model vocabulary (IV) or not (OOV), and whether
it is in the distributional model (IT) or not (OOT).
This class does standard CountVectorizer-like vectorization:
- in vocabulary, in thesaurus: only insert feature itself
- IV,OOT: feature itself
- OOV, IT: ignore feature
- OOV, OOT: ignore feature
"""
def __init__(self, *args):
pass
def handle_IV_IT_feature(self, **kwargs):
self._insert_feature_only(**kwargs)
def handle_IV_OOT_feature(self, **kwargs):
self._insert_feature_only(**kwargs)
def handle_OOV_IT_feature(self, **kwargs):
self._ignore_feature()
def handle_OOV_OOT_feature(self, **kwargs):
self._ignore_feature()
def _insert_feature_only(self, feature_index_in_vocab, j_indices, values, **kwargs):
j_indices.append(feature_index_in_vocab)
values.append(1)
def _ignore_feature(self):
pass
def _paraphrase(self, feature, vocabulary, j_indices, values, stats, **kwargs):
"""
        Replaces a feature with its k nearest neighbours from the thesaurus.
        Parameters
        ----------
        feature : the DocumentFeature to look up and replace
        vocabulary : dict mapping DocumentFeature to column index; only
            in-vocabulary neighbours are inserted
        j_indices, values : column indices and values of the sparse document
            vector being built, updated in place
        stats : object whose register_paraphrase method records each
            replacement event
"""
# logging.debug('Paraphrasing %r in doc %d', feature, doc_id)
neighbours = self.thesaurus.get_nearest_neighbours(feature)
if self.thesaurus.__class__.__name__ == 'Thesaurus':
# todo this will also activate for DenseVectors, because they are also instances of thesaurus
# the check needs to be self.thesaurus.__class__.__name__ == 'Thesaurus', but then
# we need to make sure init_sims is called with the correct vocabulary so that all neighbours are IV
# precomputed thesauri do not guarantee that the returned neighbours will be in vocabulary
# these should by now only the used in testing though
neighbours = [(neighbour, sim) for (neighbour, sim) in neighbours
if DocumentFeature.from_string(neighbour) in vocabulary]
event = [str(feature), len(neighbours)]
for neighbour, sim in neighbours[:self.k]:
# the document may already contain the feature we
# are about to insert into it,
# a merging strategy is required,
# e.g. what do we do if the document has the word X
# in it and we encounter X again. By default,
# scipy uses addition
df = DocumentFeature.from_string(neighbour)
j_indices.append(vocabulary.get(df))
values.append(self.sim_transformer(sim))
# track the event
event.extend([neighbour, sim])
stats.register_paraphrase(tuple(event))
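# Illustrative dispatch (sketch): the vectoriser picks the hook by membership,
# e.g. a feature that is out of vocabulary but in the thesaurus triggers
# handler.handle_OOV_IT_feature(...); the subclasses below override these
# hooks to paraphrase instead of ignoring.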
class SignifierSignifiedFeatureHandler(BaseFeatureHandler):
"""
Handles features the way standard Naive Bayes does, except
- OOV, IT: insert the first K IV neighbours from thesaurus instead of
ignoring the feature
This is standard feature expansion from the IR literature.
"""
def __init__(self, k, sim_transformer, thesaurus):
self.k = k
self.sim_transformer = sim_transformer
self.thesaurus = thesaurus
def handle_OOV_IT_feature(self, **kwargs):
self._paraphrase(**kwargs)
class SignifiedOnlyFeatureHandler(BaseFeatureHandler):
"""
Ignores all OOT features and inserts the first K IV neighbours from
thesaurus for all IT features. This is what I called Extreme Feature Expansion
in my thesis
"""
def __init__(self, k, sim_transformer, thesaurus):
self.k = k
self.sim_transformer = sim_transformer
self.thesaurus = thesaurus
def handle_OOV_IT_feature(self, **kwargs):
self._paraphrase(**kwargs)
handle_IV_IT_feature = handle_OOV_IT_feature
def handle_IV_OOT_feature(self, **kwargs):
self._ignore_feature()
class SignifierRandomBaselineFeatureHandler(SignifiedOnlyFeatureHandler):
"""
Ignores all OOT features and inserts K random IV tokens for all IT features. Useful to unit tests.
"""
def __init__(self, k, sim_transformer, thesaurus):
self.k = k
self.sim_transformer = sim_transformer
self.thesaurus = thesaurus
def handle_OOV_IT_feature(self, **kwargs):
self._paraphrase(**kwargs)
handle_IV_IT_feature = handle_OOV_IT_feature
| bsd-3-clause |
talon-one/talon_one.py | test/test_application_session_entity.py | 1 | 2063 | # coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.application_session_entity import ApplicationSessionEntity # noqa: E501
from talon_one.rest import ApiException
class TestApplicationSessionEntity(unittest.TestCase):
"""ApplicationSessionEntity unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ApplicationSessionEntity
            include_optional is a boolean: when False only required
            params are included, when True both required and
            optional params are included """
# model = talon_one.models.application_session_entity.ApplicationSessionEntity() # noqa: E501
if include_optional :
return ApplicationSessionEntity(
session_id = 56
)
else :
return ApplicationSessionEntity(
session_id = 56,
)
def testApplicationSessionEntity(self):
"""Test ApplicationSessionEntity"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| mit |
pkoutsias/SickRage | lib/feedparser/namespaces/mediarss.py | 43 | 5377 | # Support for the Media RSS format
# Copyright 2010-2015 Kurt McKee <[email protected]>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
from ..util import FeedParserDict
class Namespace(object):
supported_namespaces = {
# Canonical namespace
'http://search.yahoo.com/mrss/': 'media',
# Old namespace (no trailing slash)
'http://search.yahoo.com/mrss': 'media',
}
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_media_category(self):
self._end_category()
def _end_media_keywords(self):
for term in self.pop('media_keywords').split(','):
if term.strip():
self._addTag(term.strip(), None, None)
def _start_media_title(self, attrsD):
self._start_title(attrsD)
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_media_group(self, attrsD):
# don't do anything, but don't break the enclosed tags either
pass
def _start_media_rating(self, attrsD):
context = self._getContext()
context.setdefault('media_rating', attrsD)
self.push('rating', 1)
def _end_media_rating(self):
rating = self.pop('rating')
if rating is not None and rating.strip():
context = self._getContext()
context['media_rating']['content'] = rating
def _start_media_credit(self, attrsD):
context = self._getContext()
context.setdefault('media_credit', [])
context['media_credit'].append(attrsD)
self.push('credit', 1)
def _end_media_credit(self):
credit = self.pop('credit')
        if credit is not None and credit.strip():
context = self._getContext()
context['media_credit'][-1]['content'] = credit
def _start_media_description(self, attrsD):
self._start_description(attrsD)
def _end_media_description(self):
self._end_description()
def _start_media_restriction(self, attrsD):
context = self._getContext()
context.setdefault('media_restriction', attrsD)
self.push('restriction', 1)
def _end_media_restriction(self):
restriction = self.pop('restriction')
        if restriction is not None and restriction.strip():
context = self._getContext()
context['media_restriction']['content'] = [cc.strip().lower() for cc in restriction.split(' ')]
def _start_media_license(self, attrsD):
context = self._getContext()
context.setdefault('media_license', attrsD)
self.push('license', 1)
def _end_media_license(self):
license = self.pop('license')
        if license is not None and license.strip():
context = self._getContext()
context['media_license']['content'] = license
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
        if url is not None and url.strip():
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
| gpl-3.0 |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/pip-7.1.0-py3.4.egg/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py | 2040 | 8935 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
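    # Illustrative internal state (sketch): after od['a'] = 1 on a fresh dict,
    #   root   == [link_a, link_a, None]    # sentinel PREV/NEXT point at link_a
    #   link_a == [root, root, 'a']         # the circular list closes on root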
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
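    # Pickling round-trip sketch (illustrative; ``od`` stands for any instance):
    # key order survives because the items list is handed back to the
    # constructor on unpickling.
    #
    #     import pickle
    #     od2 = pickle.loads(pickle.dumps(od))
    #     assert list(od2) == list(od)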
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
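# A minimal usage sketch (added for illustration). The sentinel-based
# doubly-linked list preserves insertion order, so iteration and popitem()
# behave as follows:
#
#     od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
#     list(od)                # -> ['a', 'b', 'c']
#     od.popitem()            # -> ('c', 3), LIFO by default
#     od.popitem(last=False)  # -> ('a', 1), FIFO when last is false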
| mit |
erinn/ansible | v1/ansible/module_utils/openstack.py | 198 | 4502 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
def openstack_argument_spec():
# DEPRECATED: This argument spec is only used for the deprecated old
# OpenStack modules. It turns out that modern OpenStack auth is WAY
# more complex than this.
# Consume standard OpenStack environment variables.
    # This is mainly useful for ad-hoc command-line operation; in playbooks
    # one would normally supply these values via variables instead.
OS_AUTH_URL=os.environ.get('OS_AUTH_URL', 'http://127.0.0.1:35357/v2.0/')
OS_PASSWORD=os.environ.get('OS_PASSWORD', None)
OS_REGION_NAME=os.environ.get('OS_REGION_NAME', None)
OS_USERNAME=os.environ.get('OS_USERNAME', 'admin')
OS_TENANT_NAME=os.environ.get('OS_TENANT_NAME', OS_USERNAME)
spec = dict(
login_username = dict(default=OS_USERNAME),
auth_url = dict(default=OS_AUTH_URL),
region_name = dict(default=OS_REGION_NAME),
availability_zone = dict(default=None),
)
if OS_PASSWORD:
spec['login_password'] = dict(default=OS_PASSWORD)
else:
spec['login_password'] = dict(required=True)
if OS_TENANT_NAME:
spec['login_tenant_name'] = dict(default=OS_TENANT_NAME)
else:
spec['login_tenant_name'] = dict(required=True)
return spec
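# Illustrative consumption of the spec above (hypothetical module code, not
# part of this file); AnsibleModule merges these defaults with task arguments:
#
#     from ansible.module_utils.basic import AnsibleModule
#     argument_spec = openstack_argument_spec()
#     argument_spec['name'] = dict(required=True)
#     module = AnsibleModule(argument_spec=argument_spec)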
def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
ret = []
for (k, v) in addresses.iteritems():
if key_name and k == key_name:
ret.extend([addrs['addr'] for addrs in v])
else:
for interface_spec in v:
if 'OS-EXT-IPS:type' in interface_spec and interface_spec['OS-EXT-IPS:type'] == ext_tag:
ret.append(interface_spec['addr'])
return ret
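# Sketch of the expected ``addresses`` shape (based on the Nova server
# address format; the values below are made up for illustration):
#
#     addresses = {
#         'private': [{'addr': '10.0.0.3', 'OS-EXT-IPS:type': 'fixed'}],
#         'public': [{'addr': '203.0.113.10', 'OS-EXT-IPS:type': 'floating'}],
#     }
#     openstack_find_nova_addresses(addresses, 'floating')          # ['203.0.113.10']
#     openstack_find_nova_addresses(addresses, 'fixed', 'private')  # ['10.0.0.3']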
def openstack_full_argument_spec(**kwargs):
spec = dict(
cloud=dict(default=None),
auth_type=dict(default=None),
auth=dict(default=None),
region_name=dict(default=None),
availability_zone=dict(default=None),
verify=dict(default=True, aliases=['validate_certs']),
cacert=dict(default=None),
cert=dict(default=None),
key=dict(default=None),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
api_timeout=dict(default=None, type='int'),
endpoint_type=dict(
default='public', choices=['public', 'internal', 'admin']
)
)
spec.update(kwargs)
return spec
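# Illustrative merge (assumed usage): module-specific options layered on top
# of the common OpenStack argument spec, then passed to AnsibleModule.
#
#     spec = openstack_full_argument_spec(
#         name=dict(required=True),
#         state=dict(default='present', choices=['absent', 'present']),
#     )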
def openstack_module_kwargs(**kwargs):
ret = {}
for key in ('mutually_exclusive', 'required_together', 'required_one_of'):
if key in kwargs:
if key in ret:
ret[key].extend(kwargs[key])
else:
ret[key] = kwargs[key]
return ret
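# Illustrative call (assumed usage): pass-through of AnsibleModule keyword
# arguments such as mutually exclusive option groups.
#
#     kwargs = openstack_module_kwargs(
#         mutually_exclusive=[['auth', 'cloud']],
#     )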
| gpl-3.0 |
hassoon3/odoo | openerp/tools/win32.py | 457 | 1993 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import locale
import time
import datetime
if not hasattr(locale, 'D_FMT'):
locale.D_FMT = 1
if not hasattr(locale, 'T_FMT'):
locale.T_FMT = 2
if not hasattr(locale, 'nl_langinfo'):
def nl_langinfo(param):
if param == locale.D_FMT:
val = time.strptime('30/12/2004', '%d/%m/%Y')
dt = datetime.datetime(*val[:-2])
format_date = dt.strftime('%x')
for x, y in [('30', '%d'),('12', '%m'),('2004','%Y'),('04', '%Y')]:
format_date = format_date.replace(x, y)
return format_date
if param == locale.T_FMT:
val = time.strptime('13:24:56', '%H:%M:%S')
dt = datetime.datetime(*val[:-2])
format_time = dt.strftime('%X')
for x, y in [('13', '%H'),('24', '%M'),('56','%S')]:
format_time = format_time.replace(x, y)
return format_time
locale.nl_langinfo = nl_langinfo
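# Illustrative behaviour of the shim above (output depends on the process
# locale; the values shown assume a US-style locale):
#
#     import locale
#     locale.nl_langinfo(locale.D_FMT)  # e.g. '%m/%d/%Y'
#     locale.nl_langinfo(locale.T_FMT)  # e.g. '%H:%M:%S'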
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
thruflo/pyramid_basemodel | src/pyramid_basemodel/root.py | 2 | 1206 | # -*- coding: utf-8 -*-
"""Provides a base traversal root and a mixin class for objects in the
Pyramid traversal hierarchy.
http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/traversal.html
"""
__all__ = [
'BaseRoot',
]
import logging
logger = logging.getLogger(__name__)
from zope.interface import implementer
from zope.interface import alsoProvides
from pyramid.interfaces import ILocation
@implementer(ILocation)
class BaseRoot(object):
"""Base class for traversal factories."""
__name__ = ''
__parent__ = None
def locatable(self, context, key, provides=None):
"""Make a context object locatable and return it."""
# Compose.
if provides is None:
provides = alsoProvides
if not hasattr(context, '__name__'):
context.__name__ = key
context._located_parent = self
context.request = self.request
if not ILocation.providedBy(context):
provides(context, ILocation)
return context
def __init__(self, request, key='', parent=None):
self.__name__ = key
self.__parent__ = parent
self.request = request
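# Illustrative subclass (hypothetical names) wiring BaseRoot up as a Pyramid
# root factory; ``Page`` stands in for any traversable context class:
#
#     class SiteRoot(BaseRoot):
#         def __getitem__(self, key):
#             return self.locatable(Page(), key)
#
#     from pyramid.config import Configurator
#     config = Configurator(root_factory=SiteRoot)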
| unlicense |
stevenzhang18/Indeed-Flask | lib/pandas/tests/test_panel.py | 9 | 92205 | # -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
from inspect import getargspec
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal,
assert_frame_equal,
assert_series_equal,
assert_almost_equal,
assert_produces_warning,
ensure_clean,
assertRaisesRegexp,
makeCustomDataframe as mkdf,
makeMixedDataFrame
)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
    Decorator to ignore FutureWarning if we have a SparsePanel.
    Can be removed when SparsePanel is fully removed.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning, check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
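# Usage sketch: the decorator wraps individual test methods below, e.g.
#
#     @ignore_sparse_panel_future_warning
#     def test_copy_names(self): ...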
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
foo = repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)/np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in getargspec(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert(self.panel._get_axis(0) is self.panel.items)
assert(self.panel._get_axis(1) is self.panel.major_axis)
assert(self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
item = self.panel[0]
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
item = self.panel[0]
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
        # Test panel.iteritems(), i.e. compat.iteritems(panel)
# just test that it works
for k, v in compat.iteritems(self.panel):
pass
self.assertEqual(len(list(compat.iteritems(self.panel))),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % name)
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3*4*5).reshape(3,4,5), items=['ItemA','ItemB','ItemC'],
major_axis=pd.date_range('20130101',periods=4),minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p,op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo',), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index,
columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1' : df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'], newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :], newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :],
p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :],
p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols],
p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
col = 'C'
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
from pandas import DataFrame
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'],df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0,1,3,5],[2,3]])
        # GH3830, panel assignment by values/frame
for dtype in ['float64','int64']:
panel = Panel(np.arange(40).reshape((2,4,5)), items=['a1','a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]}, 'ItemB': {
'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing,
SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
        # strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ],
['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'), ('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3), minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4), dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64','float32','object']:
panel = Panel(items=lrange(2),major_axis=lrange(10),minor_axis=lrange(5),dtype=dtype)
_check_dtype(panel,dtype)
for dtype in ['float64','float32','int64','int32','object']:
panel = Panel(np.array(np.random.randn(2,10,5),dtype=dtype),items=lrange(2),major_axis=lrange(10),minor_axis=lrange(5),dtype=dtype)
_check_dtype(panel,dtype)
for dtype in ['float64','float32','int64','int32','object']:
panel = Panel(np.array(np.random.randn(2,10,5),dtype='O'),items=lrange(2),major_axis=lrange(10),minor_axis=lrange(5),dtype=dtype)
_check_dtype(panel,dtype)
for dtype in ['float64','float32','int64','int32','object']:
panel = Panel(np.random.randn(2,10,5),items=lrange(2),major_axis=lrange(10),minor_axis=lrange(5),dtype=dtype)
_check_dtype(panel,dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5), index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5), index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
wp3 = Panel.from_dict(d3)
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
wp4 = Panel.from_dict(d4)
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in compat.iteritems(self.panel))
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0,5000,100)))[:50] # unique random int keys
d = OrderedDict([(k,mkdf(10,5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major,
minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df,
'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3,4,5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 4, 5\), indices imply \(4, 5, 5\)", testit)
def testit():
Panel(np.random.randn(3,4,5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 4, 5\), indices imply \(5, 4, 5\)", testit)
def testit():
Panel(np.random.randn(3,4,5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 4, 5\), indices imply \(5, 5, 4\)", testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert(conformed.index.equals(self.panel.major_axis))
assert(conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A = dict(a = ['1','1.0'])))
expected = Panel(dict(A = dict(a = [1,1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'),index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
from pandas import Series,DataFrame
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values,
np.sqrt(self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x*2, axis='items')
expected = self.panel*2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x*2, axis='major_axis')
expected = self.panel*2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x*2, axis='minor_axis')
expected = self.panel*2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'),index=self.panel.major_axis,columns=self.panel.minor_axis)
assert_frame_equal(result,expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'),index=self.panel.minor_axis,columns=self.panel.items)
assert_frame_equal(result,expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'),index=self.panel.major_axis,columns=self.panel.items)
assert_frame_equal(result,expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result,expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result,expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result,expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result,expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x*2, axis = ['items','major_axis'])
expected = (self.panel*2).transpose('minor_axis','major_axis','items')
assert_panel_equal(result,expected)
result = self.panel.apply(lambda x: x*2, axis = ['major_axis','items'])
assert_panel_equal(result,expected)
result = self.panel.apply(lambda x: x*2, axis = ['items','minor_axis'])
expected = (self.panel*2).transpose('major_axis','minor_axis','items')
assert_panel_equal(result,expected)
result = self.panel.apply(lambda x: x*2, axis = ['minor_axis','items'])
assert_panel_equal(result,expected)
result = self.panel.apply(lambda x: x*2, axis = ['major_axis','minor_axis'])
expected = self.panel*2
assert_panel_equal(result,expected)
result = self.panel.apply(lambda x: x*2, axis = ['minor_axis','major_axis'])
assert_panel_equal(result,expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis = ['items','major_axis'])
expected = self.panel.sum(1).T
assert_frame_equal(result,expected)
result = self.panel.apply(lambda x: x.sum(1), axis = ['items','major_axis'])
expected = self.panel.sum(0)
assert_frame_equal(result,expected)
# transforms
f = lambda x: ((x.T-x.mean(1))/x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis = ['items','major_axis'])
expected = Panel(dict([ (ax,f(self.panel.loc[:,:,ax])) for ax in self.panel.minor_axis ]))
assert_panel_equal(result,expected)
result = self.panel.apply(f, axis = ['major_axis','minor_axis'])
expected = Panel(dict([ (ax,f(self.panel.loc[ax])) for ax in self.panel.items ]))
assert_panel_equal(result,expected)
result = self.panel.apply(f, axis = ['minor_axis','items'])
expected = Panel(dict([ (ax,f(self.panel.loc[:,ax])) for ax in self.panel.major_axis ]))
assert_panel_equal(result,expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ('two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(4,3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(4,3), columns=list("ABC"), index=index)
p = Panel({'f':dfa, 'g':dfb})
result = p.apply(lambda x: x.sum(), axis=0)
        # on Windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result,expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
        # raise exception when both major and major_axis are passed
self.assertRaises(Exception, self.panel.reindex,
major_axis=new_major, major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result,self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis,
method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result,self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis,
copy = False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis,
copy = False)
assert_panel_equal(result,self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4,3))
p = Panel({ 'Item1' : df })
expected = Panel({ 'Item1' : df })
expected['Item2'] = np.nan
items = ['Item1','Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis, copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis, copy=False))
results.append(p.reindex(items=items, major_axis=major_axis, minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis, minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected,r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
        # neg indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3,4,5))
p.iloc[0:2,0:2,0:2] = np.nan
self.assertRaises(NotImplementedError, lambda : p.fillna(999,limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
        # this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel,result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
        # test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError, 'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor', minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p,expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'),
(2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples([(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3, 'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b', 1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'),
(np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'), (1, 'two', 'C'),
(1, 'one', 'A'), (1, 'one', 'B'), (1, 'one', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'), (2, 'one', 'C'),
(np.nan, 'two', 'A'), (np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'),
(2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14], ['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4], [-5, -6, -7, -8]
], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples([(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4], [13, 13],
[14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'], ['x', 'x'],
['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'], [-1, -1],
[-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6], [-7, -7],
[-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A','B']]
expected.items = ['A','A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:,'A']
result = panel.iloc[:,0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:,'E']
result = panel.loc[:,'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:,['A','B']]
expected.major_axis = ['A','A']
result = panel.loc[:,'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:,:,'A']
result = panel.iloc[:,:,0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:,:,'E']
result = panel.loc[:,:,'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:,:,['A','B']]
expected.minor_axis = ['A','A']
result = panel.loc[:,:,'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx),
shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx),
shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx],
shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in compat.iteritems(self.panel)))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=bday)
assert_panel_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
panel = _panel
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values,
items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.ix[:, [0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
assert_panel_equal(result, expected)
# major, 2
result = wp.pct_change(periods=2)
expected = Panel({'i1': df1.pct_change(2),
'i2': df2.pct_change(2),
'i3': df3.pct_change(2)})
assert_panel_equal(result, expected)
# minor, 1
result = wp.pct_change(axis='minor')
expected = Panel({'i1': df1.pct_change(axis=1),
'i2': df2.pct_change(axis=1),
'i3': df3.pct_change(axis=1)})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=2)
assert_panel_equal(result, expected)
# minor, 2
result = wp.pct_change(periods=2, axis='minor')
expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
'i2': df2.pct_change(periods=2, axis=1),
'i3': df3.pct_change(periods=2, axis=1)})
assert_panel_equal(result, expected)
# items, 1
result = wp.pct_change(axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [1, 0.5, .2],
'c2': [1./3, 0.25, 1./6]}),
'i3': DataFrame({'c1': [.5, 1./3, 1./6],
'c2': [.25, .2, 1./7]})})
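# e.g. for 'i2', c1: (df2 - df1) / df1 = [2/1 - 1, 3/2 - 1, 6/5 - 1]
#                                      = [1, 0.5, 0.2]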
assert_panel_equal(result, expected)
result = wp.pct_change(axis=0)
assert_panel_equal(result, expected)
# items, 2
result = wp.pct_change(periods=2, axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i3': DataFrame({'c1': [2, 1, .4],
'c2': [2./3, .5, 1./3]})})
assert_panel_equal(result, expected)
def test_multiindex_get(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.ix['a']
assert_panel_equal(f1, f2)
self.assertTrue((f1.items == [1, 2]).all())
self.assertTrue((f2.items == [1, 2]).all())
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
def test_multiindex_blocks(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
wp = Panel(self.panel._data)
wp.items = ind
f1 = wp['a']
self.assertTrue((f1.items == [1, 2]).all())
f1 = wp[('b', 1)]
self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
def test_repr_empty(self):
empty = Panel()
repr(empty)
def test_rename(self):
mapper = {
'ItemA': 'foo',
'ItemB': 'bar',
'ItemC': 'baz'
}
renamed = self.panel.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
self.assertTrue(renamed.items.equals(exp))
renamed = self.panel.rename_axis(str.lower, axis=2)
exp = Index(['a', 'b', 'c', 'd'])
self.assertTrue(renamed.minor_axis.equals(exp))
# don't copy
renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
renamed_nocopy['foo'] = 3.
self.assertTrue((self.panel['ItemA'].values == 3).all())
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
# specific cases from #3440
self.panel['a'] = self.panel['ItemA']
assert_frame_equal(self.panel['a'], self.panel.a)
self.panel['i'] = self.panel['ItemA']
assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2),
('AAPL', 1), ('MSFT', 1)]
midx = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.rand(5, 4), index=midx)
p = df.to_panel()
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
import os
try:
import xlwt
import xlrd
import openpyxl
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
path = '__tmp__.' + ext
with ensure_clean(path) as path:
self.panel.to_excel(path)
try:
reader = ExcelFile(path)
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for item, df in compat.iteritems(self.panel):
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_to_excel_xlsxwriter(self):
try:
import xlrd
import xlsxwriter
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("Requires xlrd and xlsxwriter. Skipping test.")
path = '__tmp__.xlsx'
with ensure_clean(path) as path:
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
except ImportError as e:
raise nose.SkipTest("cannot write excel file: %s" % e)
for item, df in compat.iteritems(self.panel):
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_dropna(self):
p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
p.ix[:, ['b', 'd'], 0] = np.nan
result = p.dropna(axis=1)
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
inp = p.copy()
inp.dropna(axis=1, inplace=True)
assert_panel_equal(inp, exp)
result = p.dropna(axis=1, how='all')
assert_panel_equal(result, p)
p.ix[:, ['b', 'd'], :] = np.nan
result = p.dropna(axis=1, how='all')
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
p = Panel(np.random.randn(4, 5, 6), items=list('abcd'))
p.ix[['b'], :, 0] = np.nan
result = p.dropna()
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
result = p.dropna(how='all')
assert_panel_equal(result, p)
p.ix['b'] = np.nan
result = p.dropna(how='all')
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
def test_drop(self):
df = DataFrame({"A": [1, 2], "B": [3, 4]})
panel = Panel({"One": df, "Two": df})
def check_drop(drop_val, axis_number, aliases, expected):
try:
actual = panel.drop(drop_val, axis=axis_number)
assert_panel_equal(actual, expected)
for alias in aliases:
actual = panel.drop(drop_val, axis=alias)
assert_panel_equal(actual, expected)
except AssertionError:
com.pprint_thing("Failed with axis_number %d and aliases: %s" %
(axis_number, aliases))
raise
# Items
expected = Panel({"One": df})
check_drop('Two', 0, ['items'], expected)
self.assertRaises(ValueError, panel.drop, 'Three')
# errors = 'ignore'
dropped = panel.drop('Three', errors='ignore')
assert_panel_equal(dropped, panel)
dropped = panel.drop(['Two', 'Three'], errors='ignore')
expected = Panel({"One": df})
assert_panel_equal(dropped, expected)
# Major
exp_df = DataFrame({"A": [2], "B": [4]}, index=[1])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(0, 1, ['major_axis', 'major'], expected)
exp_df = DataFrame({"A": [1], "B": [3]}, index=[0])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop([1], 1, ['major_axis', 'major'], expected)
# Minor
exp_df = df[['B']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(["A"], 2, ['minor_axis', 'minor'], expected)
exp_df = df[['A']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop("B", 2, ['minor_axis', 'minor'], expected)
def test_update(self):
pan = Panel([[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan],
[np.nan, np.nan, 7]]], items=[1])
pan.update(other)
expected = Panel([[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[3.6, 2., 3],
[1.5, np.nan, 7],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_from_dict(self):
pan = Panel({'one': DataFrame([[1.5, np.nan, 3],
[1.5, np.nan, 3],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]),
'two': DataFrame([[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]])})
other = {'two': DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]])}
pan.update(other)
expected = Panel({'two': DataFrame([[3.6, 2., 3],
[1.5, np.nan, 7],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]),
'one': DataFrame([[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]])})
assert_panel_equal(pan, expected)
def test_update_nooverwrite(self):
pan = Panel([[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan],
[np.nan, np.nan, 7]]], items=[1])
pan.update(other, overwrite=False)
expected = Panel([[[1.5, np.nan, 3],
[1.5, np.nan, 3],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, 2., 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_filtered(self):
pan = Panel([[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan],
[np.nan, np.nan, 7]]], items=[1])
pan.update(other, filter_func=lambda x: x > 2)
expected = Panel([[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3],
[1.5, np.nan, 7],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_raise(self):
pan = Panel([[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
np.testing.assert_raises(Exception, pan.update, *(pan,),
**{'raise_conflict': True})
def test_all_any(self):
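# Panel.all/any over axis 1 and 2 return DataFrames that are transposed
# relative to the raw ndarray reduction, hence the .T in the comparisons.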
self.assertTrue((self.panel.all(axis=0).values ==
nanall(self.panel, axis=0)).all())
self.assertTrue((self.panel.all(axis=1).values ==
nanall(self.panel, axis=1).T).all())
self.assertTrue((self.panel.all(axis=2).values ==
nanall(self.panel, axis=2).T).all())
self.assertTrue((self.panel.any(axis=0).values ==
nanany(self.panel, axis=0)).all())
self.assertTrue((self.panel.any(axis=1).values ==
nanany(self.panel, axis=1).T).all())
self.assertTrue((self.panel.any(axis=2).values ==
nanany(self.panel, axis=2).T).all())
def test_all_any_unhandled(self):
self.assertRaises(NotImplementedError, self.panel.all, bool_only=True)
self.assertRaises(NotImplementedError, self.panel.any, bool_only=True)
class TestLongPanel(tm.TestCase):
"""
LongPanel no longer exists, but...
"""
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
panel = tm.makePanel()
tm.add_nans(panel)
self.panel = panel.to_frame()
self.unfiltered_panel = panel.to_frame(filter_observations=False)
def test_ops_differently_indexed(self):
# trying to set non-identically indexed panel
wp = self.panel.to_panel()
wp2 = wp.reindex(major=wp.major_axis[:-1])
lp2 = wp2.to_frame()
result = self.panel + lp2
assert_frame_equal(result.reindex(lp2.index), lp2 * 2)
# careful, mutation
self.panel['foo'] = lp2['ItemA']
assert_series_equal(self.panel['foo'].reindex(lp2.index),
lp2['ItemA'], check_names=False)
def test_ops_scalar(self):
result = self.panel.mul(2)
expected = DataFrame.__mul__(self.panel, 2)
assert_frame_equal(result, expected)
def test_combineFrame(self):
wp = self.panel.to_panel()
result = self.panel.add(wp['ItemA'].stack(), axis=0)
assert_frame_equal(result.to_panel()['ItemA'], wp['ItemA'] * 2)
def test_combinePanel(self):
wp = self.panel.to_panel()
result = self.panel.add(self.panel)
wide_result = result.to_panel()
assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])
# one item
result = self.panel.add(self.panel.filter(['ItemA']))
def test_combine_scalar(self):
result = self.panel.mul(2)
expected = DataFrame(self.panel._data) * 2
assert_frame_equal(result, expected)
def test_combine_series(self):
s = self.panel['ItemA'][:10]
result = self.panel.add(s, axis=0)
expected = DataFrame.add(self.panel, s, axis=0)
assert_frame_equal(result, expected)
s = self.panel.ix[5]
result = self.panel + s
expected = DataFrame.add(self.panel, s, axis=1)
assert_frame_equal(result, expected)
def test_operators(self):
wp = self.panel.to_panel()
result = (self.panel + 1).to_panel()
assert_frame_equal(wp['ItemA'] + 1, result['ItemA'])
def test_arith_flex_panel(self):
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
self.panel = self.panel.to_panel()
for n in [np.random.randint(-50, -1), np.random.randint(1, 50), 0]:
for op in ops:
alias = aliases.get(op, op)
f = getattr(operator, alias)
exp = f(self.panel, n)
result = getattr(self.panel, op)(n)
assert_panel_equal(result, exp, check_panel_type=True)
# rops
r_f = lambda x, y: f(y, x)
exp = r_f(self.panel, n)
result = getattr(self.panel, 'r' + op)(n)
assert_panel_equal(result, exp)
def test_sort(self):
def is_sorted(arr):
return (arr[1:] >= arr[:-1]).all()
sorted_minor = self.panel.sortlevel(level=1)
self.assertTrue(is_sorted(sorted_minor.index.labels[1]))
sorted_major = sorted_minor.sortlevel(level=0)
self.assertTrue(is_sorted(sorted_major.index.labels[0]))
def test_to_string(self):
buf = StringIO()
self.panel.to_string(buf)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.index.levels[0]
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected, check_names=False)  # TODO truncate drops index.names
trunced = self.panel.truncate(before=start).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected, check_names=False)  # TODO truncate drops index.names
trunced = self.panel.truncate(after=end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected, check_names=False)  # TODO truncate drops index.names
# truncate on dates that aren't in there
wp = self.panel.to_panel()
new_index = wp.major_axis[::5]
wp2 = wp.reindex(major=new_index)
lp2 = wp2.to_frame()
lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2])
wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2])
assert_panel_equal(wp_trunc, lp_trunc.to_panel())
# throw proper exception
self.assertRaises(Exception, lp2.truncate, wp.major_axis[-2],
wp.major_axis[2])
def test_axis_dummies(self):
from pandas.core.reshape import make_axis_dummies
minor_dummies = make_axis_dummies(self.panel, 'minor')
self.assertEqual(len(minor_dummies.columns),
len(self.panel.index.levels[1]))
major_dummies = make_axis_dummies(self.panel, 'major')
self.assertEqual(len(major_dummies.columns),
len(self.panel.index.levels[0]))
mapping = {'A': 'one',
'B': 'one',
'C': 'two',
'D': 'two'}
transformed = make_axis_dummies(self.panel, 'minor',
transform=mapping.get)
self.assertEqual(len(transformed.columns), 2)
self.assert_numpy_array_equal(transformed.columns, ['one', 'two'])
# TODO: test correctness
def test_get_dummies(self):
from pandas.core.reshape import get_dummies, make_axis_dummies
self.panel['Label'] = self.panel.index.labels[1]
minor_dummies = make_axis_dummies(self.panel, 'minor')
dummies = get_dummies(self.panel['Label'])
self.assert_numpy_array_equal(dummies.values, minor_dummies.values)
def test_mean(self):
means = self.panel.mean(level='minor')
# test versus Panel version
wide_means = self.panel.to_panel().mean('major')
assert_frame_equal(means, wide_means)
def test_sum(self):
sums = self.panel.sum(level='minor')
# test versus Panel version
wide_sums = self.panel.to_panel().sum('major')
assert_frame_equal(sums, wide_sums)
def test_count(self):
index = self.panel.index
major_count = self.panel.count(level=0)['ItemA']
labels = index.labels[0]
for i, idx in enumerate(index.levels[0]):
self.assertEqual(major_count[i], (labels == i).sum())
minor_count = self.panel.count(level=1)['ItemA']
labels = index.labels[1]
for i, idx in enumerate(index.levels[1]):
self.assertEqual(minor_count[i], (labels == i).sum())
def test_join(self):
lp1 = self.panel.filter(['ItemA', 'ItemB'])
lp2 = self.panel.filter(['ItemC'])
joined = lp1.join(lp2)
self.assertEqual(len(joined.columns), 3)
self.assertRaises(Exception, lp1.join,
self.panel.filter(['ItemB', 'ItemC']))
def test_pivot(self):
from pandas.core.reshape import _slow_pivot
one, two, three = (np.array([1, 2, 3, 4, 5]),
np.array(['a', 'b', 'c', 'd', 'e']),
np.array([1, 2, 3, 5, 4.]))
df = pivot(one, two, three)
self.assertEqual(df['a'][1], 1)
self.assertEqual(df['b'][2], 2)
self.assertEqual(df['c'][3], 3)
self.assertEqual(df['d'][4], 5)
self.assertEqual(df['e'][5], 4)
assert_frame_equal(df, _slow_pivot(one, two, three))
# weird overlap, TODO: test?
a, b, c = (np.array([1, 2, 3, 4, 4]),
np.array(['a', 'a', 'a', 'a', 'a']),
np.array([1., 2., 3., 4., 5.]))
self.assertRaises(Exception, pivot, a, b, c)
# corner case, empty
df = pivot(np.array([]), np.array([]), np.array([]))
def test_monotonic():
pos = np.array([1, 2, 3, 5])
def _monotonic(arr):
return not (arr[1:] < arr[:-1]).any()
assert _monotonic(pos)
neg = np.array([1, 2, 3, 4, 3])
assert not _monotonic(neg)
neg2 = np.array([5, 1, 2, 3, 4, 5])
assert not _monotonic(neg2)
def test_panel_index():
index = panelm.panel_index([1, 2, 3, 4], [1, 2, 3])
expected = MultiIndex.from_arrays([np.tile([1, 2, 3, 4], 3),
np.repeat([1, 2, 3], 4)])
assert index.equals(expected)
def test_import_warnings():
# GH8152
panel = Panel(np.random.rand(3, 3, 3))
with assert_produces_warning():
panel.major_xs(1, copy=False)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
movmov/cc | vendor/Twisted-10.0.0/twisted/test/test_strports.py | 12 | 3002 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
from twisted.application import strports
from twisted.trial import unittest
class ParserTestCase(unittest.TestCase):
f = "Factory"
def testSimpleNumeric(self):
self.assertEqual(strports.parse('80', self.f),
('TCP', (80, self.f), {'interface':'', 'backlog':50}))
def testSimpleTCP(self):
self.assertEqual(strports.parse('tcp:80', self.f),
('TCP', (80, self.f), {'interface':'', 'backlog':50}))
def testInterfaceTCP(self):
self.assertEqual(strports.parse('tcp:80:interface=127.0.0.1', self.f),
('TCP', (80, self.f),
{'interface':'127.0.0.1', 'backlog':50}))
def testBacklogTCP(self):
self.assertEqual(strports.parse('tcp:80:backlog=6', self.f),
('TCP', (80, self.f),
{'interface':'', 'backlog':6}))
def test_simpleUNIX(self):
"""
L{strports.parse} returns a C{'UNIX'} port description with defaults
for C{'mode'}, C{'backlog'}, and C{'wantPID'} when passed a string with
the C{'unix:'} prefix and no other parameter values.
"""
self.assertEqual(
strports.parse('unix:/var/run/finger', self.f),
('UNIX', ('/var/run/finger', self.f),
{'mode': 0666, 'backlog': 50, 'wantPID': True}))
def test_modeUNIX(self):
"""
C{mode} can be set by including C{"mode=<some integer>"}.
"""
self.assertEqual(
strports.parse('unix:/var/run/finger:mode=0660', self.f),
('UNIX', ('/var/run/finger', self.f),
{'mode': 0660, 'backlog': 50, 'wantPID': True}))
def test_wantPIDUNIX(self):
"""
C{wantPID} can be set to false by including C{"lockfile=0"}.
"""
self.assertEqual(
strports.parse('unix:/var/run/finger:lockfile=0', self.f),
('UNIX', ('/var/run/finger', self.f),
{'mode': 0666, 'backlog': 50, 'wantPID': False}))
def testAllKeywords(self):
self.assertEqual(strports.parse('port=80', self.f),
('TCP', (80, self.f), {'interface':'', 'backlog':50}))
def testEscape(self):
self.assertEqual(
strports.parse(r'unix:foo\:bar\=baz\:qux\\', self.f),
('UNIX', ('foo:bar=baz:qux\\', self.f),
{'mode': 0666, 'backlog': 50, 'wantPID': True}))
def testImpliedEscape(self):
self.assertEqual(
strports.parse(r'unix:address=foo=bar', self.f),
('UNIX', ('foo=bar', self.f),
{'mode': 0666, 'backlog': 50, 'wantPID': True}))
def testNonstandardDefault(self):
self.assertEqual(
strports.parse('filename', self.f, 'unix'),
('UNIX', ('filename', self.f),
{'mode': 0666, 'backlog': 50, 'wantPID': True}))
| apache-2.0 |
jhperales/gumpswd | filelock.py | 2 | 2500 | import os
import time
import errno
class FileLockException(Exception):
pass
class FileLock(object):
""" A file locking mechanism that has context-manager support so
you can use it in a with statement. It should be relatively portable across
platforms, since it doesn't rely on msvcrt or fcntl for the locking.
"""
def __init__(self, file_name, timeout=10, delay=.05):
""" Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock.
"""
self.is_locked = False
self.lockfile = os.path.join(os.getcwd(), "%s.lock" % file_name)
self.file_name = file_name
self.timeout = timeout
self.delay = delay
def acquire(self):
""" Acquire the lock, if possible. If the lock is in use, it check again
every `wait` seconds. It does this until it either gets the lock or
exceeds `timeout` number of seconds, in which case it throws
an exception.
"""
start_time = time.time()
while True:
try:
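# O_CREAT | O_EXCL makes creation atomic: exactly one process can
# create the lock file; every other process gets EEXIST and retries.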
self.fd = os.open(self.lockfile, os.O_CREAT|os.O_EXCL|os.O_RDWR)
break
except OSError as e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= self.timeout:
raise FileLockException("Timeout occurred.")
time.sleep(self.delay)
self.is_locked = True
def release(self):
""" Get rid of the lock by deleting the lockfile.
When working in a `with` statement, this gets automatically
called at the end.
"""
if self.is_locked:
os.close(self.fd)
os.unlink(self.lockfile)
self.is_locked = False
def __enter__(self):
""" Activated when used in the with statement.
Should automatically acquire a lock to be used in the with block.
"""
if not self.is_locked:
self.acquire()
return self
def __exit__(self, type, value, traceback):
""" Activated at the end of the with statement.
It automatically releases the lock if it is still held.
"""
if self.is_locked:
self.release()
def __del__(self):
""" Make sure that the FileLock instance doesn't leave a lockfile
lying around.
"""
self.release()
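# A minimal usage sketch (not part of the original module); the file
# name 'demo.txt' is an arbitrary example.
if __name__ == '__main__':
    with FileLock('demo.txt', timeout=2) as lock:
        # Other processes constructing FileLock('demo.txt') now block in
        # acquire() until this block exits and the lock file is removed.
        with open(lock.file_name, 'a') as fh:
            fh.write('written while holding the lock\n')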
| mit |
duanhjlt/gyp | test/win/gyptest-macro-targetfilename.py | 61 | 1172 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure macro expansion of $(TargetFileName) is handled.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
if not (test.format == 'msvs' and
int(os.environ.get('GYP_MSVS_VERSION', 0)) == 2013):
CHDIR = 'vs-macros'
test.run_gyp('targetfilename.gyp', chdir=CHDIR)
test.build('targetfilename.gyp', test.ALL, chdir=CHDIR)
test.built_file_must_exist('test_targetfilename_executable.exe', chdir=CHDIR)
test.built_file_must_exist('test_targetfilename_loadable_module.dll',
chdir=CHDIR)
test.built_file_must_exist('test_targetfilename_shared_library.dll',
chdir=CHDIR)
test.built_file_must_exist('test_targetfilename_static_library.lib',
chdir=CHDIR)
test.built_file_must_exist('test_targetfilename_product_extension.foo',
chdir=CHDIR)
test.pass_test()
| bsd-3-clause |
pythonvietnam/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want the same cluster to get the same color in both the KMeans and the
# MiniBatchKMeans plots, so pair each KMeans cluster center with the closest
# MiniBatchKMeans center.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
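# order[k] is the index of the MiniBatchKMeans center closest to KMeans
# center k, so corresponding clusters get matching colors below.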
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
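# (cluster labels only take the values 0, 1 and 2, so comparing to 4
# gives an array that is False everywhere)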
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
flowersteam/explauto | explauto/sensorimotor_model/inverse/cmamodel.py | 2 | 2682 |
from .optimize import OptimizedInverseModel
from . import cma
class CMAESInverseModel(OptimizedInverseModel):
"""
An inverse model class using CMA-ES optimization routine,
on an error function computed from the forward model.
"""
name = 'CMAES'
desc = 'CMA-ES, Covariance Matrix Adaptation Evolution Strategy'
def __init__(self, dim_x=None, dim_y=None, fmodel=None, cmaes_sigma=0.05, maxfevals=20, seed=None, **kwargs):
self.cmaes_sigma = cmaes_sigma
self.maxfevals = maxfevals
self.seed = seed
OptimizedInverseModel.__init__(self, dim_x, dim_y, fmodel=fmodel, **kwargs)
def _setuplimits(self, constraints):
OptimizedInverseModel._setuplimits(self, constraints)
self.upper = list(c[1] for c in self.constraints)
self.lower = list(c[0] for c in self.constraints)
def infer_x(self, y):
"""Infer probable x from input y
@param y the desired output for infered x.
@return a list of probable x
"""
OptimizedInverseModel.infer_x(self, y)
if self.fmodel.size() == 0:
return self._random_x()
x_guesses = [self._guess_x_simple(y)[0]]
result = []
for xg in x_guesses:
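# cma.fmin minimizes self._error -- the forward-model prediction error
# for the goal y -- starting from the initial guess xg; 'bounds' keeps
# the search inside the configured limits and 'maxfevals' caps the
# number of forward-model evaluations per inversion.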
res = cma.fmin(self._error, xg, self.cmaes_sigma,
options={'bounds':[self.lower, self.upper],
'verb_log':0,
'verb_disp':False,
'maxfevals':self.maxfevals,
'seed': self.seed})
result.append((res[1], res[0]))
return [xi for fi, xi in sorted(result)]
def infer_dims(self, x, y, dims_x, dims_y, dims_out):
"""Infer probable output from input x, y
"""
OptimizedInverseModel.infer_x(self, y)
assert len(x) == len(dims_x)
assert len(y) == len(dims_y)
if len(self.fmodel.dataset) == 0:
return [[0.0]*self.dim_out]
else:
_, index = self.fmodel.dataset.nn_dims(x, y, dims_x, dims_y, k=1)
guesses = [self.fmodel.dataset.get_dims(index[0], dims_out)]
result = []
for g in guesses:
res = cma.fmin(lambda q:self._error_dims(q, dims_x, dims_y, dims_out), g, self.cmaes_sigma,
options={'bounds':[self.lower, self.upper],
'verb_log':0,
'verb_disp':False,
'maxfevals':self.maxfevals,
'seed': self.seed})
result.append((res[1], res[0]))
return sorted(result)[0][1]
| gpl-3.0 |
MjAbuz/foundation | foundation/organisation/search_indexes.py | 1 | 1662 | from haystack import indexes
from .models import Person, Project, WorkingGroup, NetworkGroup
class PersonIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
twitter = indexes.CharField(model_attr='twitter')
url = indexes.CharField(model_attr='url')
def get_model(self):
return Person
def get_updated_field(self):
return 'updated_at'
class ProjectIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
twitter = indexes.CharField(model_attr='twitter')
homepage_url = indexes.CharField(model_attr='homepage_url')
mailinglist_url = indexes.CharField(model_attr='mailinglist_url')
sourcecode_url = indexes.CharField(model_attr='sourcecode_url')
def get_model(self):
return Project
def get_updated_field(self):
return 'updated_at'
class WorkingGroupIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
incubation = indexes.BooleanField(model_attr='incubation')
def get_model(self):
return WorkingGroup
def get_updated_field(self):
return 'updated_at'
class NetworkGroupIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
twitter = indexes.CharField(model_attr='twitter')
homepage_url = indexes.CharField(model_attr='homepage_url')
mailinglist_url = indexes.CharField(model_attr='mailinglist_url')
def get_model(self):
return NetworkGroup
def get_updated_field(self):
return 'updated_at'
| mit |
bobobox/ansible | lib/ansible/modules/cloud/cloudstack/cs_role.py | 38 | 6359 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_role
short_description: Manages user roles on Apache CloudStack based clouds.
description:
- Create, update, delete user roles.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the role.
required: true
id:
description:
- ID of the role.
- If provided, C(id) is used as key.
required: false
default: null
aliases: [ 'uuid' ]
role_type:
description:
- Type of the role.
- Only considered for creation.
required: false
default: User
choices: [ 'User', 'DomainAdmin', 'ResourceAdmin', 'Admin' ]
description:
description:
- Description of the role.
required: false
default: null
state:
description:
- State of the role.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure an user role is present
- local_action:
module: cs_role
name: myrole_user
# Ensure a role having particular ID is named as myrole_user
- local_action:
module: cs_role
name: myrole_user
id: 04589590-ac63-4ffc-93f5-b698b8ac38b6
# Ensure a role is absent
- local_action:
module: cs_role
name: myrole_user
state: absent
'''
RETURN = '''
---
id:
description: UUID of the role.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the role.
returned: success
type: string
sample: myrole
description:
description: Description of the role.
returned: success
type: string
sample: "This is my role description"
role_type:
description: Type of the role.
returned: success
type: string
sample: User
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, CloudStackException, cs_argument_spec, cs_required_together
class AnsibleCloudStackRole(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackRole, self).__init__(module)
self.returns = {
'type': 'role_type',
}
def get_role(self):
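# Look the role up by its explicit ID when one was given; otherwise
# fall back to a lookup by name.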
uuid = self.module.params.get('uuid')
if uuid:
args = {
'id': uuid,
}
roles = self.cs.listRoles(**args)
if roles:
return roles['role'][0]
else:
args = {
'name': self.module.params.get('name'),
}
roles = self.cs.listRoles(**args)
if roles:
return roles['role'][0]
return None
def present_role(self):
role = self.get_role()
if role:
role = self._update_role(role)
else:
role = self._create_role(role)
return role
def _create_role(self, role):
self.result['changed'] = True
args = {
'name': self.module.params.get('name'),
'type': self.module.params.get('role_type'),
'description': self.module.params.get('description'),
}
if not self.module.check_mode:
res = self.cs.createRole(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
role = res['role']
return role
def _update_role(self, role):
args = {
'id': role['id'],
'name': self.module.params.get('name'),
'description': self.module.params.get('description'),
}
if self.has_changed(args, role):
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.updateRole(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
# The API as in 4.9 does not return an updated role yet
if 'role' not in res:
role = self.get_role()
else:
role = res['role']
return role
def absent_role(self):
role = self.get_role()
if role:
self.result['changed'] = True
args = {
'id': role['id'],
}
if not self.module.check_mode:
res = self.cs.deleteRole(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
return role
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
uuid=dict(default=None, aliases=['id']),
name=dict(required=True),
description=dict(default=None),
role_type=dict(choices=['User', 'DomainAdmin', 'ResourceAdmin', 'Admin'], default='User'),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_role = AnsibleCloudStackRole(module)
state = module.params.get('state')
if state == 'absent':
role = acs_role.absent_role()
else:
role = acs_role.present_role()
result = acs_role.get_result(role)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
saukrIppl/seahub | thirdpart/registration/tests/models.py | 7 | 9730 | import datetime
import hashlib
import re
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.core import management
from django.test import TestCase
from registration.models import RegistrationProfile
class RegistrationModelTests(TestCase):
"""
Test the model and manager used in the default backend.
"""
user_info = {'username': 'alice',
'password': 'swordfish',
'email': '[email protected]'}
def setUp(self):
self.old_activation = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', None)
settings.ACCOUNT_ACTIVATION_DAYS = 7
def tearDown(self):
settings.ACCOUNT_ACTIVATION_DAYS = self.old_activation
def test_profile_creation(self):
"""
Creating a registration profile for a user populates the
profile with the correct user and a SHA1 hash to use as
activation key.
"""
new_user = User.objects.create_user(**self.user_info)
profile = RegistrationProfile.objects.create_profile(new_user)
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(profile.user.id, new_user.id)
self.failUnless(re.match('^[a-f0-9]{40}$', profile.activation_key))
self.assertEqual(unicode(profile),
"Registration information for alice")
def test_activation_email(self):
"""
``RegistrationProfile.send_activation_email`` sends an
email.
"""
new_user = User.objects.create_user(**self.user_info)
profile = RegistrationProfile.objects.create_profile(new_user)
profile.send_activation_email(Site.objects.get_current())
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [self.user_info['email']])
def test_user_creation(self):
"""
Creating a new user populates the correct data, and sets the
user's account inactive.
"""
new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
**self.user_info)
self.assertEqual(new_user.username, 'alice')
self.assertEqual(new_user.email, '[email protected]')
self.failUnless(new_user.check_password('swordfish'))
self.failIf(new_user.is_active)
def test_user_creation_email(self):
"""
By default, creating a new user sends an activation email.
"""
new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
**self.user_info)
self.assertEqual(len(mail.outbox), 1)
def test_user_creation_no_email(self):
"""
Passing ``send_email=False`` when creating a new user will not
send an activation email.
"""
new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
send_email=False,
**self.user_info)
self.assertEqual(len(mail.outbox), 0)
def test_unexpired_account(self):
"""
``RegistrationProfile.activation_key_expired()`` is ``False``
within the activation window.
"""
new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
**self.user_info)
profile = RegistrationProfile.objects.get(user=new_user)
self.failIf(profile.activation_key_expired())
def test_expired_account(self):
"""
``RegistrationProfile.activation_key_expired()`` is ``True``
outside the activation window.
"""
new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
**self.user_info)
new_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
new_user.save()
profile = RegistrationProfile.objects.get(user=new_user)
self.failUnless(profile.activation_key_expired())
def test_valid_activation(self):
"""
Activating a user within the permitted window makes the
account active, and resets the activation key.
"""
new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
**self.user_info)
profile = RegistrationProfile.objects.get(user=new_user)
activated = RegistrationProfile.objects.activate_user(profile.activation_key)
self.failUnless(isinstance(activated, User))
self.assertEqual(activated.id, new_user.id)
self.failUnless(activated.is_active)
profile = RegistrationProfile.objects.get(user=new_user)
self.assertEqual(profile.activation_key, RegistrationProfile.ACTIVATED)
def test_expired_activation(self):
"""
Attempting to activate outside the permitted window does not
activate the account.
"""
new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
**self.user_info)
new_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
new_user.save()
profile = RegistrationProfile.objects.get(user=new_user)
activated = RegistrationProfile.objects.activate_user(profile.activation_key)
self.failIf(isinstance(activated, User))
self.failIf(activated)
new_user = User.objects.get(username='alice')
self.failIf(new_user.is_active)
profile = RegistrationProfile.objects.get(user=new_user)
self.assertNotEqual(profile.activation_key, RegistrationProfile.ACTIVATED)
def test_activation_invalid_key(self):
"""
Attempting to activate with a key which is not a SHA1 hash
fails.
"""
self.failIf(RegistrationProfile.objects.activate_user('foo'))
def test_activation_already_activated(self):
"""
Attempting to re-activate an already-activated account fails.
"""
new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
**self.user_info)
profile = RegistrationProfile.objects.get(user=new_user)
RegistrationProfile.objects.activate_user(profile.activation_key)
profile = RegistrationProfile.objects.get(user=new_user)
self.failIf(RegistrationProfile.objects.activate_user(profile.activation_key))
def test_activation_nonexistent_key(self):
"""
Attempting to activate with a non-existent key (i.e., one not
associated with any account) fails.
"""
# Due to the way activation keys are constructed during
# registration, this will never be a valid key.
invalid_key = hashlib.sha1('foo').hexdigest()
self.failIf(RegistrationProfile.objects.activate_user(invalid_key))
def test_expired_user_deletion(self):
"""
``RegistrationProfile.objects.delete_expired_users()`` only
deletes inactive users whose activation window has expired.
"""
new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
**self.user_info)
expired_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
username='bob',
password='secret',
email='[email protected]')
expired_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
expired_user.save()
RegistrationProfile.objects.delete_expired_users()
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertRaises(User.DoesNotExist, User.objects.get, username='bob')
def test_management_command(self):
"""
The ``cleanupregistration`` management command properly
deletes expired accounts.
"""
new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
**self.user_info)
expired_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
username='bob',
password='secret',
email='[email protected]')
expired_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
expired_user.save()
management.call_command('cleanupregistration')
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertRaises(User.DoesNotExist, User.objects.get, username='bob')
| apache-2.0 |
therandomcode/Fanalytics | lib/werkzeug/posixemulation.py | 364 | 3519 | # -*- coding: utf-8 -*-
r"""
werkzeug.posixemulation
~~~~~~~~~~~~~~~~~~~~~~~
Provides a POSIX emulation for some features that are relevant to
web applications. The main purpose is to simplify support for
systems such as Windows NT that are not 100% POSIX compatible.
Currently this only implements a :func:`rename` function that
follows POSIX semantics. Eg: if the target file already exists it
will be replaced without asking.
This module was introduced in 0.6.1 and is not a public interface.
It might become one in later versions of Werkzeug.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import errno
import time
import random
from ._compat import to_unicode
from .filesystem import get_filesystem_encoding
can_rename_open_file = False
if os.name == 'nt': # pragma: no cover
_rename = lambda src, dst: False
_rename_atomic = lambda src, dst: False
try:
import ctypes
_MOVEFILE_REPLACE_EXISTING = 0x1
_MOVEFILE_WRITE_THROUGH = 0x8
_MoveFileEx = ctypes.windll.kernel32.MoveFileExW
def _rename(src, dst):
src = to_unicode(src, get_filesystem_encoding())
dst = to_unicode(dst, get_filesystem_encoding())
if _rename_atomic(src, dst):
return True
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH)
if not rv:
time.sleep(0.001)
retry += 1
return rv
# new in Vista and Windows Server 2008
_CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
_CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
_MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
_CloseHandle = ctypes.windll.kernel32.CloseHandle
can_rename_open_file = True
def _rename_atomic(src, dst):
ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')
if ta == -1:
return False
try:
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileTransacted(src, dst, None, None,
_MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH, ta)
if rv:
rv = _CommitTransaction(ta)
break
else:
time.sleep(0.001)
retry += 1
return rv
finally:
_CloseHandle(ta)
except Exception:
pass
def rename(src, dst):
# Try atomic or pseudo-atomic rename
if _rename(src, dst):
return
# Fall back to "move away and replace"
try:
os.rename(src, dst)
except OSError as e:
if e.errno != errno.EEXIST:
raise
old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
os.rename(dst, old)
os.rename(src, dst)
try:
os.unlink(old)
except Exception:
pass
else:
rename = os.rename
can_rename_open_file = True
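# A minimal usage sketch (not part of the original module); both file
# names are arbitrary examples.
if __name__ == '__main__':
    with open('src.tmp', 'w') as fh:
        fh.write('payload')
    # POSIX semantics on every platform: if 'dst.txt' already exists it
    # is replaced without asking.
    rename('src.tmp', 'dst.txt')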
| apache-2.0 |
terhorstd/nest-simulator | pynest/nest/tests/test_create.py | 2 | 3250 | # -*- coding: utf-8 -*-
#
# test_create.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Creation tests
"""
import unittest
import warnings
import nest
@nest.ll_api.check_stack
class CreateTestCase(unittest.TestCase):
"""Creation tests"""
def test_ModelCreate(self):
"""Model Creation"""
nest.ResetKernel()
for model in nest.Models(mtype='nodes'):
node = nest.Create(model)
self.assertGreater(node[0], 0)
def test_ModelCreateN(self):
"""Model Creation with N"""
nest.ResetKernel()
num_nodes = 10
for model in nest.Models(mtype='nodes'):
nodes = nest.Create(model, num_nodes)
self.assertEqual(len(nodes), num_nodes)
def test_ModelCreateNdict(self):
"""Model Creation with N and dict"""
nest.ResetKernel()
num_nodes = 10
voltage = 12.0
n = nest.Create('iaf_psc_alpha', num_nodes, {'V_m': voltage})
self.assertEqual(nest.GetStatus(n, 'V_m'), (voltage, ) * num_nodes)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.assertRaises(TypeError, nest.Create,
'iaf_psc_alpha', 10, tuple())
self.assertTrue(issubclass(w[-1].category, UserWarning))
def test_ModelDicts(self):
"""IAF Creation with N and dicts"""
nest.ResetKernel()
num_nodes = 10
V_m = (0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
n = nest.Create('iaf_psc_alpha', num_nodes, [{'V_m': v} for v in V_m])
self.assertEqual(nest.GetStatus(n, 'V_m'), V_m)
def test_CopyModel(self):
"""CopyModel"""
nest.ResetKernel()
nest.CopyModel('iaf_psc_alpha', 'new_neuron', {'V_m': 10.0})
vm = nest.GetDefaults('new_neuron')['V_m']
self.assertEqual(vm, 10.0)
n = nest.Create('new_neuron', 10)
vm = nest.GetStatus([n[0]])[0]['V_m']
self.assertEqual(vm, 10.0)
nest.CopyModel('static_synapse', 'new_synapse', {'weight': 10.})
nest.Connect([n[0]], [n[1]], syn_spec='new_synapse')
w = nest.GetDefaults('new_synapse')['weight']
self.assertEqual(w, 10.0)
self.assertRaisesRegex(
nest.kernel.NESTError, "NewModelNameExists",
nest.CopyModel, 'iaf_psc_alpha', 'new_neuron')
def suite():
suite = unittest.makeSuite(CreateTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 |
PLyczkowski/Sticky-Keymap | 2.74/python/lib/site-packages/numpy/doc/broadcasting.py | 231 | 5565 | """
========================
Broadcasting over arrays
========================
The term broadcasting describes how numpy treats arrays with different
shapes during arithmetic operations. Subject to certain constraints,
the smaller array is "broadcast" across the larger array so that they
have compatible shapes. Broadcasting provides a means of vectorizing
array operations so that looping occurs in C instead of Python. It does
this without making needless copies of data and usually leads to
efficient algorithm implementations. There are, however, cases where
broadcasting is a bad idea because it leads to inefficient use of memory
that slows computation.
NumPy operations are usually done on pairs of arrays on an
element-by-element basis. In the simplest case, the two arrays must
have exactly the same shape, as in the following example:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = np.array([2.0, 2.0, 2.0])
>>> a * b
array([ 2., 4., 6.])
NumPy's broadcasting rule relaxes this constraint when the arrays'
shapes meet certain conditions. The simplest broadcasting example occurs
when an array and a scalar value are combined in an operation:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = 2.0
>>> a * b
array([ 2., 4., 6.])
The result is equivalent to the previous example where ``b`` was an array.
We can think of the scalar ``b`` being *stretched* during the arithmetic
operation into an array with the same shape as ``a``. The new elements in
``b`` are simply copies of the original scalar. The stretching analogy is
only conceptual. NumPy is smart enough to use the original scalar value
without actually making copies, so that broadcasting operations are as
memory and computationally efficient as possible.
The code in the second example is more efficient than that in the first
because broadcasting moves less memory around during the multiplication
(``b`` is a scalar rather than an array).
General Broadcasting Rules
==========================
When operating on two arrays, NumPy compares their shapes element-wise.
It starts with the trailing dimensions, and works its way forward. Two
dimensions are compatible when
1) they are equal, or
2) one of them is 1
If these conditions are not met, a
``ValueError: frames are not aligned`` exception is thrown, indicating that
the arrays have incompatible shapes. The size of the resulting array
is the maximum size along each dimension of the input arrays.
Arrays do not need to have the same *number* of dimensions. For example,
if you have a ``256x256x3`` array of RGB values, and you want to scale
each color in the image by a different value, you can multiply the image
by a one-dimensional array with 3 values. Lining up the sizes of the
trailing axes of these arrays according to the broadcast rules, shows that
they are compatible::
Image (3d array): 256 x 256 x 3
Scale (1d array): 3
Result (3d array): 256 x 256 x 3
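As a concrete illustration, scaling each channel of such an image::
    >>> image = np.zeros((256, 256, 3))
    >>> scale = np.array([2.0, 0.5, 1.0])
    >>> (image * scale).shape
    (256, 256, 3)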
When either of the dimensions compared is one, the other is
used. In other words, dimensions with size 1 are stretched or "copied"
to match the other.
In the following example, both the ``A`` and ``B`` arrays have axes with
length one that are expanded to a larger size during the broadcast
operation::
A (4d array): 8 x 1 x 6 x 1
B (3d array): 7 x 1 x 5
Result (4d array): 8 x 7 x 6 x 5
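The rule can also be written in a few lines of plain Python. The
following sketch is not part of NumPy; it is included only to make the
trailing-axes comparison concrete, and reproduces the example above:
>>> def broadcast_shape(s1, s2):
...     dims = []
...     for a, b in zip(s1[::-1], s2[::-1]):  # compare trailing axes first
...         if a == 1 or b == 1 or a == b:
...             dims.append(max(a, b))
...         else:
...             raise ValueError("shapes are not compatible")
...     longer = s1 if len(s1) > len(s2) else s2
...     dims.extend(longer[::-1][len(dims):])  # keep leftover leading axes
...     return tuple(dims[::-1])
>>> broadcast_shape((8, 1, 6, 1), (7, 1, 5))
(8, 7, 6, 5)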
Here are some more examples::
A (2d array): 5 x 4
B (1d array): 1
Result (2d array): 5 x 4
A (2d array): 5 x 4
B (1d array): 4
Result (2d array): 5 x 4
A (3d array): 15 x 3 x 5
B (3d array): 15 x 1 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 1
Result (3d array): 15 x 3 x 5
Here are examples of shapes that do not broadcast::
A (1d array): 3
B (1d array): 4 # trailing dimensions do not match
A (2d array): 2 x 1
B (3d array): 8 x 4 x 3 # second from last dimensions mismatched
An example of broadcasting in practice::
>>> x = np.arange(4)
>>> xx = x.reshape(4,1)
>>> y = np.ones(5)
>>> z = np.ones((3,4))
>>> x.shape
(4,)
>>> y.shape
(5,)
>>> x + y
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape
>>> xx.shape
(4, 1)
>>> y.shape
(5,)
>>> (xx + y).shape
(4, 5)
>>> xx + y
array([[ 1., 1., 1., 1., 1.],
[ 2., 2., 2., 2., 2.],
[ 3., 3., 3., 3., 3.],
[ 4., 4., 4., 4., 4.]])
>>> x.shape
(4,)
>>> z.shape
(3, 4)
>>> (x + z).shape
(3, 4)
>>> x + z
array([[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.]])
Broadcasting provides a convenient way of taking the outer product (or
any other outer operation) of two arrays. The following example shows an
outer addition operation of two 1-d arrays::
>>> a = np.array([0.0, 10.0, 20.0, 30.0])
>>> b = np.array([1.0, 2.0, 3.0])
>>> a[:, np.newaxis] + b
array([[ 1., 2., 3.],
[ 11., 12., 13.],
[ 21., 22., 23.],
[ 31., 32., 33.]])
Here the ``newaxis`` index operator inserts a new axis into ``a``,
making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
See `this article <http://wiki.scipy.org/EricsBroadcastingDoc>`_
for illustrations of broadcasting concepts.
"""
from __future__ import division, absolute_import, print_function
| gpl-2.0 |
nikitabrazhnik/flask2 | Module 3/Chapter09/chapter_9/webapp/controllers/rest/parsers.py | 11 | 1408 | from flask_restful import reqparse
user_post_parser = reqparse.RequestParser()
user_post_parser.add_argument('username', type=str, required=True)
user_post_parser.add_argument('password', type=str, required=True)
post_get_parser = reqparse.RequestParser()
post_get_parser.add_argument('page', type=int, location=['args', 'headers'])
post_get_parser.add_argument('user', type=str, location=['args', 'headers'])
post_post_parser = reqparse.RequestParser()
post_post_parser.add_argument(
'token',
type=str,
required=True,
help="Auth Token is required to edit posts"
)
post_post_parser.add_argument(
'title',
type=str,
required=True,
help="Title is required"
)
post_post_parser.add_argument(
'text',
type=str,
required=True,
help="Body text is required"
)
post_post_parser.add_argument(
'tags',
type=str,
action='append'
)
post_put_parser = reqparse.RequestParser()
post_put_parser.add_argument(
'token',
type=str,
required=True,
help="Auth Token is required to create posts"
)
post_put_parser.add_argument(
'title',
type=str
)
post_put_parser.add_argument(
'text',
type=str
)
post_put_parser.add_argument(
'tags',
type=str
)
post_delete_parser = reqparse.RequestParser()
post_delete_parser.add_argument(
'token',
type=str,
required=True,
help="Auth Token is required to delete posts"
)
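# Usage sketch (illustrative only; ``PostApi`` is a hypothetical name,
# not defined in this package). A Flask-Restful resource consumes these
# parsers by calling ``parse_args()``, which aborts with a 400 response
# and the argument's ``help`` message when a required field is missing:
#
#     from flask_restful import Resource
#
#     class PostApi(Resource):
#         def post(self):
#             args = post_post_parser.parse_args()
#             title = args['title']
#             ...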
| mit |
nortikin/sverchok | nodes/surface/gordon_surface.py | 1 | 5334 | # This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import numpy as np
import bpy
from bpy.props import FloatProperty, EnumProperty, BoolProperty, IntProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level, throttle_and_update_node, repeat_last_for_length
from sverchok.utils.math import supported_metrics
from sverchok.utils.nurbs_common import SvNurbsMaths
from sverchok.utils.curve.core import SvCurve
from sverchok.utils.curve.nurbs import SvNurbsCurve
from sverchok.utils.surface.gordon import gordon_surface
class SvGordonSurfaceNode(bpy.types.Node, SverchCustomTreeNode):
"""
Triggers: NURBS Gordon Surface Curves Net
Tooltip: Generate a NURBS surface from a net of curves (a.k.a. Gordon Surface)
"""
bl_idname = 'SvGordonSurfaceNode'
bl_label = 'NURBS Surface from Curves Net'
bl_icon = 'GP_MULTIFRAME_EDITING'
sv_icon = 'SV_SURFACE_FROM_CURVES'
metric: EnumProperty(name='Metric',
description = "Knot mode",
default="POINTS", items=supported_metrics,
update=updateNode)
@throttle_and_update_node
def update_sockets(self, context):
self.inputs['T1'].hide_safe = not self.explicit_t_values
self.inputs['T2'].hide_safe = not self.explicit_t_values
explicit_t_values : BoolProperty(
name = "Explicit T values",
default = False,
update = update_sockets)
knotvector_accuracy : IntProperty(
name = "Knotvector accuracy",
default = 4,
min = 1, max = 10,
update = updateNode)
def draw_buttons(self, context, layout):
layout.prop(self, 'explicit_t_values')
def draw_buttons_ext(self, context, layout):
if not self.explicit_t_values:
layout.prop(self, 'metric')
layout.prop(self, 'knotvector_accuracy')
def sv_init(self, context):
self.inputs.new('SvCurveSocket', "CurvesU")
self.inputs.new('SvCurveSocket', "CurvesV")
self.inputs.new('SvStringsSocket', "T1")
self.inputs.new('SvStringsSocket', "T2")
self.inputs.new('SvVerticesSocket', "Intersections")
self.outputs.new('SvSurfaceSocket', "Surface")
self.update_sockets(context)
def process(self):
if not any(socket.is_linked for socket in self.outputs):
return
u_curves_s = self.inputs['CurvesU'].sv_get()
v_curves_s = self.inputs['CurvesV'].sv_get()
intersections_s = self.inputs['Intersections'].sv_get()
if self.explicit_t_values:
t1_s = self.inputs['T1'].sv_get()
t2_s = self.inputs['T2'].sv_get()
else:
t1_s = [[[]]]
t2_s = [[[]]]
u_curves_s = ensure_nesting_level(u_curves_s, 2, data_types=(SvCurve,))
v_curves_s = ensure_nesting_level(v_curves_s, 2, data_types=(SvCurve,))
t1_s = ensure_nesting_level(t1_s, 3)
t2_s = ensure_nesting_level(t2_s, 3)
intersections_s = ensure_nesting_level(intersections_s, 4)
surface_out = []
for u_curves, v_curves, t1s, t2s, intersections in zip_long_repeat(u_curves_s, v_curves_s, t1_s, t2_s, intersections_s):
u_curves = [SvNurbsCurve.to_nurbs(c) for c in u_curves]
if any(c is None for c in u_curves):
raise Exception("Some of U curves are not NURBS!")
v_curves = [SvNurbsCurve.to_nurbs(c) for c in v_curves]
if any(c is None for c in v_curves):
raise Exception("Some of V curves are not NURBS!")
if self.explicit_t_values:
if len(t1s) < len(u_curves):
t1s = repeat_last_for_length(t1s, len(u_curves))
elif len(t1s) > len(u_curves):
raise Exception(f"Number of items in T1 input {len(t1s)} > number of U-curves {len(u_curves)}")
if len(t1s[0]) != len(v_curves):
raise Exception(f"Length of items in T1 input {len(t1s[0])} != number of V-curves {len(v_curves)}")
if len(t2s) < len(v_curves):
t2s = repeat_last_for_length(t2s, len(v_curves))
elif len(t2s) > len(v_curves):
raise Exception(f"Number of items in T2 input {len(t2s)} > number of V-curves {len(v_curves)}")
if len(t2s[0]) != len(u_curves):
raise Exception(f"Length of items in T2 input {len(t2s[0])} != number of U-curves {len(u_curves)}")
if self.explicit_t_values:
kwargs = {'u_knots': np.array(t1s), 'v_knots': np.array(t2s)}
else:
kwargs = dict()
_, _, _, surface = gordon_surface(u_curves, v_curves, intersections, metric=self.metric, knotvector_accuracy = self.knotvector_accuracy, **kwargs)
surface_out.append(surface)
self.outputs['Surface'].sv_set(surface_out)
def register():
bpy.utils.register_class(SvGordonSurfaceNode)
def unregister():
bpy.utils.unregister_class(SvGordonSurfaceNode)
| gpl-3.0 |
cython-testbed/pandas | pandas/tseries/offsets.py | 1 | 81716 | # -*- coding: utf-8 -*-
from datetime import date, datetime, timedelta
import functools
import operator
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.core.dtypes.generic import ABCPeriod
from pandas.core.tools.datetimes import to_datetime
import pandas.core.common as com
# import after tools, dateutil check
from dateutil.easter import easter
from pandas._libs import tslibs, Timestamp, OutOfBoundsDatetime, Timedelta
from pandas.util._decorators import cache_readonly
from pandas._libs.tslibs import (
ccalendar, conversion,
frequencies as libfrequencies)
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import (
ApplyTypeError,
as_datetime, _is_normalized,
_get_calendar, _to_dt64,
apply_index_wraps,
roll_yearday,
shift_month,
BaseOffset)
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'CBMonthEnd', 'CBMonthBegin',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
'SemiMonthEnd', 'SemiMonthBegin',
'BusinessHour', 'CustomBusinessHour',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',
'Week', 'WeekOfMonth', 'Easter',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset', 'CalendarDay']
# convert to/from datetime/timestamp to allow invalid Timestamp ranges to
# pass thru
def as_timestamp(obj):
if isinstance(obj, Timestamp):
return obj
try:
return Timestamp(obj)
except (OutOfBoundsDatetime):
pass
return obj
def apply_wraps(func):
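    """Decorator for offset ``apply`` methods: passes NaT through,
    coerces datetime-likes to Timestamp, round-trips the timezone
    around the wrapped computation, re-attaches nanoseconds lost by
    the offset arithmetic, and normalizes the result when
    ``self.normalize`` is set."""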
@functools.wraps(func)
def wrapper(self, other):
if other is tslibs.NaT:
return tslibs.NaT
elif isinstance(other, (timedelta, Tick, DateOffset)):
# timedelta path
return func(self, other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = as_timestamp(other)
tz = getattr(other, 'tzinfo', None)
nano = getattr(other, 'nanosecond', 0)
try:
if self._adjust_dst and isinstance(other, Timestamp):
other = other.tz_localize(None)
result = func(self, other)
if self._adjust_dst:
result = conversion.localize_pydatetime(result, tz)
result = Timestamp(result)
if self.normalize:
result = result.normalize()
# nanosecond may be deleted depending on offset process
if not self.normalize and nano != 0:
if not isinstance(self, Nano) and result.nanosecond != nano:
if result.tz is not None:
# convert to UTC
value = conversion.tz_convert_single(
result.value, 'UTC', result.tz)
else:
value = result.value
result = Timestamp(value + nano)
if tz is not None and result.tzinfo is None:
result = conversion.localize_pydatetime(result, tz)
except OutOfBoundsDatetime:
result = func(self, as_datetime(other))
if self.normalize:
# normalize_date returns normal datetime
result = tslibs.normalize_date(result)
if tz is not None and result.tzinfo is None:
result = conversion.localize_pydatetime(result, tz)
return result
return wrapper
# ---------------------------------------------------------------------
# DateOffset
class DateOffset(BaseOffset):
"""
Standard kind of date increment used for a date range.
Works exactly like relativedelta in terms of the keyword args you
pass in. Use of the keyword n is discouraged; you would be better
off specifying n in the keywords you use, but regardless it is
there for you. n is needed for DateOffset subclasses.
DateOffsets work as follows. Each offset specifies a set of dates
that conform to the DateOffset. For example, Bday defines this
set to be the set of dates that are weekdays (M-F). To test if a
date is in the set of a DateOffset dateOffset we can use the
onOffset method: dateOffset.onOffset(date).
If a date is not on a valid date, the rollback and rollforward
methods can be used to roll the date to the nearest valid date
before/after the date.
DateOffsets can be created to move dates forward a given number of
valid dates. For example, Bday(2) can be added to a date to move
it two business days forward. If the date does not start on a
valid date, first it is moved to a valid date. Thus pseudo code
is:
def __add__(date):
date = rollback(date) # does nothing if date is valid
return date + <n number of periods>
When a date offset is created for a negative number of periods,
the date is first rolled forward. The pseudo code is:
def __add__(date):
date = rollforward(date) # does nothing if date is valid
return date + <n number of periods>
Zero presents a problem. Should it roll forward or back? We
arbitrarily have it rollforward:
date + BDay(0) == BDay.rollforward(date)
Since 0 is a bit weird, we suggest avoiding its use.
Parameters
----------
n : int, default 1
The number of time periods the offset represents.
normalize : bool, default False
Whether to round the result of a DateOffset addition down to the
previous midnight.
**kwds
Temporal parameter that add to or replace the offset value.
Parameters that **add** to the offset (like Timedelta):
- years
- months
- weeks
- days
- hours
- minutes
- seconds
- microseconds
- nanoseconds
Parameters that **replace** the offset value:
- year
- month
- day
- weekday
- hour
- minute
- second
- microsecond
- nanosecond
See Also
--------
dateutil.relativedelta.relativedelta
Examples
--------
>>> ts = pd.Timestamp('2017-01-01 09:10:11')
>>> ts + DateOffset(months=3)
Timestamp('2017-04-01 09:10:11')
>>> ts = pd.Timestamp('2017-01-01 09:10:11')
>>> ts + DateOffset(month=3)
Timestamp('2017-03-01 09:10:11')
"""
_params = cache_readonly(BaseOffset._params.fget)
_use_relativedelta = False
_adjust_dst = False
_attributes = frozenset(['n', 'normalize'] +
list(liboffsets.relativedelta_kwds))
# default for prior pickles
normalize = False
def __init__(self, n=1, normalize=False, **kwds):
BaseOffset.__init__(self, n, normalize)
off, use_rd = liboffsets._determine_offset(kwds)
object.__setattr__(self, "_offset", off)
object.__setattr__(self, "_use_relativedelta", use_rd)
for key in kwds:
val = kwds[key]
object.__setattr__(self, key, val)
@apply_wraps
def apply(self, other):
if self._use_relativedelta:
other = as_datetime(other)
if len(self.kwds) > 0:
tzinfo = getattr(other, 'tzinfo', None)
if tzinfo is not None and self._use_relativedelta:
# perform calculation in UTC
other = other.replace(tzinfo=None)
if self.n > 0:
for i in range(self.n):
other = other + self._offset
else:
for i in range(-self.n):
other = other - self._offset
if tzinfo is not None and self._use_relativedelta:
# bring tz back from UTC calculation
other = conversion.localize_pydatetime(other, tzinfo)
return as_timestamp(other)
else:
return other + timedelta(self.n)
@apply_index_wraps
def apply_index(self, i):
"""
Vectorized apply of DateOffset to DatetimeIndex;
raises NotImplementedError for offsets without a
vectorized implementation.
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex
"""
if type(self) is not DateOffset:
raise NotImplementedError("DateOffset subclass {name} "
"does not have a vectorized "
"implementation".format(
name=self.__class__.__name__))
kwds = self.kwds
relativedelta_fast = {'years', 'months', 'weeks', 'days', 'hours',
'minutes', 'seconds', 'microseconds'}
# relativedelta/_offset path only valid for base DateOffset
if (self._use_relativedelta and
set(kwds).issubset(relativedelta_fast)):
months = ((kwds.get('years', 0) * 12 +
kwds.get('months', 0)) * self.n)
if months:
shifted = liboffsets.shift_months(i.asi8, months)
i = i._shallow_copy(shifted)
weeks = (kwds.get('weeks', 0)) * self.n
if weeks:
i = (i.to_period('W') + weeks).to_timestamp() + \
i.to_perioddelta('W')
timedelta_kwds = {k: v for k, v in kwds.items()
if k in ['days', 'hours', 'minutes',
'seconds', 'microseconds']}
if timedelta_kwds:
delta = Timedelta(**timedelta_kwds)
i = i + (self.n * delta)
return i
elif not self._use_relativedelta and hasattr(self, '_offset'):
# timedelta
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
kwd = set(kwds) - relativedelta_fast
raise NotImplementedError("DateOffset with relativedelta "
"keyword(s) {kwd} not able to be "
"applied vectorized".format(kwd=kwd))
def isAnchored(self):
# TODO: Does this make sense for the general case? It would help
# if there were a canonical docstring for what isAnchored means.
return (self.n == 1)
# TODO: Combine this with BusinessMixin version by defining a whitelisted
# set of attributes on each object rather than the existing behavior of
# iterating over internal ``__dict__``
def _repr_attrs(self):
exclude = {'n', 'inc', 'normalize'}
attrs = []
for attr in sorted(self.__dict__):
if attr.startswith('_') or attr == 'kwds':
continue
elif attr not in exclude:
value = getattr(self, attr)
attrs.append('{attr}={value}'.format(attr=attr, value=value))
out = ''
if attrs:
out += ': ' + ', '.join(attrs)
return out
@property
def name(self):
return self.rule_code
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
# XXX, see #1395
if type(self) == DateOffset or isinstance(self, Tick):
return True
# Default (slow) method for determining if some date is a member of the
# date range generated by this offset. Subclasses may have this
# re-implemented in a nicer way.
a = dt
b = ((dt + self) - self)
return a == b
# way to get around weirdness with rule_code
@property
def _prefix(self):
raise NotImplementedError('Prefix not defined')
@property
def rule_code(self):
return self._prefix
@cache_readonly
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '{n}{code}'.format(n=self.n, code=code)
else:
fstr = code
try:
if self._offset:
fstr += self._offset_str()
except AttributeError:
# TODO: standardize `_offset` vs `offset` naming convention
pass
return fstr
def _offset_str(self):
return ''
@property
def nanos(self):
raise ValueError("{name} is a non-fixed frequency".format(name=self))
class SingleConstructorOffset(DateOffset):
@classmethod
def _from_name(cls, suffix=None):
# default _from_name calls cls with no args
if suffix:
raise ValueError("Bad freq suffix {suffix}".format(suffix=suffix))
return cls()
class _CustomMixin(object):
"""
Mixin for classes that define and validate calendar, holidays,
and weekdays attributes
"""
def __init__(self, weekmask, holidays, calendar):
calendar, holidays = _get_calendar(weekmask=weekmask,
holidays=holidays,
calendar=calendar)
# Custom offset instances are identified by the
# following two attributes. See DateOffset._params()
# holidays, weekmask
object.__setattr__(self, "weekmask", weekmask)
object.__setattr__(self, "holidays", holidays)
object.__setattr__(self, "calendar", calendar)
class BusinessMixin(object):
""" Mixin to business types to provide related functions """
@property
def offset(self):
"""Alias for self._offset"""
# Alias for backward compat
return self._offset
def _repr_attrs(self):
if self.offset:
attrs = ['offset={offset!r}'.format(offset=self.offset)]
else:
attrs = None
out = ''
if attrs:
out += ': ' + ', '.join(attrs)
return out
class BusinessDay(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business days
"""
_prefix = 'B'
_adjust_dst = True
_attributes = frozenset(['n', 'normalize', 'offset'])
def __init__(self, n=1, normalize=False, offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "_offset", offset)
def _offset_str(self):
def get_str(td):
off_str = ''
if td.days > 0:
off_str += str(td.days) + 'D'
if td.seconds > 0:
s = td.seconds
hrs = int(s / 3600)
if hrs != 0:
off_str += str(hrs) + 'H'
s -= hrs * 3600
mts = int(s / 60)
if mts != 0:
off_str += str(mts) + 'Min'
s -= mts * 60
if s != 0:
off_str += str(s) + 's'
if td.microseconds > 0:
off_str += str(td.microseconds) + 'us'
return off_str
if isinstance(self.offset, timedelta):
zero = timedelta(0, 0, 0)
if self.offset >= zero:
off_str = '+' + get_str(self.offset)
else:
off_str = '-' + get_str(-self.offset)
return off_str
else:
return '+' + repr(self.offset)
@apply_wraps
def apply(self, other):
if isinstance(other, datetime):
n = self.n
wday = other.weekday()
# avoid slowness below by operating on weeks first
weeks = n // 5
if n <= 0 and wday > 4:
# roll forward
n += 1
n -= 5 * weeks
# n is always >= 0 at this point
if n == 0 and wday > 4:
# roll back
days = 4 - wday
elif wday > 4:
# roll forward
days = (7 - wday) + (n - 1)
elif wday + n <= 4:
# shift by n days without leaving the current week
days = n
else:
# shift by n days plus 2 to get past the weekend
days = n + 2
result = other + timedelta(days=7 * weeks + days)
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine business day with '
'datetime or timedelta.')
@apply_index_wraps
def apply_index(self, i):
time = i.to_perioddelta('D')
# to_period rolls forward to next BDay; track and
# reduce n where it does when rolling forward
shifted = (i.to_perioddelta('B') - time).asi8 != 0
if self.n > 0:
roll = np.where(shifted, self.n - 1, self.n)
else:
roll = self.n
return (i.to_period('B') + roll).to_timestamp() + time
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() < 5
class BusinessHourMixin(BusinessMixin):
def __init__(self, start='09:00', end='17:00', offset=timedelta(0)):
# must be validated here to equality check
start = liboffsets._validate_business_time(start)
object.__setattr__(self, "start", start)
end = liboffsets._validate_business_time(end)
object.__setattr__(self, "end", end)
object.__setattr__(self, "_offset", offset)
@cache_readonly
def next_bday(self):
"""used for moving to next businessday"""
if self.n >= 0:
nb_offset = 1
else:
nb_offset = -1
if self._prefix.startswith('C'):
# CustomBusinessHour
return CustomBusinessDay(n=nb_offset,
weekmask=self.weekmask,
holidays=self.holidays,
calendar=self.calendar)
else:
return BusinessDay(n=nb_offset)
@cache_readonly
def _get_daytime_flag(self):
if self.start == self.end:
raise ValueError('start and end must not be the same')
elif self.start < self.end:
return True
else:
return False
def _next_opening_time(self, other):
"""
If n is positive, return the next business day's opening time;
otherwise the previous business day's opening time.
An opening time always falls on a BusinessDay; a closing time may
not, if business hours extend past midnight.
"""
if not self.next_bday.onOffset(other):
other = other + self.next_bday
else:
if self.n >= 0 and self.start < other.time():
other = other + self.next_bday
elif self.n < 0 and other.time() < self.start:
other = other + self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _prev_opening_time(self, other):
"""
Return the opening time of the current or previous business day;
the adjustment depends on the sign of n and the time of day.
"""
if not self.next_bday.onOffset(other):
other = other - self.next_bday
else:
if self.n >= 0 and other.time() < self.start:
other = other - self.next_bday
elif self.n < 0 and other.time() > self.start:
other = other - self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
@cache_readonly
def _get_business_hours_by_sec(self):
"""
Return business hours in a day by seconds.
"""
if self._get_daytime_flag:
# create dummy datetime to calculate businesshours in a day
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
return (until - dtstart).total_seconds()
else:
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
return (until - dtstart).total_seconds()
@apply_wraps
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
if not self.onOffset(dt):
businesshours = self._get_business_hours_by_sec
if self.n >= 0:
dt = self._prev_opening_time(
dt) + timedelta(seconds=businesshours)
else:
dt = self._next_opening_time(
dt) + timedelta(seconds=businesshours)
return dt
@apply_wraps
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
if not self.onOffset(dt):
if self.n >= 0:
return self._next_opening_time(dt)
else:
return self._prev_opening_time(dt)
return dt
@apply_wraps
def apply(self, other):
daytime = self._get_daytime_flag
businesshours = self._get_business_hours_by_sec
bhdelta = timedelta(seconds=businesshours)
if isinstance(other, datetime):
# used for detecting edge condition
nanosecond = getattr(other, 'nanosecond', 0)
# reset timezone and nanosecond
# other may be a Timestamp, thus not use replace
other = datetime(other.year, other.month, other.day,
other.hour, other.minute,
other.second, other.microsecond)
n = self.n
if n >= 0:
if (other.time() == self.end or
not self._onOffset(other, businesshours)):
other = self._next_opening_time(other)
else:
if other.time() == self.start:
# adjustment to move to previous business day
other = other - timedelta(seconds=1)
if not self._onOffset(other, businesshours):
other = self._next_opening_time(other)
other = other + bhdelta
bd, r = divmod(abs(n * 60), businesshours // 60)
if n < 0:
bd, r = -bd, -r
if bd != 0:
skip_bd = BusinessDay(n=bd)
# a midnight business hour may not fall on a BusinessDay
if not self.next_bday.onOffset(other):
remain = other - self._prev_opening_time(other)
other = self._next_opening_time(other + skip_bd) + remain
else:
other = other + skip_bd
hours, minutes = divmod(r, 60)
result = other + timedelta(hours=hours, minutes=minutes)
# because of previous adjustment, time will be larger than start
if ((daytime and (result.time() < self.start or
self.end < result.time())) or
not daytime and (self.end < result.time() < self.start)):
if n >= 0:
bday_edge = self._prev_opening_time(other)
bday_edge = bday_edge + bhdelta
# calculate remainder
bday_remain = result - bday_edge
result = self._next_opening_time(other)
result += bday_remain
else:
bday_edge = self._next_opening_time(other)
bday_remain = result - bday_edge
result = self._next_opening_time(result) + bhdelta
result += bday_remain
# edge handling
if n >= 0:
if result.time() == self.end:
result = self._next_opening_time(result)
else:
if result.time() == self.start and nanosecond == 0:
# adjustment to move to previous business day
result = self._next_opening_time(
result - timedelta(seconds=1)) + bhdelta
return result
else:
# TODO: Figure out the end of this sentence
raise ApplyTypeError(
'Only know how to combine business hour with ')
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if dt.tzinfo is not None:
dt = datetime(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, dt.microsecond)
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
businesshours = self._get_business_hours_by_sec
return self._onOffset(dt, businesshours)
def _onOffset(self, dt, businesshours):
"""
Slight speedups using calculated values
"""
# if self.normalize and not _is_normalized(dt):
# return False
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
if self.n >= 0:
op = self._prev_opening_time(dt)
else:
op = self._next_opening_time(dt)
span = (dt - op).total_seconds()
if span <= businesshours:
return True
else:
return False
def _repr_attrs(self):
out = super(BusinessHourMixin, self)._repr_attrs()
start = self.start.strftime('%H:%M')
end = self.end.strftime('%H:%M')
attrs = ['{prefix}={start}-{end}'.format(prefix=self._prefix,
start=start, end=end)]
out += ': ' + ', '.join(attrs)
return out
class BusinessHour(BusinessHourMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business hours
.. versionadded:: 0.16.1
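Examples
--------
A small illustration (2018-01-02 is a Tuesday, so a regular business
day with the default 09:00-17:00 hours):
>>> pd.Timestamp('2018-01-02 09:00') + BusinessHour()
Timestamp('2018-01-02 10:00:00')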
"""
_prefix = 'BH'
_anchor = 0
_attributes = frozenset(['n', 'normalize', 'start', 'end', 'offset'])
def __init__(self, n=1, normalize=False, start='09:00',
end='17:00', offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
super(BusinessHour, self).__init__(start=start, end=end, offset=offset)
class CustomBusinessDay(_CustomMixin, BusinessDay):
"""
DateOffset subclass representing possibly n custom business days,
excluding holidays
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
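Examples
--------
A sketch of skipping a listed holiday (here New Year's Day 2018):
>>> from datetime import datetime
>>> bday = CustomBusinessDay(holidays=[datetime(2018, 1, 1)])
>>> pd.Timestamp('2017-12-29') + bday
Timestamp('2018-01-02 00:00:00')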
"""
_cacheable = False
_prefix = 'C'
_attributes = frozenset(['n', 'normalize',
'weekmask', 'holidays', 'calendar', 'offset'])
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "_offset", offset)
_CustomMixin.__init__(self, weekmask, holidays, calendar)
@apply_wraps
def apply(self, other):
if self.n <= 0:
roll = 'forward'
else:
roll = 'backward'
if isinstance(other, datetime):
date_in = other
np_dt = np.datetime64(date_in.date())
np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,
busdaycal=self.calendar)
dt_date = np_incr_dt.astype(datetime)
result = datetime.combine(dt_date, date_in.time())
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine trading day with '
'datetime, datetime64 or timedelta.')
def apply_index(self, i):
raise NotImplementedError
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
day64 = _to_dt64(dt, 'datetime64[D]')
return np.is_busday(day64, busdaycal=self.calendar)
class CustomBusinessHour(_CustomMixin, BusinessHourMixin,
SingleConstructorOffset):
"""
DateOffset subclass representing possibly n custom business hours
.. versionadded:: 0.18.1
"""
_prefix = 'CBH'
_anchor = 0
_attributes = frozenset(['n', 'normalize',
'weekmask', 'holidays', 'calendar',
'start', 'end', 'offset'])
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None,
start='09:00', end='17:00', offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "_offset", offset)
_CustomMixin.__init__(self, weekmask, holidays, calendar)
BusinessHourMixin.__init__(self, start=start, end=end, offset=offset)
# ---------------------------------------------------------------------
# Month-Based Offset Classes
class MonthOffset(SingleConstructorOffset):
_adjust_dst = True
_attributes = frozenset(['n', 'normalize'])
__init__ = BaseOffset.__init__
@property
def name(self):
if self.isAnchored:
return self.rule_code
else:
month = ccalendar.MONTH_ALIASES[self.n]
return "{code}-{month}".format(code=self.rule_code,
month=month)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day == self._get_offset_day(dt)
@apply_wraps
def apply(self, other):
compare_day = self._get_offset_day(other)
n = liboffsets.roll_convention(other.day, self.n, compare_day)
return shift_month(other, n, self._day_opt)
@apply_index_wraps
def apply_index(self, i):
shifted = liboffsets.shift_months(i.asi8, self.n, self._day_opt)
return i._shallow_copy(shifted)
class MonthEnd(MonthOffset):
"""DateOffset of one month end"""
_prefix = 'M'
_day_opt = 'end'
class MonthBegin(MonthOffset):
"""DateOffset of one month at beginning"""
_prefix = 'MS'
_day_opt = 'start'
class BusinessMonthEnd(MonthOffset):
"""DateOffset increments between business EOM dates"""
_prefix = 'BM'
_day_opt = 'business_end'
class BusinessMonthBegin(MonthOffset):
"""DateOffset of one business month at beginning"""
_prefix = 'BMS'
_day_opt = 'business_start'
class _CustomBusinessMonth(_CustomMixin, BusinessMixin, MonthOffset):
"""
DateOffset subclass representing one custom business month, incrementing
between [BEGIN/END] of month dates
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_attributes = frozenset(['n', 'normalize',
'weekmask', 'holidays', 'calendar', 'offset'])
onOffset = DateOffset.onOffset # override MonthOffset method
apply_index = DateOffset.apply_index # override MonthOffset method
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "_offset", offset)
_CustomMixin.__init__(self, weekmask, holidays, calendar)
@cache_readonly
def cbday_roll(self):
"""Define default roll function to be called in apply method"""
cbday = CustomBusinessDay(n=self.n, normalize=False, **self.kwds)
if self._prefix.endswith('S'):
# MonthBegin
roll_func = cbday.rollforward
else:
# MonthEnd
roll_func = cbday.rollback
return roll_func
@cache_readonly
def m_offset(self):
if self._prefix.endswith('S'):
# MonthBegin
moff = MonthBegin(n=1, normalize=False)
else:
# MonthEnd
moff = MonthEnd(n=1, normalize=False)
return moff
@cache_readonly
def month_roll(self):
"""Define default roll function to be called in apply method"""
if self._prefix.endswith('S'):
# MonthBegin
roll_func = self.m_offset.rollback
else:
# MonthEnd
roll_func = self.m_offset.rollforward
return roll_func
@apply_wraps
def apply(self, other):
# First move to month offset
cur_month_offset_date = self.month_roll(other)
# Find this custom month offset
compare_date = self.cbday_roll(cur_month_offset_date)
n = liboffsets.roll_convention(other.day, self.n, compare_date.day)
new = cur_month_offset_date + n * self.m_offset
result = self.cbday_roll(new)
return result
class CustomBusinessMonthEnd(_CustomBusinessMonth):
# TODO(py27): Replace condition with Substitution after dropping Py27
if _CustomBusinessMonth.__doc__:
__doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end')
_prefix = 'CBM'
class CustomBusinessMonthBegin(_CustomBusinessMonth):
# TODO(py27): Replace condition with Substitution after dropping Py27
if _CustomBusinessMonth.__doc__:
__doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]',
'beginning')
_prefix = 'CBMS'
# ---------------------------------------------------------------------
# Semi-Month Based Offset Classes
class SemiMonthOffset(DateOffset):
_adjust_dst = True
_default_day_of_month = 15
_min_day_of_month = 2
_attributes = frozenset(['n', 'normalize', 'day_of_month'])
def __init__(self, n=1, normalize=False, day_of_month=None):
BaseOffset.__init__(self, n, normalize)
if day_of_month is None:
object.__setattr__(self, "day_of_month",
self._default_day_of_month)
else:
object.__setattr__(self, "day_of_month", int(day_of_month))
if not self._min_day_of_month <= self.day_of_month <= 27:
msg = 'day_of_month must be {min}<=day_of_month<=27, got {day}'
raise ValueError(msg.format(min=self._min_day_of_month,
day=self.day_of_month))
@classmethod
def _from_name(cls, suffix=None):
return cls(day_of_month=suffix)
@property
def rule_code(self):
suffix = '-{day_of_month}'.format(day_of_month=self.day_of_month)
return self._prefix + suffix
@apply_wraps
def apply(self, other):
# shift `other` to self.day_of_month, incrementing `n` if necessary
n = liboffsets.roll_convention(other.day, self.n, self.day_of_month)
days_in_month = ccalendar.get_days_in_month(other.year, other.month)
# For SemiMonthBegin on other.day == 1 and
# SemiMonthEnd on other.day == days_in_month,
# shifting `other` to `self.day_of_month` _always_ requires
# incrementing/decrementing `n`, regardless of whether it is
# initially positive.
if type(self) is SemiMonthBegin and (self.n <= 0 and other.day == 1):
n -= 1
elif type(self) is SemiMonthEnd and (self.n > 0 and
other.day == days_in_month):
n += 1
return self._apply(n, other)
def _apply(self, n, other):
"""Handle specific apply logic for child classes"""
raise com.AbstractMethodError(self)
@apply_index_wraps
def apply_index(self, i):
# determine how many days away from the 1st of the month we are
days_from_start = i.to_perioddelta('M').asi8
delta = Timedelta(days=self.day_of_month - 1).value
# get boolean array for each element before the day_of_month
before_day_of_month = days_from_start < delta
# get boolean array for each element after the day_of_month
after_day_of_month = days_from_start > delta
# determine the correct n for each date in i
roll = self._get_roll(i, before_day_of_month, after_day_of_month)
# isolate the time since it will be stripped away on the next line
time = i.to_perioddelta('D')
# apply the correct number of months
i = (i.to_period('M') + (roll // 2)).to_timestamp()
# apply the correct day
i = self._apply_index_days(i, roll)
return i + time
def _get_roll(self, i, before_day_of_month, after_day_of_month):
"""Return an array with the correct n for each date in i.
The roll array is based on the fact that i gets rolled back to
the first day of the month.
"""
raise com.AbstractMethodError(self)
def _apply_index_days(self, i, roll):
"""Apply the correct day for each date in i"""
raise com.AbstractMethodError(self)
class SemiMonthEnd(SemiMonthOffset):
"""
Two DateOffsets per month, repeating on the last
day of the month and on day_of_month.
.. versionadded:: 0.19.0
Parameters
----------
n: int
normalize : bool, default False
day_of_month: int, {1, 3,...,27}, default 15
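Examples
--------
With the default ``day_of_month=15`` the offset dates are the 15th and
the last day of each month; for illustration:
>>> pd.Timestamp('2018-01-05') + SemiMonthEnd()
Timestamp('2018-01-15 00:00:00')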
"""
_prefix = 'SM'
_min_day_of_month = 1
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
days_in_month = ccalendar.get_days_in_month(dt.year, dt.month)
return dt.day in (self.day_of_month, days_in_month)
def _apply(self, n, other):
months = n // 2
day = 31 if n % 2 else self.day_of_month
return shift_month(other, months, day)
def _get_roll(self, i, before_day_of_month, after_day_of_month):
n = self.n
is_month_end = i.is_month_end
if n > 0:
roll_end = np.where(is_month_end, 1, 0)
roll_before = np.where(before_day_of_month, n, n + 1)
roll = roll_end + roll_before
elif n == 0:
roll_after = np.where(after_day_of_month, 2, 0)
roll_before = np.where(~after_day_of_month, 1, 0)
roll = roll_before + roll_after
else:
roll = np.where(after_day_of_month, n + 2, n + 1)
return roll
def _apply_index_days(self, i, roll):
"""Add days portion of offset to DatetimeIndex i
Parameters
----------
i : DatetimeIndex
roll : ndarray[int64_t]
Returns
-------
result : DatetimeIndex
"""
nanos = (roll % 2) * Timedelta(days=self.day_of_month).value
i += nanos.astype('timedelta64[ns]')
return i + Timedelta(days=-1)
class SemiMonthBegin(SemiMonthOffset):
"""
Two DateOffsets per month, repeating on the first
day of the month and on day_of_month.
.. versionadded:: 0.19.0
Parameters
----------
n: int
normalize : bool, default False
day_of_month: int, {2, 3,...,27}, default 15
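Examples
--------
With the default ``day_of_month=15`` the offset dates are the 1st and
the 15th of each month; past the 15th, the date rolls to the start of
the next month:
>>> pd.Timestamp('2018-01-20') + SemiMonthBegin()
Timestamp('2018-02-01 00:00:00')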
"""
_prefix = 'SMS'
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day in (1, self.day_of_month)
def _apply(self, n, other):
months = n // 2 + n % 2
day = 1 if n % 2 else self.day_of_month
return shift_month(other, months, day)
def _get_roll(self, i, before_day_of_month, after_day_of_month):
n = self.n
is_month_start = i.is_month_start
if n > 0:
roll = np.where(before_day_of_month, n, n + 1)
elif n == 0:
roll_start = np.where(is_month_start, 0, 1)
roll_after = np.where(after_day_of_month, 1, 0)
roll = roll_start + roll_after
else:
roll_after = np.where(after_day_of_month, n + 2, n + 1)
roll_start = np.where(is_month_start, -1, 0)
roll = roll_after + roll_start
return roll
def _apply_index_days(self, i, roll):
"""Add days portion of offset to DatetimeIndex i
Parameters
----------
i : DatetimeIndex
roll : ndarray[int64_t]
Returns
-------
result : DatetimeIndex
"""
nanos = (roll % 2) * Timedelta(days=self.day_of_month - 1).value
return i + nanos.astype('timedelta64[ns]')
# ---------------------------------------------------------------------
# Week-Based Offset Classes
class Week(DateOffset):
"""
Weekly offset
Parameters
----------
weekday : int, default None
Always generate specific day of week. 0 for Monday
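Examples
--------
For instance, rolling forward to the next Friday:
>>> pd.Timestamp('2018-01-01') + Week(weekday=4)
Timestamp('2018-01-05 00:00:00')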
"""
_adjust_dst = True
_inc = timedelta(weeks=1)
_prefix = 'W'
_attributes = frozenset(['n', 'normalize', 'weekday'])
def __init__(self, n=1, normalize=False, weekday=None):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "weekday", weekday)
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got {day}'
.format(day=self.weekday))
def isAnchored(self):
return (self.n == 1 and self.weekday is not None)
@apply_wraps
def apply(self, other):
if self.weekday is None:
return other + self.n * self._inc
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
if k > 0:
k -= 1
return other + timedelta(weeks=k)
@apply_index_wraps
def apply_index(self, i):
if self.weekday is None:
return ((i.to_period('W') + self.n).to_timestamp() +
i.to_perioddelta('W'))
else:
return self._end_apply_index(i)
def _end_apply_index(self, dtindex):
"""Add self to the given DatetimeIndex, specialized for case where
self.weekday is non-null.
Parameters
----------
dtindex : DatetimeIndex
Returns
-------
result : DatetimeIndex
"""
off = dtindex.to_perioddelta('D')
base, mult = libfrequencies.get_freq_code(self.freqstr)
base_period = dtindex.to_period(base)
if self.n > 0:
# when adding, dates on end roll to next
normed = dtindex - off + Timedelta(1, 'D') - Timedelta(1, 'ns')
roll = np.where(base_period.to_timestamp(how='end') == normed,
self.n, self.n - 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp(how='end')
return base + off + Timedelta(1, 'ns') - Timedelta(1, 'D')
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
elif self.weekday is None:
return True
return dt.weekday() == self.weekday
@property
def rule_code(self):
suffix = ''
if self.weekday is not None:
weekday = ccalendar.int_to_weekday[self.weekday]
suffix = '-{weekday}'.format(weekday=weekday)
return self._prefix + suffix
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
weekday = None
else:
weekday = ccalendar.weekday_to_int[suffix]
return cls(weekday=weekday)
class _WeekOfMonthMixin(object):
"""Mixin for methods common to WeekOfMonth and LastWeekOfMonth"""
@apply_wraps
def apply(self, other):
compare_day = self._get_offset_day(other)
months = self.n
if months > 0 and compare_day > other.day:
months -= 1
elif months <= 0 and compare_day < other.day:
months += 1
shifted = shift_month(other, months, 'start')
to_day = self._get_offset_day(shifted)
return liboffsets.shift_day(shifted, to_day - shifted.day)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day == self._get_offset_day(dt)
class WeekOfMonth(_WeekOfMonthMixin, DateOffset):
"""
Describes monthly dates like "the Tuesday of the 2nd week of each month"
Parameters
----------
n : int
week : {0, 1, 2, 3, ...}, default 0
0 is the 1st week of the month, 1 the 2nd week, etc.
weekday : {0, 1, ..., 6}, default 0
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
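Examples
--------
The first Monday of March 2018 falls on the 5th:
>>> pd.Timestamp('2018-03-02') + WeekOfMonth(week=0, weekday=0)
Timestamp('2018-03-05 00:00:00')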
"""
_prefix = 'WOM'
_adjust_dst = True
_attributes = frozenset(['n', 'normalize', 'week', 'weekday'])
def __init__(self, n=1, normalize=False, week=0, weekday=0):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "weekday", weekday)
object.__setattr__(self, "week", week)
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got {day}'
.format(day=self.weekday))
if self.week < 0 or self.week > 3:
raise ValueError('Week must be 0<=week<=3, got {week}'
.format(week=self.week))
def _get_offset_day(self, other):
"""
Find the day in the same month as other that has the same
weekday as self.weekday and is the self.week'th such day in the month.
Parameters
----------
other: datetime
Returns
-------
day: int
"""
mstart = datetime(other.year, other.month, 1)
wday = mstart.weekday()
shift_days = (self.weekday - wday) % 7
return 1 + shift_days + self.week * 7
@property
def rule_code(self):
weekday = ccalendar.int_to_weekday.get(self.weekday, '')
return '{prefix}-{week}{weekday}'.format(prefix=self._prefix,
week=self.week + 1,
weekday=weekday)
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix {prefix!r} requires a suffix."
.format(prefix=cls._prefix))
# TODO: handle n here...
# only one digit weeks (1 --> week 0, 2 --> week 1, etc.)
week = int(suffix[0]) - 1
weekday = ccalendar.weekday_to_int[suffix[1:]]
return cls(week=week, weekday=weekday)
class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset):
"""
Describes monthly dates in last week of month like "the last Tuesday of
each month"
Parameters
----------
n : int, default 1
weekday : {0, 1, ..., 6}, default 0
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
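Examples
--------
The last Monday of March 2018 falls on the 26th:
>>> pd.Timestamp('2018-03-02') + LastWeekOfMonth(weekday=0)
Timestamp('2018-03-26 00:00:00')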
"""
_prefix = 'LWOM'
_adjust_dst = True
_attributes = frozenset(['n', 'normalize', 'weekday'])
def __init__(self, n=1, normalize=False, weekday=0):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "weekday", weekday)
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got {day}'
.format(day=self.weekday))
def _get_offset_day(self, other):
"""
Find the day in the same month as other that has the same
weekday as self.weekday and is the last such day in the month.
Parameters
----------
other: datetime
Returns
-------
day: int
"""
dim = ccalendar.get_days_in_month(other.year, other.month)
mend = datetime(other.year, other.month, dim)
wday = mend.weekday()
shift_days = (wday - self.weekday) % 7
return dim - shift_days
@property
def rule_code(self):
weekday = ccalendar.int_to_weekday.get(self.weekday, '')
return '{prefix}-{weekday}'.format(prefix=self._prefix,
weekday=weekday)
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix {prefix!r} requires a suffix."
.format(prefix=cls._prefix))
# TODO: handle n here...
weekday = ccalendar.weekday_to_int[suffix]
return cls(weekday=weekday)
# ---------------------------------------------------------------------
# Quarter-Based Offset Classes
class QuarterOffset(DateOffset):
"""Quarter representation - doesn't call super"""
_default_startingMonth = None
_from_name_startingMonth = None
_adjust_dst = True
_attributes = frozenset(['n', 'normalize', 'startingMonth'])
# TODO: Consider combining QuarterOffset and YearOffset __init__ at some
# point. Also apply_index, onOffset, rule_code if
# startingMonth vs month attr names are resolved
def __init__(self, n=1, normalize=False, startingMonth=None):
BaseOffset.__init__(self, n, normalize)
if startingMonth is None:
startingMonth = self._default_startingMonth
object.__setattr__(self, "startingMonth", startingMonth)
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['startingMonth'] = ccalendar.MONTH_TO_CAL_NUM[suffix]
else:
if cls._from_name_startingMonth is not None:
kwargs['startingMonth'] = cls._from_name_startingMonth
return cls(**kwargs)
@property
def rule_code(self):
month = ccalendar.MONTH_ALIASES[self.startingMonth]
return '{prefix}-{month}'.format(prefix=self._prefix, month=month)
@apply_wraps
def apply(self, other):
# months_since: find the calendar quarter containing other.month,
# e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep].
# Then find the month in that quarter containing an onOffset date for
# self. `months_since` is the number of months to shift other.month
# to get to this on-offset month.
months_since = other.month % 3 - self.startingMonth % 3
qtrs = liboffsets.roll_qtrday(other, self.n, self.startingMonth,
day_opt=self._day_opt, modby=3)
months = qtrs * 3 - months_since
return shift_month(other, months, self._day_opt)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
mod_month = (dt.month - self.startingMonth) % 3
return mod_month == 0 and dt.day == self._get_offset_day(dt)
@apply_index_wraps
def apply_index(self, dtindex):
shifted = liboffsets.shift_quarters(dtindex.asi8, self.n,
self.startingMonth, self._day_opt)
return dtindex._shallow_copy(shifted)
class BQuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
"""
_outputName = 'BusinessQuarterEnd'
_default_startingMonth = 3
_from_name_startingMonth = 12
_prefix = 'BQ'
_day_opt = 'business_end'
# TODO: This is basically the same as BQuarterEnd
class BQuarterBegin(QuarterOffset):
_outputName = "BusinessQuarterBegin"
# I suspect this is wrong for *all* of them.
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'BQS'
_day_opt = 'business_start'
class QuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...
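Examples
--------
With the default ``startingMonth=3``, quarters end in March, June,
September and December:
>>> pd.Timestamp('2018-01-15') + QuarterEnd()
Timestamp('2018-03-31 00:00:00')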
"""
_outputName = 'QuarterEnd'
_default_startingMonth = 3
_prefix = 'Q'
_day_opt = 'end'
class QuarterBegin(QuarterOffset):
_outputName = 'QuarterBegin'
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'QS'
_day_opt = 'start'
# ---------------------------------------------------------------------
# Year-Based Offset Classes
class YearOffset(DateOffset):
"""DateOffset that just needs a month"""
_adjust_dst = True
_attributes = frozenset(['n', 'normalize', 'month'])
def _get_offset_day(self, other):
# override BaseOffset method to use self.month instead of other.month
# TODO: there may be a more performant way to do this
return liboffsets.get_day_of_month(other.replace(month=self.month),
self._day_opt)
@apply_wraps
def apply(self, other):
years = roll_yearday(other, self.n, self.month, self._day_opt)
months = years * 12 + (self.month - other.month)
return shift_month(other, months, self._day_opt)
@apply_index_wraps
def apply_index(self, dtindex):
shifted = liboffsets.shift_quarters(dtindex.asi8, self.n,
self.month, self._day_opt,
modby=12)
return dtindex._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.month == self.month and dt.day == self._get_offset_day(dt)
def __init__(self, n=1, normalize=False, month=None):
BaseOffset.__init__(self, n, normalize)
month = month if month is not None else self._default_month
object.__setattr__(self, "month", month)
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['month'] = ccalendar.MONTH_TO_CAL_NUM[suffix]
return cls(**kwargs)
@property
def rule_code(self):
month = ccalendar.MONTH_ALIASES[self.month]
return '{prefix}-{month}'.format(prefix=self._prefix, month=month)
class BYearEnd(YearOffset):
"""DateOffset increments between business EOM dates"""
_outputName = 'BusinessYearEnd'
_default_month = 12
_prefix = 'BA'
_day_opt = 'business_end'
class BYearBegin(YearOffset):
"""DateOffset increments between business year begin dates"""
_outputName = 'BusinessYearBegin'
_default_month = 1
_prefix = 'BAS'
_day_opt = 'business_start'
class YearEnd(YearOffset):
"""DateOffset increments between calendar year ends"""
_default_month = 12
_prefix = 'A'
_day_opt = 'end'
class YearBegin(YearOffset):
"""DateOffset increments between calendar year begin dates"""
_default_month = 1
_prefix = 'AS'
_day_opt = 'start'
# ---------------------------------------------------------------------
# Special Offset Classes
class FY5253(DateOffset):
"""
Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar.
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure in some industries,
such as the retail, manufacturing, and parking industries.
For more information see:
http://en.wikipedia.org/wiki/4-4-5_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'RE'
_adjust_dst = True
_attributes = frozenset(['weekday', 'startingMonth', 'variation'])
def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
variation="nearest"):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "startingMonth", startingMonth)
object.__setattr__(self, "weekday", weekday)
object.__setattr__(self, "variation", variation)
if self.n == 0:
raise ValueError('N cannot be 0')
if self.variation not in ["nearest", "last"]:
raise ValueError('{variation} is not a valid variation'
.format(variation=self.variation))
def isAnchored(self):
return (self.n == 1 and
self.startingMonth is not None and
self.weekday is not None)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
dt = datetime(dt.year, dt.month, dt.day)
year_end = self.get_year_end(dt)
if self.variation == "nearest":
# We have to check the year end of "this" cal year AND the previous
return (year_end == dt or
self.get_year_end(shift_month(dt, -1, None)) == dt)
else:
return year_end == dt
@apply_wraps
def apply(self, other):
norm = Timestamp(other).normalize()
n = self.n
prev_year = self.get_year_end(
datetime(other.year - 1, self.startingMonth, 1))
cur_year = self.get_year_end(
datetime(other.year, self.startingMonth, 1))
next_year = self.get_year_end(
datetime(other.year + 1, self.startingMonth, 1))
prev_year = conversion.localize_pydatetime(prev_year, other.tzinfo)
cur_year = conversion.localize_pydatetime(cur_year, other.tzinfo)
next_year = conversion.localize_pydatetime(next_year, other.tzinfo)
# Note: next_year.year == other.year + 1, so we will always
# have other < next_year
if norm == prev_year:
n -= 1
elif norm == cur_year:
pass
elif n > 0:
if norm < prev_year:
n -= 2
elif prev_year < norm < cur_year:
n -= 1
elif cur_year < norm < next_year:
pass
else:
if cur_year < norm < next_year:
n += 1
elif prev_year < norm < cur_year:
pass
elif (norm.year == prev_year.year and norm < prev_year and
prev_year - norm <= timedelta(6)):
# GH#14774, error when next_year.year == cur_year.year
# e.g. prev_year == datetime(2004, 1, 3),
# other == datetime(2004, 1, 1)
n -= 1
else:
assert False
shifted = datetime(other.year + n, self.startingMonth, 1)
result = self.get_year_end(shifted)
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second,
other.microsecond)
return result
def get_year_end(self, dt):
assert dt.tzinfo is None
dim = ccalendar.get_days_in_month(dt.year, self.startingMonth)
target_date = datetime(dt.year, self.startingMonth, dim)
wkday_diff = self.weekday - target_date.weekday()
if wkday_diff == 0:
# year_end is the same for "last" and "nearest" cases
return target_date
if self.variation == "last":
days_forward = (wkday_diff % 7) - 7
# days_forward is always negative, so we always end up
# in the same year as dt
return target_date + timedelta(days=days_forward)
else:
# variation == "nearest":
days_forward = wkday_diff % 7
if days_forward <= 3:
# The upcoming self.weekday is closer than the previous one
return target_date + timedelta(days_forward)
else:
# The previous self.weekday is closer than the upcoming one
return target_date + timedelta(days_forward - 7)
@property
def rule_code(self):
prefix = self._prefix
suffix = self.get_rule_code_suffix()
return "{prefix}-{suffix}".format(prefix=prefix, suffix=suffix)
def _get_suffix_prefix(self):
if self.variation == "nearest":
return 'N'
else:
return 'L'
def get_rule_code_suffix(self):
prefix = self._get_suffix_prefix()
month = ccalendar.MONTH_ALIASES[self.startingMonth]
weekday = ccalendar.int_to_weekday[self.weekday]
return '{prefix}-{month}-{weekday}'.format(prefix=prefix, month=month,
weekday=weekday)
@classmethod
    def _parse_suffix(cls, variation_code, startingMonth_code, weekday_code):
        if variation_code == "N":
            variation = "nearest"
        elif variation_code == "L":
            variation = "last"
        else:
            raise ValueError("Unable to parse variation_code: "
                             "{code}".format(code=variation_code))
startingMonth = ccalendar.MONTH_TO_CAL_NUM[startingMonth_code]
weekday = ccalendar.weekday_to_int[weekday_code]
return {"weekday": weekday,
"startingMonth": startingMonth,
"variation": variation}
@classmethod
def _from_name(cls, *args):
return cls(**cls._parse_suffix(*args))
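# --- Illustrative sketch (not part of the original module): a hand-worked
# check of FY5253.get_year_end for both variations. Dec 31, 2015 falls on a
# Thursday, so the nearest Saturday is Jan 2, 2016, while the last Saturday
# inside December is Dec 26, 2015.
def _demo_fy5253_year_end():
    nearest = FY5253(weekday=5, startingMonth=12, variation="nearest")
    last = FY5253(weekday=5, startingMonth=12, variation="last")
    dt = datetime(2015, 6, 1)
    assert nearest.get_year_end(dt) == datetime(2016, 1, 2)
    assert last.get_year_end(dt) == datetime(2015, 12, 26)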
class FY5253Quarter(DateOffset):
"""
    DateOffset increments between business quarter dates
    for a 52-53 week fiscal year (also known as a 4-4-5 calendar).
    It is used by companies that want their
    fiscal year to always end on the same day of the week.
    It is a method of managing accounting periods.
    It is a common calendar structure in some industries,
    such as the retail, manufacturing, and parking industries.
    For more information, see:
    http://en.wikipedia.org/wiki/4-4-5_calendar
    The year may either:
    - end on the last X day of the Y month.
    - end on the X day closest to the last day of the Y month.
    X is a specific day of the week.
    Y is a certain month of the year.
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
    qtr_with_extra_week : The quarter number that has the leap (14-week)
        quarter when needed. {1, 2, 3, 4}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'REQ'
_adjust_dst = True
_attributes = frozenset(['weekday', 'startingMonth', 'qtr_with_extra_week',
'variation'])
def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
qtr_with_extra_week=1, variation="nearest"):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "startingMonth", startingMonth)
object.__setattr__(self, "weekday", weekday)
object.__setattr__(self, "qtr_with_extra_week", qtr_with_extra_week)
object.__setattr__(self, "variation", variation)
if self.n == 0:
raise ValueError('N cannot be 0')
@cache_readonly
def _offset(self):
return FY5253(startingMonth=self.startingMonth,
weekday=self.weekday,
variation=self.variation)
def isAnchored(self):
return self.n == 1 and self._offset.isAnchored()
def _rollback_to_year(self, other):
"""roll `other` back to the most recent date that was on a fiscal year
end. Return the date of that year-end, the number of full quarters
elapsed between that year-end and other, and the remaining Timedelta
since the most recent quarter-end.
Parameters
----------
other : datetime or Timestamp
Returns
-------
tuple of
prev_year_end : Timestamp giving most recent fiscal year end
num_qtrs : int
tdelta : Timedelta
"""
num_qtrs = 0
norm = Timestamp(other).tz_localize(None)
start = self._offset.rollback(norm)
# Note: start <= norm and self._offset.onOffset(start)
if start < norm:
# roll adjustment
qtr_lens = self.get_weeks(norm)
            # check that qtr_lens is consistent with self._offset addition
end = liboffsets.shift_day(start, days=7 * sum(qtr_lens))
assert self._offset.onOffset(end), (start, end, qtr_lens)
tdelta = norm - start
for qlen in qtr_lens:
if qlen * 7 <= tdelta.days:
num_qtrs += 1
tdelta -= Timedelta(days=qlen * 7)
else:
break
else:
tdelta = Timedelta(0)
# Note: we always have tdelta.value >= 0
return start, num_qtrs, tdelta
@apply_wraps
def apply(self, other):
# Note: self.n == 0 is not allowed.
n = self.n
prev_year_end, num_qtrs, tdelta = self._rollback_to_year(other)
res = prev_year_end
n += num_qtrs
if self.n <= 0 and tdelta.value > 0:
n += 1
# Possible speedup by handling years first.
years = n // 4
if years:
res += self._offset * years
n -= years * 4
# Add an extra day to make *sure* we are getting the quarter lengths
# for the upcoming year, not the previous year
qtr_lens = self.get_weeks(res + Timedelta(days=1))
# Note: we always have 0 <= n < 4
weeks = sum(qtr_lens[:n])
if weeks:
res = liboffsets.shift_day(res, days=weeks * 7)
return res
def get_weeks(self, dt):
ret = [13] * 4
year_has_extra_week = self.year_has_extra_week(dt)
if year_has_extra_week:
ret[self.qtr_with_extra_week - 1] = 14
return ret
def year_has_extra_week(self, dt):
# Avoid round-down errors --> normalize to get
# e.g. '370D' instead of '360D23H'
norm = Timestamp(dt).normalize().tz_localize(None)
next_year_end = self._offset.rollforward(norm)
prev_year_end = norm - self._offset
weeks_in_year = (next_year_end - prev_year_end).days / 7
assert weeks_in_year in [52, 53], weeks_in_year
return weeks_in_year == 53
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if self._offset.onOffset(dt):
return True
next_year_end = dt - self._offset
qtr_lens = self.get_weeks(dt)
current = next_year_end
for qtr_len in qtr_lens:
current = liboffsets.shift_day(current, days=qtr_len * 7)
if dt == current:
return True
return False
@property
def rule_code(self):
suffix = self._offset.get_rule_code_suffix()
qtr = self.qtr_with_extra_week
return "{prefix}-{suffix}-{qtr}".format(prefix=self._prefix,
suffix=suffix, qtr=qtr)
@classmethod
def _from_name(cls, *args):
return cls(**dict(FY5253._parse_suffix(*args[:-1]),
qtr_with_extra_week=int(args[-1])))
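# --- Illustrative sketch (not part of the original module): stepping one
# 4-4-5 quarter from a fiscal year-end. With variation="last" and Saturday
# year-ends in December, 2015-12-26 is a year-end and Q1 is 13 weeks long,
# while the fiscal year ending 2016-12-31 carries the 53rd week in Q4.
def _demo_fy5253_quarter():
    offset = FY5253Quarter(weekday=5, startingMonth=12,
                           qtr_with_extra_week=4, variation="last")
    start = Timestamp('2015-12-26')
    assert (start + offset) - start == Timedelta(days=13 * 7)
    assert offset.year_has_extra_week(Timestamp('2016-06-01'))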
class Easter(DateOffset):
"""
    DateOffset for the Easter holiday, using
    logic defined in dateutil. Currently uses
    the revised method, which is valid for years
    1583-4099.
"""
_adjust_dst = True
_attributes = frozenset(['n', 'normalize'])
__init__ = BaseOffset.__init__
@apply_wraps
def apply(self, other):
current_easter = easter(other.year)
current_easter = datetime(current_easter.year,
current_easter.month, current_easter.day)
current_easter = conversion.localize_pydatetime(current_easter,
other.tzinfo)
n = self.n
if n >= 0 and other < current_easter:
n -= 1
elif n < 0 and other > current_easter:
n += 1
# TODO: Why does this handle the 0 case the opposite of others?
# NOTE: easter returns a datetime.date so we have to convert to type of
# other
new = easter(other.year + n)
new = datetime(new.year, new.month, new.day, other.hour,
other.minute, other.second, other.microsecond)
return new
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return date(dt.year, dt.month, dt.day) == easter(dt.year)
class CalendarDay(SingleConstructorOffset):
"""
    Calendar day offset. Respects calendar arithmetic, as opposed to Day,
    which respects absolute time.
"""
_adjust_dst = True
_inc = Timedelta(days=1)
_prefix = 'CD'
_attributes = frozenset(['n', 'normalize'])
def __init__(self, n=1, normalize=False):
BaseOffset.__init__(self, n, normalize)
@apply_wraps
def apply(self, other):
"""
Apply scalar arithmetic with CalendarDay offset. Incoming datetime
objects can be tz-aware or naive.
"""
if type(other) == type(self):
# Add other CalendarDays
return type(self)(self.n + other.n, normalize=self.normalize)
tzinfo = getattr(other, 'tzinfo', None)
if tzinfo is not None:
other = other.replace(tzinfo=None)
other = other + self.n * self._inc
if tzinfo is not None:
            # This can raise an AmbiguousTimeError or NonExistentTimeError
other = conversion.localize_pydatetime(other, tzinfo)
try:
return as_timestamp(other)
except TypeError:
raise TypeError("Cannot perform arithmetic between {other} and "
"CalendarDay".format(other=type(other)))
@apply_index_wraps
def apply_index(self, i):
"""
        Apply the CalendarDay offset to a DatetimeIndex. Incoming DatetimeIndex
        objects are assumed to be tz-naive.
"""
return i + self.n * self._inc
# ---------------------------------------------------------------------
# Ticks
def _tick_comp(op):
def f(self, other):
return op(self.delta, other.delta)
return f
class Tick(liboffsets._Tick, SingleConstructorOffset):
_inc = Timedelta(microseconds=1000)
_prefix = 'undefined'
_attributes = frozenset(['n', 'normalize'])
def __init__(self, n=1, normalize=False):
BaseOffset.__init__(self, n, normalize)
if normalize:
raise ValueError("Tick offset with `normalize=True` are not "
"allowed.") # GH#21427
__gt__ = _tick_comp(operator.gt)
__ge__ = _tick_comp(operator.ge)
__lt__ = _tick_comp(operator.lt)
__le__ = _tick_comp(operator.le)
__eq__ = _tick_comp(operator.eq)
__ne__ = _tick_comp(operator.ne)
def __add__(self, other):
if isinstance(other, Tick):
if type(self) == type(other):
return type(self)(self.n + other.n)
else:
return _delta_to_tick(self.delta + other.delta)
elif isinstance(other, ABCPeriod):
return other + self
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
except OverflowError:
raise OverflowError("the add operation between {self} and {other} "
"will overflow".format(self=self, other=other))
def __eq__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta == other.delta
else:
return False
# This is identical to DateOffset.__hash__, but has to be redefined here
# for Python 3, because we've redefined __eq__.
def __hash__(self):
return hash(self._params)
def __ne__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta != other.delta
else:
return True
@property
def delta(self):
return self.n * self._inc
@property
def nanos(self):
return delta_to_nanoseconds(self.delta)
# TODO: Should Tick have its own apply_index?
def apply(self, other):
        # Timestamp can handle tz and nanoseconds, thus no need to use apply_wraps
if isinstance(other, Timestamp):
# GH 15126
# in order to avoid a recursive
# call of __add__ and __radd__ if there is
# an exception, when we call using the + operator,
# we directly call the known method
result = other.__add__(self)
if result == NotImplemented:
raise OverflowError
return result
elif isinstance(other, (datetime, np.datetime64, date)):
return as_timestamp(other) + self
if isinstance(other, timedelta):
return other + self.delta
elif isinstance(other, type(self)):
return type(self)(self.n + other.n)
raise ApplyTypeError('Unhandled type: {type_str}'
.format(type_str=type(other).__name__))
def isAnchored(self):
return False
def _delta_to_tick(delta):
if delta.microseconds == 0:
if delta.seconds == 0:
return Day(delta.days)
else:
seconds = delta.days * 86400 + delta.seconds
if seconds % 3600 == 0:
return Hour(seconds / 3600)
elif seconds % 60 == 0:
return Minute(seconds / 60)
else:
return Second(seconds)
else:
nanos = delta_to_nanoseconds(delta)
if nanos % 1000000 == 0:
return Milli(nanos // 1000000)
elif nanos % 1000 == 0:
return Micro(nanos // 1000)
else: # pragma: no cover
return Nano(nanos)
class Day(Tick):
_inc = Timedelta(days=1)
_prefix = 'D'
class Hour(Tick):
_inc = Timedelta(hours=1)
_prefix = 'H'
class Minute(Tick):
_inc = Timedelta(minutes=1)
_prefix = 'T'
class Second(Tick):
_inc = Timedelta(seconds=1)
_prefix = 'S'
class Milli(Tick):
_inc = Timedelta(milliseconds=1)
_prefix = 'L'
class Micro(Tick):
_inc = Timedelta(microseconds=1)
_prefix = 'U'
class Nano(Tick):
_inc = Timedelta(nanoseconds=1)
_prefix = 'N'
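# --- Illustrative sketch (not part of the original module): _delta_to_tick
# picks the coarsest Tick subclass that exactly represents a timedelta, and
# Tick equality compares the underlying deltas.
def _demo_delta_to_tick():
    assert _delta_to_tick(timedelta(days=2)) == Day(2)
    assert _delta_to_tick(timedelta(minutes=90)) == Minute(90)
    assert _delta_to_tick(timedelta(milliseconds=5)) == Milli(5)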
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
# ---------------------------------------------------------------------
def generate_range(start=None, end=None, periods=None,
offset=BDay(), time_rule=None):
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
objects to represent time increments
Parameters
----------
start : datetime (default None)
end : datetime (default None)
    periods : int (default None)
    offset : DateOffset (default BDay())
time_rule : (legacy) name of DateOffset object to be used, optional
Corresponds with names expected by tseries.frequencies.get_offset
Notes
-----
* This method is faster for generating weekdays than dateutil.rrule
* At least two of (start, end, periods) must be specified.
* If both start and end are specified, the returned dates will
satisfy start <= date <= end.
* If both time_rule and offset are specified, time_rule supersedes offset.
Returns
-------
dates : generator object
"""
if time_rule is not None:
from pandas.tseries.frequencies import get_offset
offset = get_offset(time_rule)
start = to_datetime(start)
end = to_datetime(end)
if start and not offset.onOffset(start):
start = offset.rollforward(start)
elif end and not offset.onOffset(end):
end = offset.rollback(end)
if periods is None and end < start:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
cur = start
if offset.n >= 0:
while cur <= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date <= cur:
raise ValueError('Offset {offset} did not increment date'
.format(offset=offset))
cur = next_date
else:
while cur >= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date >= cur:
raise ValueError('Offset {offset} did not decrement date'
.format(offset=offset))
cur = next_date
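# --- Illustrative sketch (not part of the original module): generate_range
# is lazy; here it yields three consecutive business days starting from
# Monday 2018-01-01 with the default BDay() offset.
def _demo_generate_range():
    dates = list(generate_range(start=datetime(2018, 1, 1), periods=3))
    assert [d.day for d in dates] == [1, 2, 3]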
prefix_mapping = {offset._prefix: offset for offset in [
YearBegin, # 'AS'
YearEnd, # 'A'
BYearBegin, # 'BAS'
BYearEnd, # 'BA'
BusinessDay, # 'B'
BusinessMonthBegin, # 'BMS'
BusinessMonthEnd, # 'BM'
BQuarterEnd, # 'BQ'
BQuarterBegin, # 'BQS'
BusinessHour, # 'BH'
CustomBusinessDay, # 'C'
CustomBusinessMonthEnd, # 'CBM'
CustomBusinessMonthBegin, # 'CBMS'
CustomBusinessHour, # 'CBH'
MonthEnd, # 'M'
MonthBegin, # 'MS'
Nano, # 'N'
SemiMonthEnd, # 'SM'
SemiMonthBegin, # 'SMS'
Week, # 'W'
Second, # 'S'
Minute, # 'T'
Micro, # 'U'
QuarterEnd, # 'Q'
QuarterBegin, # 'QS'
Milli, # 'L'
Hour, # 'H'
Day, # 'D'
WeekOfMonth, # 'WOM'
FY5253,
FY5253Quarter,
CalendarDay # 'CD'
]}
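# --- Illustrative sketch (not part of the original module): prefix_mapping
# is the reverse lookup used when parsing frequency aliases such as 'D' or
# 'RE-N-DEC-SAT' back into offset classes.
def _demo_prefix_lookup():
    assert prefix_mapping['D'] is Day
    assert prefix_mapping['W'] is Week
    assert prefix_mapping['RE'] is FY5253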
| bsd-3-clause |
persandstrom/home-assistant | homeassistant/components/switch/zha.py | 3 | 3400 | """
Switches on Zigbee Home Automation networks.
For more details on this platform, please refer to the documentation
at https://home-assistant.io/components/switch.zha/
"""
import logging
from homeassistant.components.switch import DOMAIN, SwitchDevice
from homeassistant.components import zha
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['zha']
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Zigbee Home Automation switches."""
from zigpy.zcl.clusters.general import OnOff
discovery_info = zha.get_discovery_info(hass, discovery_info)
if discovery_info is None:
return
switch = Switch(**discovery_info)
if discovery_info['new_join']:
in_clusters = discovery_info['in_clusters']
cluster = in_clusters[OnOff.cluster_id]
await zha.configure_reporting(
switch.entity_id, cluster, switch.value_attribute,
min_report=0, max_report=600, reportable_change=1
)
async_add_entities([switch], update_before_add=True)
class Switch(zha.Entity, SwitchDevice):
"""ZHA switch."""
_domain = DOMAIN
value_attribute = 0
def attribute_updated(self, attribute, value):
"""Handle attribute update from device."""
cluster = self._endpoint.on_off
attr_name = cluster.attributes.get(attribute, [attribute])[0]
_LOGGER.debug("%s: Attribute '%s' on cluster '%s' updated to %s",
self.entity_id, attr_name, cluster.ep_attribute, value)
if attribute == self.value_attribute:
self._state = value
self.async_schedule_update_ha_state()
@property
def should_poll(self) -> bool:
"""Let zha handle polling."""
return False
@property
def is_on(self) -> bool:
"""Return if the switch is on based on the statemachine."""
if self._state is None:
return False
return bool(self._state)
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
from zigpy.exceptions import DeliveryError
try:
res = await self._endpoint.on_off.on()
_LOGGER.debug("%s: turned 'on': %s", self.entity_id, res[1])
except DeliveryError as ex:
_LOGGER.error("%s: Unable to turn the switch on: %s",
self.entity_id, ex)
return
self._state = 1
self.async_schedule_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
from zigpy.exceptions import DeliveryError
try:
res = await self._endpoint.on_off.off()
_LOGGER.debug("%s: turned 'off': %s", self.entity_id, res[1])
except DeliveryError as ex:
_LOGGER.error("%s: Unable to turn the switch off: %s",
self.entity_id, ex)
return
self._state = 0
self.async_schedule_update_ha_state()
async def async_update(self):
"""Retrieve latest state."""
result = await zha.safe_read(self._endpoint.on_off,
['on_off'],
allow_cache=False,
only_cache=(not self._initialized))
self._state = result.get('on_off', self._state)
| apache-2.0 |
1st1/uvloop | examples/bench/echoserver.py | 1 | 6229 | import argparse
import asyncio
import gc
import os.path
import pathlib
import socket
import ssl
PRINT = 0
async def echo_server(loop, address, unix):
if unix:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(address)
sock.listen(5)
sock.setblocking(False)
if PRINT:
print('Server listening at', address)
with sock:
while True:
client, addr = await loop.sock_accept(sock)
if PRINT:
print('Connection from', addr)
loop.create_task(echo_client(loop, client))
async def echo_client(loop, client):
try:
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except (OSError, NameError):
pass
with client:
while True:
data = await loop.sock_recv(client, 1000000)
if not data:
break
await loop.sock_sendall(client, data)
if PRINT:
print('Connection closed')
async def echo_client_streams(reader, writer):
sock = writer.get_extra_info('socket')
try:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except (OSError, NameError):
pass
if PRINT:
print('Connection from', sock.getpeername())
while True:
data = await reader.read(1000000)
if not data:
break
writer.write(data)
if PRINT:
print('Connection closed')
writer.close()
class EchoProtocol(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
def connection_lost(self, exc):
self.transport = None
def data_received(self, data):
self.transport.write(data)
class EchoBufferedProtocol(asyncio.BufferedProtocol):
def connection_made(self, transport):
self.transport = transport
        # buffer_updated() slices (copies) this buffer before writing, so the
        # outgoing data won't be clobbered by the next read into it
self.buffer = bytearray(256 * 1024)
def connection_lost(self, exc):
self.transport = None
def get_buffer(self, sizehint):
return self.buffer
def buffer_updated(self, nbytes):
self.transport.write(self.buffer[:nbytes])
async def print_debug(loop):
while True:
print(chr(27) + "[2J") # clear screen
loop.print_debug_info()
await asyncio.sleep(0.5, loop=loop)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--uvloop', default=False, action='store_true')
parser.add_argument('--streams', default=False, action='store_true')
parser.add_argument('--proto', default=False, action='store_true')
parser.add_argument('--addr', default='127.0.0.1:25000', type=str)
parser.add_argument('--print', default=False, action='store_true')
parser.add_argument('--ssl', default=False, action='store_true')
parser.add_argument('--buffered', default=False, action='store_true')
args = parser.parse_args()
if args.uvloop:
import uvloop
loop = uvloop.new_event_loop()
print('using UVLoop')
else:
loop = asyncio.new_event_loop()
print('using asyncio loop')
asyncio.set_event_loop(loop)
loop.set_debug(False)
if args.print:
PRINT = 1
if hasattr(loop, 'print_debug_info'):
loop.create_task(print_debug(loop))
PRINT = 0
unix = False
if args.addr.startswith('file:'):
unix = True
addr = args.addr[5:]
if os.path.exists(addr):
os.remove(addr)
else:
addr = args.addr.split(':')
addr[1] = int(addr[1])
addr = tuple(addr)
print('serving on: {}'.format(addr))
server_context = None
if args.ssl:
print('with SSL')
server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
server_context.load_cert_chain(
(pathlib.Path(__file__).parent.parent.parent /
'tests' / 'certs' / 'ssl_cert.pem'),
(pathlib.Path(__file__).parent.parent.parent /
'tests' / 'certs' / 'ssl_key.pem'))
if hasattr(server_context, 'check_hostname'):
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
if args.streams:
if args.proto:
            print('cannot use --streams and --proto simultaneously')
            exit(1)
        if args.buffered:
            print('cannot use --streams and --buffered simultaneously')
exit(1)
print('using asyncio/streams')
if unix:
coro = asyncio.start_unix_server(echo_client_streams,
addr, loop=loop,
ssl=server_context)
else:
coro = asyncio.start_server(echo_client_streams,
*addr, loop=loop,
ssl=server_context)
srv = loop.run_until_complete(coro)
elif args.proto:
if args.streams:
            print('cannot use --streams and --proto simultaneously')
exit(1)
if args.buffered:
print('using buffered protocol')
protocol = EchoBufferedProtocol
else:
print('using simple protocol')
protocol = EchoProtocol
if unix:
coro = loop.create_unix_server(protocol, addr,
ssl=server_context)
else:
coro = loop.create_server(protocol, *addr,
ssl=server_context)
srv = loop.run_until_complete(coro)
else:
if args.ssl:
print('cannot use SSL for loop.sock_* methods')
exit(1)
print('using sock_recv/sock_sendall')
loop.create_task(echo_server(loop, addr, unix))
try:
loop.run_forever()
finally:
if hasattr(loop, 'print_debug_info'):
gc.collect()
print(chr(27) + "[2J")
loop.print_debug_info()
loop.close()
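# --- Illustrative sketch (not part of the original benchmark): a minimal
# client coroutine for exercising the echo server above over TCP, assuming
# the default --addr of 127.0.0.1:25000 and no --ssl.
async def _echo_once(message=b'ping', host='127.0.0.1', port=25000):
    reader, writer = await asyncio.open_connection(host, port)
    writer.write(message)
    await writer.drain()
    reply = await reader.readexactly(len(message))
    writer.close()
    assert reply == message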
| mit |
djsedulous/namecoind | libs/boost_1_50_0/tools/quickbook/test/python/output-deps.py | 6 | 4511 | #!/usr/bin/env python
import sys, os, subprocess, tempfile, re
def main(args, directory):
if len(args) != 1:
print "Usage: output-deps.py quickbook-command"
exit(1)
quickbook_command = args[0]
failures = 0
failures += run_quickbook(quickbook_command, 'svg_missing.qbk',
deps_gold = 'svg_missing_deps.txt')
failures += run_quickbook(quickbook_command, 'svg_missing.qbk',
locations_gold = 'svg_missing_locs.txt')
failures += run_quickbook(quickbook_command, 'missing_relative.qbk',
deps_gold = 'missing_relative_deps.txt',
locations_gold = 'missing_relative_locs.txt')
failures += run_quickbook(quickbook_command, 'include_path.qbk',
deps_gold = 'include_path_deps.txt',
locations_gold = 'include_path_locs.txt',
input_path = ['sub1', 'sub2'])
if failures == 0:
print "Success"
else:
print "Failures:",failures
exit(failures)
def run_quickbook(quickbook_command, filename, output_gold = None,
deps_gold = None, locations_gold = None, input_path = []):
failures = 0
command = [quickbook_command, '--debug', filename]
output_filename = None
if output_gold:
output_filename = temp_filename('.qbk')
command.extend(['--output-file', output_filename])
deps_filename = None
if deps_gold:
deps_filename = temp_filename('.txt')
command.extend(['--output-deps', deps_filename])
locations_filename = None
if locations_gold:
locations_filename = temp_filename('.txt')
command.extend(['--output-checked-locations', locations_filename])
try:
for path in input_path:
command.extend(['-I', path])
print 'Running: ' + ' '.join(command)
print
exit_code = subprocess.call(command)
print
success = not exit_code
if output_filename:
output = load_file(output_filename)
else:
output = None
if deps_filename:
deps = load_dependencies(deps_filename)
else:
deps = None
if locations_filename:
locations = load_locations(locations_filename)
else:
locations = None
finally:
if output_filename: os.unlink(output_filename)
if deps_filename: os.unlink(deps_filename)
if deps_gold:
gold = load_dependencies(deps_gold, adjust_paths = True)
if deps != gold:
failures = failures + 1
print "Dependencies don't match:"
print "Gold:", gold
print "Result:", deps
print
if locations_gold:
gold = load_locations(locations_gold, adjust_paths = True)
if locations != gold:
failures = failures + 1
print "Dependencies don't match:"
print "Gold:", gold
print "Result:", locations
print
if output_gold:
gold = load_file(output_gold)
if gold != output:
failures = failures + 1
print "Output doesn't match:"
print
print gold
print
print output
print
return failures
def load_dependencies(filename, adjust_paths = False):
dependencies = set()
f = open(filename, 'r')
for path in f:
if adjust_paths:
path = os.path.realpath(path)
if path in dependencies:
raise Exception("Duplicate path (%1s) in %2s" % (path, filename))
dependencies.add(path)
return dependencies
def load_locations(filename, adjust_paths = False):
line_matcher = re.compile("^([+-]) (.*)$")
dependencies = {}
f = open(filename, 'r')
for line in f:
m = line_matcher.match(line)
if not m:
raise Exception("Invalid dependency file: %1s" % filename)
found = m.group(1) == '+'
path = m.group(2)
if adjust_paths:
path = os.path.realpath(path)
if path in dependencies:
raise Exception("Duplicate path (%1s) in %2s" % (path, filename))
dependencies[path] = found
return dependencies
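# Illustrative sketch (not part of the original test driver): the checked
# locations file read by load_locations contains one "+ path" (found) or
# "- path" (not found) entry per line. The helper below is hypothetical and
# only demonstrates the expected on-disk format.
def _demo_load_locations():
    handle, path = tempfile.mkstemp(suffix='.txt')
    os.write(handle, "+ /tmp/found.qbk\n- /tmp/missing.qbk\n")
    os.close(handle)
    try:
        assert load_locations(path) == {'/tmp/found.qbk': True,
                                        '/tmp/missing.qbk': False}
    finally:
        os.unlink(path)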
def temp_filename(extension):
file = tempfile.mkstemp(suffix = extension)
os.close(file[0])
return file[1]
def load_file(filename):
f = open(filename, 'r')
try:
return f.read()
finally:
f.close()
return None
main(sys.argv[1:], os.path.dirname(sys.argv[0]))
| mit |
mmcauliffe/python-acoustic-similarity | tests/test_main_func_threading.py | 1 | 4393 | import pytest
from conch import (acoustic_similarity_mapping,
acoustic_similarity_directories,
analyze_segments,
acoustic_similarity_directory, analyze_long_file,
)
from conch.main import axb_mapping
from conch.io import load_path_mapping
from conch.analysis import MfccFunction, FormantTrackFunction, PitchTrackFunction, PraatPitchTrackFunction
from conch.distance import DtwFunction
from conch.analysis.segments import SegmentMapping
slow = pytest.mark.skipif(
not pytest.config.getoption("--runslow"),
reason="need --runslow option to run"
)
def test_acoustic_similarity_directories(tts_dir, call_back, praatpath):
func = PraatPitchTrackFunction(praat_path=praatpath)
dist_func = DtwFunction(norm=True)
scores = acoustic_similarity_directory(tts_dir, analysis_function=func, distance_function=dist_func,
call_back=call_back, multiprocessing=False)
# @slow
def test_acoustic_similarity_directory(soundfiles_dir, call_back):
func = PitchTrackFunction()
dist_func = DtwFunction(norm=True)
scores = acoustic_similarity_directory(soundfiles_dir, analysis_function=func, distance_function=dist_func,
call_back=call_back, multiprocessing=False)
def test_axb_mapping(axb_mapping_path):
path_mapping = load_path_mapping(axb_mapping_path)
assert len(path_mapping[0]) == 3
func = MfccFunction()
dist_func = DtwFunction(norm=True)
scores = axb_mapping(path_mapping, func, dist_func, multiprocessing=False)
print(scores)
def test_analyze_long_file_reaper(acoustic_corpus_path, reaper_func):
segments = [(1, 2, 0)]
output = analyze_long_file(acoustic_corpus_path, segments, reaper_func, multiprocessing=False)
for k in output.keys():
print(sorted(output[k].keys()))
assert (all(x >= 1 for x in output[k].keys()))
assert (all(x <= 2 for x in output[k].keys()))
output = analyze_long_file(acoustic_corpus_path, segments, reaper_func, padding=0.5, multiprocessing=False)
for k in output.keys():
print(sorted(output[k].keys()))
assert (all(x >= 1 for x in output[k].keys()))
assert (all(x <= 2 for x in output[k].keys()))
@pytest.mark.xfail
def test_analyze_file_segments_reaper(acoustic_corpus_path, reaper_func):
mapping = SegmentMapping()
seg = (acoustic_corpus_path, 1, 2, 0)
mapping.add_file_segment(*seg)
output = analyze_segments(mapping, reaper_func, multiprocessing=False)
for k in output.keys():
print(sorted(output[k].keys()))
assert (all(x >= 1 for x in output[k].keys()))
assert (all(x <= 2 for x in output[k].keys()))
mapping[0].properties['padding'] = 0.5
output = analyze_segments(mapping, reaper_func, multiprocessing=False)
for k in output.keys():
print(sorted(output[k].keys()))
assert (all(x >= 1 for x in output[k].keys()))
assert (all(x <= 2 for x in output[k].keys()))
def test_analyze_long_file_formants(acoustic_corpus_path, formants_func):
segments = [(1, 2, 0)]
output = analyze_long_file(acoustic_corpus_path, segments, formants_func, multiprocessing=False)
for k in output.keys():
print(sorted(output[k].keys()))
assert (all(x >= 1 for x in output[k].keys()))
assert (all(x <= 2 for x in output[k].keys()))
output = analyze_long_file(acoustic_corpus_path, segments, formants_func, padding=0.5, multiprocessing=False)
for k in output.keys():
print(sorted(output[k].keys()))
assert (all(x >= 1 for x in output[k].keys()))
assert (all(x <= 2 for x in output[k].keys()))
def test_analyze_long_file_pitch(acoustic_corpus_path, pitch_func):
segments = [(1, 2, 0)]
output = analyze_long_file(acoustic_corpus_path, segments, pitch_func, multiprocessing=False)
for k in output.keys():
print(sorted(output[k].keys()))
assert (all(x >= 1 for x in output[k].keys()))
assert (all(x <= 2 for x in output[k].keys()))
output = analyze_long_file(acoustic_corpus_path, segments, pitch_func, padding=0.5, multiprocessing=False)
for k in output.keys():
print(sorted(output[k].keys()))
assert (all(x >= 1 for x in output[k].keys()))
assert (all(x <= 2 for x in output[k].keys()))
| mit |
marcosmodesto/django-testapp | django/django/contrib/contenttypes/tests.py | 40 | 7048 | from __future__ import with_statement
import urllib
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.views import shortcut
from django.contrib.sites.models import Site
from django.http import HttpRequest, Http404
from django.test import TestCase
from django.utils.encoding import smart_str
from django.test.utils import override_settings
class FooWithoutUrl(models.Model):
"""
Fake model not defining ``get_absolute_url`` for
:meth:`ContentTypesTests.test_shortcut_view_without_get_absolute_url`"""
name = models.CharField(max_length=30, unique=True)
def __unicode__(self):
return self.name
class FooWithUrl(FooWithoutUrl):
"""
Fake model defining ``get_absolute_url`` for
:meth:`ContentTypesTests.test_shortcut_view`
"""
def get_absolute_url(self):
return "/users/%s/" % urllib.quote(smart_str(self.name))
class FooWithBrokenAbsoluteUrl(FooWithoutUrl):
"""
Fake model defining a ``get_absolute_url`` method containing an error
"""
def get_absolute_url(self):
return "/users/%s/" % self.unknown_field
class ContentTypesTests(TestCase):
def setUp(self):
self.old_Site_meta_installed = Site._meta.installed
ContentType.objects.clear_cache()
def tearDown(self):
Site._meta.installed = self.old_Site_meta_installed
ContentType.objects.clear_cache()
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model, ID
or natural key -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# A second hit, though, won't hit the DB, nor will a lookup by ID
# or natural key
with self.assertNumQueries(0):
ct = ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_id(ct.id)
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# The same should happen with a lookup by natural key
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# And a second hit shouldn't hit the DB
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
def test_get_for_models_empty_cache(self):
# Empty cache.
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_partial_cache(self):
# Partial cache
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_full_cache(self):
# Full cache
ContentType.objects.get_for_model(ContentType)
ContentType.objects.get_for_model(FooWithUrl)
with self.assertNumQueries(0):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_shortcut_view(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithUrl)
obj = FooWithUrl.objects.create(name="john")
if Site._meta.installed:
current_site = Site.objects.get_current()
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://%s/users/john/" % current_site.domain,
response._headers.get("location")[1])
Site._meta.installed = False
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/",
response._headers.get("location")[1])
def test_shortcut_view_without_get_absolute_url(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns 404 when get_absolute_url is not defined.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithoutUrl)
obj = FooWithoutUrl.objects.create(name="john")
self.assertRaises(Http404, shortcut, request, user_ct.id, obj.id)
def test_shortcut_view_with_broken_get_absolute_url(self):
"""
Check that the shortcut view does not catch an AttributeError raised
by the model's get_absolute_url method.
Refs #8997.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithBrokenAbsoluteUrl)
obj = FooWithBrokenAbsoluteUrl.objects.create(name="john")
self.assertRaises(AttributeError, shortcut, request, user_ct.id, obj.id)
def test_missing_model(self):
"""
Ensures that displaying content types in admin (or anywhere) doesn't
break on leftover content type records in the DB for which no model
is defined anymore.
"""
ct = ContentType.objects.create(
name = 'Old model',
app_label = 'contenttypes',
model = 'OldModel',
)
self.assertEqual(unicode(ct), u'Old model')
| bsd-3-clause |
delmic/odemis | src/odemis/gui/comp/foldpanelbar.py | 2 | 14600 | # -*- coding: utf-8 -*-
"""
:author: Rinze de Laat
:copyright: © 2012 Rinze de Laat, Delmic
.. license::
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
from odemis.gui import img, BG_COLOUR_MAIN
from odemis.gui.util.conversion import change_brightness, wxcol_to_frgb
import wx
CAPTION_BAR_SIZE = (-1, 40)
CAPTION_PADDING_LEFT = 10
CAPTION_PADDING_RIGHT = 6
SCROLLBAR_WIDTH = 0
wxEVT_CAPTIONBAR = wx.NewEventType()
EVT_CAPTIONBAR = wx.PyEventBinder(wxEVT_CAPTIONBAR, 0)
class FoldPanelBar(wx.Panel):
""" This window can be be used as a vertical side bar which may contain foldable sub panels
created using the FoldPanelItem class.
For proper scrolling, this window should be placed inside a Sizer inside a wx.ScrolledWindow.
"""
def __init__(self, parent, id=-1, pos=(0, 0), size=wx.DefaultSize,
style=wx.TAB_TRAVERSAL | wx.NO_BORDER):
wx.Panel.__init__(self, parent, id, pos, size, style)
assert isinstance(self.Parent, wx.ScrolledWindow)
self._sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self._sizer)
self.Bind(EVT_CAPTIONBAR, self.on_caption_press)
self.Bind(wx.EVT_SIZE, self.OnSize)
global SCROLLBAR_WIDTH
SCROLLBAR_WIDTH = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)
assert isinstance(parent, wx.ScrolledWindow)
def on_caption_press(self, evt):
if evt.get_fold_status():
evt.get_tag().collapse()
else:
evt.get_tag().expand()
def has_vert_scrollbar(self):
size = self.Parent.GetSize()
vsize = self.Parent.GetVirtualSize()
return vsize[1] > size[1]
def has_horz_scrollbar(self):
size = self.Parent.GetSize()
vsize = self.Parent.GetVirtualSize()
return vsize[0] > size[0]
def OnSize(self, evt):
self.SetSize(self.Parent.GetVirtualSize())
evt.Skip()
##############################
# Fold panel items mutations
##############################
def add_item(self, item):
""" Add a foldpanel item to the bar """
assert isinstance(item, FoldPanelItem)
self._sizer.Add(item, flag=wx.EXPAND)
self.Parent.Layout()
self.Parent.FitInside()
def remove_item(self, item):
assert isinstance(item, FoldPanelItem)
for child in self.GetChildren():
if child == item:
child.Destroy()
self.Parent.Layout()
self.Parent.FitInside()
return
def create_and_add_item(self, label, collapsed):
item = FoldPanelItem(self, label=label, collapsed=collapsed)
self.add_item(item)
return item
def Refresh(self, *args, **kwargs):
wx.Panel.Refresh(self, *args, **kwargs)
# self.Parent.Layout()
self.Parent.FitInside()
class FoldPanelItem(wx.Panel):
""" A foldable panel which should be placed inside a
:py:class:`FoldPanelBar` object.
This class uses a CaptionBar object as a clickable button which allows it
to hide and show its content.
The main layout mechanism used is a vertical BoxSizer. The adding and
removing of child elements should be done using the sub window mutation
methods.
"""
def __init__(self, parent, id=-1, pos=(0, 0), size=wx.DefaultSize,
style=wx.TAB_TRAVERSAL | wx.NO_BORDER, label="",
collapsed=False, nocaption=False):
wx.Panel.__init__(self, parent, id, pos, size, style)
assert isinstance(parent, FoldPanelBar)
main_sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(main_sizer)
self._caption_bar = None
if not nocaption:
self._caption_bar = CaptionBar(self, label, collapsed)
main_sizer.Add(self._caption_bar, flag=wx.EXPAND | wx.BOTTOM, border=1)
self._container = wx.Panel(self)
self._container.SetBackgroundColour(self.Parent.GetBackgroundColour())
self._container_sizer = wx.BoxSizer(wx.VERTICAL)
self._container.SetSizer(self._container_sizer)
main_sizer.Add(self._container, flag=wx.EXPAND | wx.BOTTOM, border=1)
self.Bind(EVT_CAPTIONBAR, self.on_caption_press)
def on_caption_press(self, evt):
evt.set_tag(self)
evt.Skip()
def collapse(self):
self._caption_bar.collapse()
self._container.Hide()
self.Refresh()
def expand(self):
self._caption_bar.expand()
self._container.Show()
self.Refresh()
def Show(self, show=True):
wx.Panel.Show(self, show)
self.Refresh()
def Hide(self):
self.Show(False)
def is_expanded(self):
return not self._caption_bar.is_collapsed()
def has_vert_scrollbar(self):
return self.Parent.has_vert_scrollbar()
def Refresh(self, *args, **kwargs):
""" Refresh the ScrolledWindow grandparent, so it and all it's
children will get the appropriate size
"""
self.Parent.Refresh()
##############################
# Sub window mutations
##############################
def add_item(self, item):
""" Add a wx.Window or Sizer to the end of the panel """
if item.Parent != self._container:
item.Reparent(self._container)
self._container_sizer.Add(item, flag=wx.EXPAND | wx.BOTTOM, border=1)
self.Refresh()
def insert_item(self, item, pos):
""" Insert a wx.Window or Sizer into the panel at location `pos` """
if item.Parent != self._container:
item.Reparent(self._container)
self._container_sizer.Insert(pos, item, flag=wx.EXPAND | wx.BOTTOM, border=1)
self.Refresh()
def remove_item(self, item):
""" Remove the given item from the panel """
for child in self._container.GetChildren():
if child == item:
child.Destroy()
self.Refresh()
return
def remove_all(self):
""" Remove all child windows and sizers from the panel """
for child in self._container.GetChildren():
child.Destroy()
self.Refresh()
def children_to_sizer(self):
""" Move all the children into the main sizer
This method is used by the XRC XML handler that constructs
:py:class:`FoldPanelItem`
        objects, so they can just add children in the XRCed program without
        needing to worry about the main (private) sizer of this class.
"""
for child in self.GetChildren():
if (child not in (self._caption_bar, self._container) and
not self._container_sizer.GetItem(child)):
self.add_item(child)
if self._caption_bar and self._caption_bar.is_collapsed():
self.collapse()
self._container_sizer.Layout()
class CaptionBar(wx.Window):
""" A small button like header window that displays the :py:class:`FoldPanelItem`'s title and
allows it to fold and unfold.
"""
def __init__(self, parent, caption, collapsed):
"""
:param parent: Parent window (FoldPanelItem)
:param caption: Header caption (str)
:param collapsed: Draw the CaptionBar collapsed or not (boolean)
"""
wx.Window.__init__(self, parent, wx.ID_ANY, pos=(0, 0),
size=CAPTION_BAR_SIZE, style=wx.NO_BORDER)
self.SetBackgroundStyle(wx.BG_STYLE_PAINT)
# FIXME: on wx4 with GTK2, the background is always redrawn anyway,
# which causes flickering, especially as the default background colour is
# white. As a workaround, we set a less white background.
self.SetBackgroundColour(BG_COLOUR_MAIN)
self._collapsed = collapsed # The current state of the CaptionBar
self._caption = caption
self._mouse_hovering = False
self._logo = None # wx.Bitmap or None
# Set Icons
self._icon_size = wx.Size(16, 16)
self._foldIcons = wx.ImageList(self._icon_size.x, self._icon_size.y)
bmp = img.getBitmap("icon/arr_down.png")
self._foldIcons.Add(bmp)
bmp = img.getBitmap("icon/arr_right.png")
self._foldIcons.Add(bmp)
self.Bind(wx.EVT_PAINT, self.on_paint)
if isinstance(self.Parent, FoldPanelItem):
self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
def set_caption(self, caption):
self._caption = caption
def set_logo(self, logo):
"""
logo (wx.Bitmap or None): bitmap to display on the right. If None, nothing
will be shown
"""
self._logo = logo
def is_collapsed(self):
""" Returns wether the status of the bar is expanded or collapsed. """
return self._collapsed
def collapse(self):
""" Set the internal state of the CaptionBar as collapsed
:note: This does not trigger a L{CaptionBarEvent} to be sent to the parent.
"""
self._collapsed = True
self.redraw_icon_bitmap()
def expand(self):
""" Set the internal state of the CaptionBar as expanded
:note: This does not trigger a L{CaptionBarEvent} to be sent to the parent.
"""
self._collapsed = False
self.redraw_icon_bitmap()
def on_paint(self, _):
""" Handle the ``wx.EVT_PAINT`` event for L{CaptionBar} """
dc = wx.PaintDC(self)
win_rect = self.GetRect()
self._draw_gradient(dc, win_rect)
caption_font = self.Parent.GetFont()
dc.SetFont(caption_font)
if isinstance(self.Parent, FoldPanelItem):
dc.SetTextForeground(self.Parent.GetForegroundColour())
else:
dc.SetTextForeground(self.GetForegroundColour())
y_pos = (win_rect.GetHeight() - abs(caption_font.GetPixelSize().GetHeight())) // 2
dc.DrawText(self._caption, CAPTION_PADDING_LEFT, y_pos)
if self._logo:
dc.DrawBitmap(self._logo,
self.Parent.Size.x
-self._logo.Width - 20 # 20 = extra padding for logo
-self._icon_size.x - CAPTION_PADDING_RIGHT,
(win_rect.Height - self._logo.Height) // 2)
# Only draw the icon if it's part of a FoldPanelItem
if isinstance(self.Parent, FoldPanelItem):
# draw small icon, either collapsed or expanded
# based on the state of the bar.
index = self._collapsed
x_pos = (self.Parent.Size.x - self._icon_size.x - CAPTION_PADDING_RIGHT)
self._foldIcons.Draw(
index, dc, x_pos,
(win_rect.GetHeight() - self._icon_size.y) // 2,
wx.IMAGELIST_DRAW_TRANSPARENT
)
def _draw_gradient(self, dc, rect):
""" Draw a vertical gradient background, using the background colour as a starting point
"""
if rect.height < 1 or rect.width < 1:
return
dc.SetPen(wx.TRANSPARENT_PEN)
# calculate gradient coefficients
bck_col = wxcol_to_frgb(self.Parent.GetBackgroundColour())
if self._mouse_hovering:
col1 = change_brightness(bck_col, 0.15)
col2 = change_brightness(bck_col, 0.10)
else:
col1 = change_brightness(bck_col, 0.10)
col2 = bck_col
r1, g1, b1 = col1
r2, g2, b2 = col2
rstep = (r2 - r1) / rect.height
gstep = (g2 - g1) / rect.height
bstep = (b2 - b1) / rect.height
rf, gf, bf = col1
for y in range(rect.y, rect.y + rect.height):
cur_col = (rf * 255, gf * 255, bf * 255)
dc.SetBrush(wx.Brush(cur_col, wx.BRUSHSTYLE_SOLID))
dc.DrawRectangle(rect.x, rect.y + (y - rect.y), rect.width, rect.height)
rf = rf + rstep
gf = gf + gstep
bf = bf + bstep
def on_mouse_event(self, event):
""" Mouse event handler """
send_event = False
if event.LeftDown():
# Treat all left-clicks on the caption bar as a toggle event
send_event = True
elif event.LeftDClick():
send_event = True
elif event.Entering():
            # highlight the caption bar while the mouse hovers over it
self._mouse_hovering = True
self.Refresh()
elif event.Leaving():
self._mouse_hovering = False
self.Refresh()
# send the collapse, expand event to the parent
if send_event:
event = CaptionBarEvent(wxEVT_CAPTIONBAR)
event.SetId(self.GetId())
event.SetEventObject(self)
event.set_bar(self)
self.GetEventHandler().ProcessEvent(event)
else:
event.Skip()
def redraw_icon_bitmap(self):
""" Redraws the icons (if they exists). """
rect = self.GetRect()
padding_right = CAPTION_PADDING_RIGHT
if isinstance(self.Parent, FoldPanelItem) and not self.Parent.has_vert_scrollbar():
padding_right += SCROLLBAR_WIDTH
x_pos = self.Parent.Parent.Size.x - self._icon_size.x - padding_right
rect.SetX(x_pos)
rect.SetWidth(self._icon_size.x + padding_right)
self.RefreshRect(rect)
class CaptionBarEvent(wx.PyCommandEvent):
""" Custom event class containing extra data """
def __init__(self, evt_type):
wx.PyCommandEvent.__init__(self, evt_type)
self._bar = None
self._parent_foldbar = None
def get_fold_status(self):
return not self._bar.is_collapsed()
def get_bar(self):
""" Returns the selected L{CaptionBar}. """
return self._bar
def set_tag(self, tag):
self._parent_foldbar = tag
def get_tag(self):
""" Returns the tag assigned to the selected L{CaptionBar}. """
return self._parent_foldbar
def set_bar(self, foldbar):
self._bar = foldbar
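# --- Illustrative sketch (not part of the original module): typical assembly
# of a sidebar. The parent must be a wx.ScrolledWindow, as asserted in
# FoldPanelBar.__init__; the helper name below is hypothetical.
def _build_sidebar(scrolled_parent):
    bar = FoldPanelBar(scrolled_parent)
    panel = bar.create_and_add_item("Settings", collapsed=False)
    panel.add_item(wx.StaticText(panel, label="content goes here"))
    return bar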
| gpl-2.0 |
oilshell/blog-code | fd-passing/server.py | 1 | 2964 | #!/usr/bin/env python3
"""
server.py
https://pymotw.com/2/socket/uds.html
"""
from __future__ import print_function
import errno
import optparse
import os
import socket
import sys
import subprocess
import py_fanos
from py_fanos import log
def main(argv):
p = optparse.OptionParser(__doc__)
p.add_option(
'--socket-path', dest='socket_path', default=None,
help='Socket path to connect to')
p.add_option(
'--socket-fd', dest='socket_fd', type='int', default=None,
help='File descriptor for our end of socketpair()')
opts, _ = p.parse_args(argv[1:])
if opts.socket_path: # PATH like /tmp/c5po.socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Make sure the socket does not already exist
# If we don't do this, we get 'address already in use'
try:
os.unlink(opts.socket_path)
except OSError as e:
if e.errno != errno.ENOENT: # some error deleting it
raise
log('Binding to %s', opts.socket_path)
sock.bind(opts.socket_path)
# Listen for incoming connections
try:
sock.listen(1)
except OSError as e:
log('listen error: %s', e)
# TODO: Should we MAINTAIN the connections?
#
# We don't need netstrings if the client opens and closes
# every time? But that's slower for coprocesses.
#
# A typical entry requires 3 commands: prompt, execute, and dump-state
# ECMD echo ${PS1@P}
# ECMD cd /
# ECMD dump-state
# Wait for a connection
log('accept()')
try:
conn, client_address = sock.accept()
except OSError as e:
log("accept error: %s", e)
# Uh what, you don't have to listen() here! socketpair() is different?
conn = sock
else:
log('Connection from %r', client_address)
elif opts.socket_fd:
fd = opts.socket_fd
log('server.py got fd %d', fd)
log('server.py descriptor state')
os.system('ls -l /proc/%d/fd' % os.getpid())
# This creates a NEW SOCKET, which is bad
#sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, fileno=fd)
log('socket %s from FD %d', sock, fd)
# Weird
conn = sock
else:
raise AssertionError()
try:
while True:
# Note: This can raise various exceptions
fd_out = []
msg = py_fanos.recv(conn, fd_out=fd_out)
if msg is None:
break # valid EOF
fd = fd_out[0]
# Why isn't 'ls' enough?
p = subprocess.Popen(['ls', '--color=auto'], stdout=fd)
status = p.wait()
#log('status = %d', status)
#p = subprocess.Popen(['sleep', '1'])
#status = p.wait()
# Close so we don't leak
os.close(fd)
py_fanos.send(conn, b'OK')
log('')
finally:
conn.close()
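# --- Illustrative sketch (not part of the original server): the raw
# SCM_RIGHTS primitive that py_fanos wraps. Python 3.9+ exposes it directly
# as socket.send_fds()/recv_fds(); the FANOS protocol additionally frames the
# payload as a netstring, which this sketch omits.
def _send_fd_demo(socket_path, fd):
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(socket_path)
    socket.send_fds(sock, [b'ECMD ls'], [fd])  # requires Python 3.9+
    sock.close()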
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError as e:
print('FATAL: %s' % e, file=sys.stderr)
sys.exit(1)
| apache-2.0 |
S2R2/viper | viper/modules/rats/pandora.py | 6 | 3215 | # Originally written by Kevin Breen (@KevTheHermit):
# https://github.com/kevthehermit/RATDecoders/blob/master/Pandora.py
import pefile
def version_21(raw_config):
if raw_config != None:
conf_dict = {}
conf_dict['Version'] = '2.1'
conf_dict['Domain'] = raw_config[0]
conf_dict['Port'] = raw_config[1]
conf_dict['Password'] = raw_config[2]
conf_dict['Install Path'] = raw_config[3]
conf_dict['Install Name'] = raw_config[4]
conf_dict['HKCU Key'] = raw_config[5]
conf_dict['ActiveX Key'] = raw_config[6]
conf_dict['Install Flag'] = raw_config[7]
conf_dict['StartupFlag'] = raw_config[8]
conf_dict['ActiveXFlag'] = raw_config[9]
conf_dict['HKCU Flag'] = raw_config[10]
conf_dict['Mutex'] = raw_config[11]
conf_dict['userMode Hooking'] = raw_config[12]
        conf_dict['Melt'] = raw_config[13]
conf_dict['Keylogger'] = raw_config[14]
conf_dict['Campaign ID'] = raw_config[15]
conf_dict['UnknownFlag9'] = raw_config[16]
return conf_dict
else:
return None
def version_22(raw_config):
    if raw_config is not None:
conf_dict = {}
conf_dict['Version'] = '2.2'
conf_dict['Domain'] = raw_config[0]
conf_dict['Port'] = raw_config[1]
conf_dict['Password'] = raw_config[2]
conf_dict['Install Path'] = raw_config[3]
conf_dict['Install Name'] = raw_config[4]
conf_dict['HKCU Key'] = raw_config[5]
conf_dict['ActiveX Key'] = raw_config[6]
conf_dict['Install Flag'] = raw_config[7]
conf_dict['StartupFlag'] = raw_config[8]
conf_dict['ActiveXFlag'] = raw_config[9]
conf_dict['HKCU Flag'] = raw_config[10]
conf_dict['Mutex'] = raw_config[11]
conf_dict['userMode Hooking'] = raw_config[12]
        conf_dict['Melt'] = raw_config[13]
conf_dict['Keylogger'] = raw_config[14]
conf_dict['Campaign ID'] = raw_config[15]
conf_dict['UnknownFlag9'] = raw_config[16]
return conf_dict
else:
return None
def get_config(data):
try:
pe = pefile.PE(data=data)
rt_string_idx = [
entry.id for entry in
pe.DIRECTORY_ENTRY_RESOURCE.entries].index(pefile.RESOURCE_TYPE['RT_RCDATA'])
rt_string_directory = pe.DIRECTORY_ENTRY_RESOURCE.entries[rt_string_idx]
for entry in rt_string_directory.directory.entries:
if str(entry.name) == "CFG":
data_rva = entry.directory.entries[0].data.struct.OffsetToData
size = entry.directory.entries[0].data.struct.Size
data = pe.get_memory_mapped_image()[data_rva:data_rva+size]
cleaned = data.replace('\x00', '')
raw_config = cleaned.split('##')
return raw_config
    except Exception:
return
def config(data):
raw_config = get_config(data)
if raw_config:
if len(raw_config) == 19:
return version_21(raw_config)
if len(raw_config) == 20:
return version_22(raw_config)
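# Illustrative sketch (not part of the original decoder): typical standalone
# use -- read a sample off disk and dump the decoded configuration. The
# helper name is hypothetical, and the print statement assumes Python 2,
# matching the rest of this module.
def _dump_config(path):
    with open(path, 'rb') as handle:
        conf = config(handle.read())
    if conf is not None:
        for key in sorted(conf):
            print '%s: %s' % (key, conf[key])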
| bsd-3-clause |
Livit/Livit.Learn.EdX | lms/djangoapps/courseware/tests/test_view_authentication.py | 23 | 17100 | import datetime
import pytz
from django.core.urlresolvers import reverse
from mock import patch
from nose.plugins.attrib import attr
from courseware.access import has_access
from courseware.tests.helpers import CourseAccessTestMixin, LoginEnrollmentTestCase
from courseware.tests.factories import (
BetaTesterFactory,
StaffFactory,
GlobalStaffFactory,
InstructorFactory,
OrgStaffFactory,
OrgInstructorFactory,
)
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory
@attr('shard_1')
class TestViewAuth(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Check that view authentication works properly.
"""
ACCOUNT_INFO = [('[email protected]', 'foo'), ('[email protected]', 'foo')]
@staticmethod
def _reverse_urls(names, course):
"""
Reverse a list of course urls.
`names` is a list of URL names that correspond to sections in a course.
`course` is the instance of CourseDescriptor whose section URLs are to be returned.
Returns a list URLs corresponding to section in the passed in course.
"""
return [reverse(name, kwargs={'course_id': course.id.to_deprecated_string()})
for name in names]
def _check_non_staff_light(self, course):
"""
Check that non-staff have access to light urls.
`course` is an instance of CourseDescriptor.
"""
urls = [reverse('about_course', kwargs={'course_id': course.id.to_deprecated_string()}),
reverse('courses')]
for url in urls:
self.assert_request_status_code(200, url)
def _check_non_staff_dark(self, course):
"""
Check that non-staff don't have access to dark urls.
"""
names = ['courseware', 'instructor_dashboard', 'progress']
urls = self._reverse_urls(names, course)
urls.extend([
reverse('book', kwargs={'course_id': course.id.to_deprecated_string(),
'book_index': index})
for index, __ in enumerate(course.textbooks)
])
for url in urls:
self.assert_request_status_code(404, url)
def _check_staff(self, course):
"""
Check that access is right for staff in course.
"""
names = ['about_course', 'instructor_dashboard', 'progress']
urls = self._reverse_urls(names, course)
urls.extend([
reverse('book', kwargs={'course_id': course.id.to_deprecated_string(),
'book_index': index})
for index in xrange(len(course.textbooks))
])
for url in urls:
self.assert_request_status_code(200, url)
# The student progress tab is not accessible to a student
# before launch, so the instructor view-as-student feature
# should return a 404 as well.
# TODO (vshnayder): If this is not the behavior we want, will need
# to make access checking smarter and understand both the effective
# user (the student), and the requesting user (the prof)
url = reverse(
'student_progress',
kwargs={
'course_id': course.id.to_deprecated_string(),
'student_id': self.enrolled_user.id,
}
)
self.assert_request_status_code(404, url)
# The courseware url should redirect, not 200
url = self._reverse_urls(['courseware'], course)[0]
self.assert_request_status_code(302, url)
def login(self, user):
return super(TestViewAuth, self).login(user.email, 'test')
def setUp(self):
super(TestViewAuth, self).setUp()
self.course = CourseFactory.create(number='999', display_name='Robot_Super_Course')
self.courseware_chapter = ItemFactory.create(display_name='courseware')
self.overview_chapter = ItemFactory.create(
parent_location=self.course.location,
display_name='Super Overview'
)
self.welcome_section = ItemFactory.create(
parent_location=self.overview_chapter.location,
display_name='Super Welcome'
)
self.welcome_unit = ItemFactory.create(
parent_location=self.welcome_section.location,
display_name='Super Unit'
)
self.course = modulestore().get_course(self.course.id)
self.test_course = CourseFactory.create(org=self.course.id.org)
self.other_org_course = CourseFactory.create(org='Other_Org_Course')
self.sub_courseware_chapter = ItemFactory.create(
parent_location=self.test_course.location,
display_name='courseware'
)
self.sub_overview_chapter = ItemFactory.create(
parent_location=self.sub_courseware_chapter.location,
display_name='Overview'
)
self.sub_welcome_section = ItemFactory.create(
parent_location=self.sub_overview_chapter.location,
display_name='Welcome'
)
self.sub_welcome_unit = ItemFactory.create(
parent_location=self.sub_welcome_section.location,
display_name='New Unit'
)
self.test_course = modulestore().get_course(self.test_course.id)
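        # Users covering every access level: global staff, (un)enrolled students,
        # course staff and instructor, and org-wide staff and instructor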
self.global_staff_user = GlobalStaffFactory()
self.unenrolled_user = UserFactory(last_name="Unenrolled")
self.enrolled_user = UserFactory(last_name="Enrolled")
CourseEnrollmentFactory(user=self.enrolled_user, course_id=self.course.id)
CourseEnrollmentFactory(user=self.enrolled_user, course_id=self.test_course.id)
self.staff_user = StaffFactory(course_key=self.course.id)
self.instructor_user = InstructorFactory(course_key=self.course.id)
self.org_staff_user = OrgStaffFactory(course_key=self.course.id)
self.org_instructor_user = OrgInstructorFactory(course_key=self.course.id)
def test_redirection_unenrolled(self):
"""
        Verify an unenrolled student is redirected to the course 'about' page
        instead of the 'Welcome' section after clicking on the courseware tab.
"""
self.login(self.unenrolled_user)
response = self.client.get(reverse('courseware',
kwargs={'course_id': self.course.id.to_deprecated_string()}))
self.assertRedirects(
response,
reverse(
'about_course',
args=[self.course.id.to_deprecated_string()]
)
)
def test_redirection_enrolled(self):
"""
        Verify an enrolled student is redirected to the 'Welcome' section of
        the chapter after clicking on the courseware tab.
"""
self.login(self.enrolled_user)
response = self.client.get(
reverse(
'courseware',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
)
self.assertRedirects(
response,
reverse(
'courseware_section',
kwargs={'course_id': self.course.id.to_deprecated_string(),
'chapter': self.overview_chapter.url_name,
'section': self.welcome_section.url_name}
)
)
def test_instructor_page_access_nonstaff(self):
"""
        Verify a non-staff user cannot load the instructor
        dashboard, the grade views, or student profile pages.
"""
self.login(self.enrolled_user)
urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),
reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]
# Shouldn't be able to get to the instructor pages
for url in urls:
self.assert_request_status_code(404, url)
def test_staff_course_access(self):
"""
        Verify course staff can load the instructor dashboard, the grade views,
        and student profile pages for their own course, but not for others.
"""
self.login(self.staff_user)
# Now should be able to get to self.course, but not self.test_course
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.assert_request_status_code(200, url)
url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})
self.assert_request_status_code(404, url)
def test_instructor_course_access(self):
"""
Verify instructor can load the instructor dashboard, the grade views,
and student profile pages for their course.
"""
self.login(self.instructor_user)
# Now should be able to get to self.course, but not self.test_course
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.assert_request_status_code(200, url)
url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})
self.assert_request_status_code(404, url)
def test_org_staff_access(self):
"""
        Verify org staff can load the instructor dashboard, the grade views,
        and student profile pages for courses in their org, but not in other orgs.
"""
self.login(self.org_staff_user)
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.assert_request_status_code(200, url)
url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})
self.assert_request_status_code(200, url)
url = reverse('instructor_dashboard', kwargs={'course_id': self.other_org_course.id.to_deprecated_string()})
self.assert_request_status_code(404, url)
def test_org_instructor_access(self):
"""
        Verify org instructor can load the instructor dashboard, the grade views,
        and student profile pages for courses in their org, but not in other orgs.
"""
self.login(self.org_instructor_user)
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.assert_request_status_code(200, url)
url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})
self.assert_request_status_code(200, url)
url = reverse('instructor_dashboard', kwargs={'course_id': self.other_org_course.id.to_deprecated_string()})
self.assert_request_status_code(404, url)
def test_global_staff_access(self):
"""
Verify the global staff user can access any course.
"""
self.login(self.global_staff_user)
# and now should be able to load both
urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),
reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]
for url in urls:
self.assert_request_status_code(200, url)
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_dark_launch_enrolled_student(self):
"""
Make sure that before course start, students can't access course
pages.
"""
# Make courses start in the future
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
self.course.start = tomorrow
self.test_course.start = tomorrow
self.course = self.update_course(self.course, self.user.id)
self.test_course = self.update_course(self.test_course, self.user.id)
self.assertFalse(self.course.has_started())
self.assertFalse(self.test_course.has_started())
# First, try with an enrolled student
self.login(self.enrolled_user)
# shouldn't be able to get to anything except the light pages
self._check_non_staff_light(self.course)
self._check_non_staff_dark(self.course)
self._check_non_staff_light(self.test_course)
self._check_non_staff_dark(self.test_course)
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_dark_launch_instructor(self):
"""
Make sure that before course start instructors can access the
page for their course.
"""
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
self.course.start = tomorrow
self.test_course.start = tomorrow
self.course = self.update_course(self.course, self.user.id)
self.test_course = self.update_course(self.test_course, self.user.id)
self.login(self.instructor_user)
# Enroll in the classes---can't see courseware otherwise.
self.enroll(self.course, True)
self.enroll(self.test_course, True)
        # Should see everything in self.course; test_course stays dark for this instructor
self._check_non_staff_light(self.test_course)
self._check_non_staff_dark(self.test_course)
self._check_staff(self.course)
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_dark_launch_global_staff(self):
"""
        Make sure that before course start, global staff can access
        course pages.
"""
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
self.course.start = tomorrow
self.test_course.start = tomorrow
self.course = self.update_course(self.course, self.user.id)
self.test_course = self.update_course(self.test_course, self.user.id)
self.login(self.global_staff_user)
self.enroll(self.course, True)
self.enroll(self.test_course, True)
# and now should be able to load both
self._check_staff(self.course)
self._check_staff(self.test_course)
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_enrollment_period(self):
"""
Check that enrollment periods work.
"""
# Make courses start in the future
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
yesterday = now - datetime.timedelta(days=1)
# self.course's enrollment period hasn't started
self.course.enrollment_start = tomorrow
self.course.enrollment_end = nextday
        # test_course's enrollment period has already started
self.test_course.enrollment_start = yesterday
self.test_course.enrollment_end = tomorrow
self.course = self.update_course(self.course, self.user.id)
self.test_course = self.update_course(self.test_course, self.user.id)
        # First, try with an unenrolled student
self.login(self.unenrolled_user)
self.assertFalse(self.enroll(self.course))
self.assertTrue(self.enroll(self.test_course))
# Then, try as an instructor
self.logout()
self.login(self.instructor_user)
self.assertTrue(self.enroll(self.course))
# Then, try as global staff
self.logout()
self.login(self.global_staff_user)
        self.assertTrue(self.enroll(self.course))


@attr('shard_1')
class TestBetatesterAccess(ModuleStoreTestCase, CourseAccessTestMixin):
"""
Tests for the beta tester feature
"""
def setUp(self):
super(TestBetatesterAccess, self).setUp()
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
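        # The course starts tomorrow, but days_early_for_beta=2 opens it to beta testers now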
self.course = CourseFactory(days_early_for_beta=2, start=tomorrow)
self.content = ItemFactory(parent=self.course)
self.normal_student = UserFactory()
self.beta_tester = BetaTesterFactory(course_key=self.course.id)
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_course_beta_period(self):
"""
Check that beta-test access works for courses.
"""
self.assertFalse(self.course.has_started())
self.assertCannotAccessCourse(self.normal_student, 'load', self.course)
self.assertCanAccessCourse(self.beta_tester, 'load', self.course)
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_content_beta_period(self):
"""
Check that beta-test access works for content.
"""
        # the normal student shouldn't see the content yet
self.assertFalse(has_access(self.normal_student, 'load', self.content, self.course.id))
        # the beta tester, however, should see it
self.assertTrue(has_access(self.beta_tester, 'load', self.content, self.course.id))
| agpl-3.0 |
AndrewGrossman/django | tests/template_backends/test_django.py | 199 | 4793 | from template_tests.test_response import test_processor_name
from django.template import RequestContext
from django.template.backends.django import DjangoTemplates
from django.template.library import InvalidTemplateLibrary
from django.test import RequestFactory, ignore_warnings, override_settings
from django.utils.deprecation import RemovedInDjango110Warning
from .test_dummy import TemplateStringsTests


class DjangoTemplatesTests(TemplateStringsTests):
engine_class = DjangoTemplates
backend_name = 'django'
def test_context_has_priority_over_template_context_processors(self):
# See ticket #23789.
engine = DjangoTemplates({
'DIRS': [],
'APP_DIRS': False,
'NAME': 'django',
'OPTIONS': {
'context_processors': [test_processor_name],
},
})
template = engine.from_string('{{ processors }}')
request = RequestFactory().get('/')
# Check that context processors run
content = template.render({}, request)
self.assertEqual(content, 'yes')
# Check that context overrides context processors
content = template.render({'processors': 'no'}, request)
self.assertEqual(content, 'no')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_request_context_conflicts_with_request(self):
template = self.engine.from_string('hello')
request = RequestFactory().get('/')
request_context = RequestContext(request)
# This doesn't raise an exception.
template.render(request_context, request)
other_request = RequestFactory().get('/')
msg = ("render() was called with a RequestContext and a request "
"argument which refer to different requests. Make sure "
"that the context argument is a dict or at least that "
"the two arguments refer to the same request.")
with self.assertRaisesMessage(ValueError, msg):
template.render(request_context, other_request)
@override_settings(INSTALLED_APPS=['template_backends.apps.good'])
def test_templatetag_discovery(self):
engine = DjangoTemplates({
'DIRS': [],
'APP_DIRS': False,
'NAME': 'django',
'OPTIONS': {
'libraries': {
'alternate': 'template_backends.apps.good.templatetags.good_tags',
'override': 'template_backends.apps.good.templatetags.good_tags',
},
},
})
# libraries are discovered from installed applications
self.assertEqual(
engine.engine.libraries['good_tags'],
'template_backends.apps.good.templatetags.good_tags',
)
self.assertEqual(
engine.engine.libraries['subpackage.tags'],
'template_backends.apps.good.templatetags.subpackage.tags',
)
# libraries are discovered from django.templatetags
self.assertEqual(
engine.engine.libraries['static'],
'django.templatetags.static',
)
# libraries passed in OPTIONS are registered
self.assertEqual(
engine.engine.libraries['alternate'],
'template_backends.apps.good.templatetags.good_tags',
)
# libraries passed in OPTIONS take precedence over discovered ones
self.assertEqual(
engine.engine.libraries['override'],
'template_backends.apps.good.templatetags.good_tags',
)
@override_settings(INSTALLED_APPS=['template_backends.apps.importerror'])
def test_templatetag_discovery_import_error(self):
"""
Import errors in tag modules should be reraised with a helpful message.
"""
with self.assertRaisesMessage(
InvalidTemplateLibrary,
"ImportError raised when trying to load "
"'template_backends.apps.importerror.templatetags.broken_tags'"
):
DjangoTemplates({
'DIRS': [],
'APP_DIRS': False,
'NAME': 'django',
'OPTIONS': {},
})
def test_builtins_discovery(self):
engine = DjangoTemplates({
'DIRS': [],
'APP_DIRS': False,
'NAME': 'django',
'OPTIONS': {
'builtins': ['template_backends.apps.good.templatetags.good_tags'],
},
})
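        # Custom builtins are appended after Django's default builtin libraries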
self.assertEqual(
engine.engine.builtins, [
'django.template.defaulttags',
'django.template.defaultfilters',
'django.template.loader_tags',
'template_backends.apps.good.templatetags.good_tags',
]
)
| bsd-3-clause |