import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._notebook_workspaces_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_database_account_request,
build_list_connection_info_request,
build_regenerate_auth_token_request,
build_start_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NotebookWorkspacesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.cosmosdb.aio.CosmosDBManagementClient`'s
:attr:`notebook_workspaces` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_database_account(
self, resource_group_name: str, account_name: str, **kwargs: Any
) -> AsyncIterable["_models.NotebookWorkspace"]:
"""Gets the notebook workspace resources of an existing Cosmos DB account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either NotebookWorkspace or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.NotebookWorkspace]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.NotebookWorkspaceListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_database_account_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_database_account.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("NotebookWorkspaceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_database_account.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces"} # type: ignore
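# Illustrative usage sketch (not part of the generated operations class): the pager
# returned above can be consumed with `async for`. The credential type, resource group
# and account names below are placeholders.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient
#
#     async with CosmosDBManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#         async for workspace in client.notebook_workspaces.list_by_database_account(
#             resource_group_name="<resource-group>", account_name="<account-name>"
#         ):
#             print(workspace.name)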
@distributed_trace_async
async def get(
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
**kwargs: Any
) -> _models.NotebookWorkspace:
"""Gets the notebook workspace for a Cosmos DB account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param notebook_workspace_name: The name of the notebook workspace resource. "default"
Required.
:type notebook_workspace_name: str or ~azure.mgmt.cosmosdb.models.NotebookWorkspaceName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NotebookWorkspace or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.NotebookWorkspace
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.NotebookWorkspace]
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
notebook_workspace_name=notebook_workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("NotebookWorkspace", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}"} # type: ignore
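# Hedged example (illustrative only, assuming `client` is an authenticated
# CosmosDBManagementClient as sketched after list_by_database_account above):
#
#     workspace = await client.notebook_workspaces.get(
#         resource_group_name="<resource-group>",
#         account_name="<account-name>",
#         notebook_workspace_name="default",
#     )
#     print(workspace.id)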
async def _create_or_update_initial(
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
notebook_create_update_parameters: Union[_models.NotebookWorkspaceCreateUpdateParameters, IO],
**kwargs: Any
) -> _models.NotebookWorkspace:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.NotebookWorkspace]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(notebook_create_update_parameters, (IO, bytes)):
_content = notebook_create_update_parameters
else:
_json = self._serialize.body(notebook_create_update_parameters, "NotebookWorkspaceCreateUpdateParameters")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
notebook_workspace_name=notebook_workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("NotebookWorkspace", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}"} # type: ignore
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
notebook_create_update_parameters: _models.NotebookWorkspaceCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.NotebookWorkspace]:
"""Creates the notebook workspace for a Cosmos DB account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param notebook_workspace_name: The name of the notebook workspace resource. "default"
Required.
:type notebook_workspace_name: str or ~azure.mgmt.cosmosdb.models.NotebookWorkspaceName
:param notebook_create_update_parameters: The notebook workspace to create for the current
database account. Required.
:type notebook_create_update_parameters:
~azure.mgmt.cosmosdb.models.NotebookWorkspaceCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NotebookWorkspace or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.NotebookWorkspace]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
notebook_create_update_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.NotebookWorkspace]:
"""Creates the notebook workspace for a Cosmos DB account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param notebook_workspace_name: The name of the notebook workspace resource. "default"
Required.
:type notebook_workspace_name: str or ~azure.mgmt.cosmosdb.models.NotebookWorkspaceName
:param notebook_create_update_parameters: The notebook workspace to create for the current
database account. Required.
:type notebook_create_update_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NotebookWorkspace or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.NotebookWorkspace]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
notebook_create_update_parameters: Union[_models.NotebookWorkspaceCreateUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.NotebookWorkspace]:
"""Creates the notebook workspace for a Cosmos DB account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param notebook_workspace_name: The name of the notebook workspace resource. "default"
Required.
:type notebook_workspace_name: str or ~azure.mgmt.cosmosdb.models.NotebookWorkspaceName
:param notebook_create_update_parameters: The notebook workspace to create for the current
database account. Is either a model type or an IO type. Required.
:type notebook_create_update_parameters:
~azure.mgmt.cosmosdb.models.NotebookWorkspaceCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NotebookWorkspace or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.NotebookWorkspace]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.NotebookWorkspace]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
notebook_workspace_name=notebook_workspace_name,
notebook_create_update_parameters=notebook_create_update_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("NotebookWorkspace", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}"} # type: ignore
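# Illustrative LRO sketch (assumes the same placeholder `client` as above, and assumes an
# empty NotebookWorkspaceCreateUpdateParameters() is sufficient to create the "default"
# workspace). Awaiting `result()` polls the operation to completion.
#
#     from azure.mgmt.cosmosdb.models import NotebookWorkspaceCreateUpdateParameters
#
#     poller = await client.notebook_workspaces.begin_create_or_update(
#         "<resource-group>", "<account-name>", "default",
#         NotebookWorkspaceCreateUpdateParameters(),
#     )
#     workspace = await poller.result()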
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
notebook_workspace_name=notebook_workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}"} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the notebook workspace for a Cosmos DB account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param notebook_workspace_name: The name of the notebook workspace resource. "default"
Required.
:type notebook_workspace_name: str or ~azure.mgmt.cosmosdb.models.NotebookWorkspaceName
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
notebook_workspace_name=notebook_workspace_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}"} # type: ignore
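# Sketch of the `continuation_token` keyword documented above (illustrative only): a
# poller's state can be saved and the operation resumed later from that saved state.
#
#     poller = await client.notebook_workspaces.begin_delete(
#         "<resource-group>", "<account-name>", "default"
#     )
#     token = poller.continuation_token()   # can be persisted elsewhere
#     resumed = await client.notebook_workspaces.begin_delete(
#         "<resource-group>", "<account-name>", "default", continuation_token=token
#     )
#     await resumed.result()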
@distributed_trace_async
async def list_connection_info(
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
**kwargs: Any
) -> _models.NotebookWorkspaceConnectionInfoResult:
"""Retrieves the connection info for the notebook workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param notebook_workspace_name: The name of the notebook workspace resource. "default"
Required.
:type notebook_workspace_name: str or ~azure.mgmt.cosmosdb.models.NotebookWorkspaceName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NotebookWorkspaceConnectionInfoResult or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.NotebookWorkspaceConnectionInfoResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.NotebookWorkspaceConnectionInfoResult]
request = build_list_connection_info_request(
resource_group_name=resource_group_name,
account_name=account_name,
notebook_workspace_name=notebook_workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_connection_info.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("NotebookWorkspaceConnectionInfoResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connection_info.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}/listConnectionInfo"} # type: ignore
async def _regenerate_auth_token_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_regenerate_auth_token_request(
resource_group_name=resource_group_name,
account_name=account_name,
notebook_workspace_name=notebook_workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._regenerate_auth_token_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_regenerate_auth_token_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}/regenerateAuthToken"} # type: ignore
@distributed_trace_async
async def begin_regenerate_auth_token(
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Regenerates the auth token for the notebook workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param notebook_workspace_name: The name of the notebook workspace resource. "default"
Required.
:type notebook_workspace_name: str or ~azure.mgmt.cosmosdb.models.NotebookWorkspaceName
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._regenerate_auth_token_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
notebook_workspace_name=notebook_workspace_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_regenerate_auth_token.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}/regenerateAuthToken"} # type: ignore
async def _start_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_start_request(
resource_group_name=resource_group_name,
account_name=account_name,
notebook_workspace_name=notebook_workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._start_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}/start"} # type: ignore
@distributed_trace_async
async def begin_start(
self,
resource_group_name: str,
account_name: str,
notebook_workspace_name: Union[str, _models.NotebookWorkspaceName],
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Starts the notebook workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param notebook_workspace_name: The name of the notebook workspace resource. "default"
Required.
:type notebook_workspace_name: str or ~azure.mgmt.cosmosdb.models.NotebookWorkspaceName
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
notebook_workspace_name=notebook_workspace_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}/start"} # type: ignore
{
"content_hash": "d488a232e49deb6acc819af0e043e6ef",
"timestamp": "",
"source": "github",
"line_count": 914,
"max_line_length": 264,
"avg_line_length": 48.7691466083151,
"alnum_prop": 0.6522938867077959,
"repo_name": "Azure/azure-sdk-for-python",
"id": "28205c6250ea568f9de842ccabac2c6375097994",
"size": "45075",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/aio/operations/_notebook_workspaces_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
from frappe.model.document import Document
class WorkflowActionMaster(Document):
pass
{
"content_hash": "5c2af89d8bf97a11530c6a4e018f7fe1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 42,
"avg_line_length": 17.8,
"alnum_prop": 0.8314606741573034,
"repo_name": "frappe/frappe",
"id": "95c7aa7cee704624e0a3a5385e1c832bcce4252f",
"size": "177",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "frappe/workflow/doctype/workflow_action_master/workflow_action_master.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65093"
},
{
"name": "HTML",
"bytes": "250850"
},
{
"name": "JavaScript",
"bytes": "2523337"
},
{
"name": "Less",
"bytes": "10921"
},
{
"name": "Python",
"bytes": "3618097"
},
{
"name": "SCSS",
"bytes": "261690"
},
{
"name": "Vue",
"bytes": "98456"
}
],
"symlink_target": ""
}
'''
DEMONYMS
FUNCTIONS OF MAI-ANLP DEMONYM PROJECT
AUTHORS: DAVID SANCHEZ & ALEX PARDO
'''
# IMPORTS
import csv
import re
from string import *
import sys
from nltk import *
import locale
from wikitools import wiki
from wikitools import api
from wikitools import page
from wikitools import category
import numpy as np
from matplotlib import pyplot as plt
# FUNCTIONS
''' EXTRACT PAIRS OF COUNTRY-DEMONYM FROM WP '''
def demonyms_downloadDemonymsWP(filename, skip=False):
locale.setlocale(locale.LC_ALL, '')
lang = 'en'
demonymPages = {'en':u'Demonym'}
wikiAPI = {'en': "http://en.wikipedia.org/w/api.php"}
extractionRules = [
re.compile(u'^\*\[\[([\w]+)\]\] \u2192 ([\w]+)$',re.L),
re.compile(u'^\*\[\[([\w]+)\]\] \u2192 ([\w]+) \([\w]+ \"\'\'[\w]+\'\'\"\)$',re.L),
re.compile(u'^\*\[\[([\w]+)\]\] \u2192 ([\w]+) \([\w|\W]+\)$',re.L),
re.compile(u'^\*\[\[([\w]+)\]\] \u2192 ([\w]+) ([\w|\W]+)$',re.L),
re.compile(u'^\*\[\[([\w]+ [\w]+)\]\] \u2192 ([\w]+ [\w]+)$',re.L),
re.compile(u'^\*\[\[([\w]+ [\w]+)\]\] \u2192 ([\w]+ [\w]+) ([\w|\W]+)$',re.L)]
##initializing the list of triples to extract
##The elements of each triple are:
## location
## demonym
## identifier of the rule applied (just the index in the list)
##The identifier is useful for computing the coverage of each rule
demonymTriples = []
# An object of the class Wiki associated to the WP is built
site = wiki.Wiki(wikiAPI[lang])
##An object of the class Page associated to the demonyms page
pageDemonym = page.Page(site,demonymPages[lang])
##The lines of the page are read and translated into Unicode
##We print the number of lines
lines=pageDemonym.getWikiText().split('\n')
lines = map(lambda x:demonyms_guessEncoding(x)[0],lines)
len(lines)
for linea in lines:
if skip and ('==Irregular forms==' in linea.encode('utf-8')):
break
for ir in range(len(extractionRules)):
r = extractionRules[ir]
m = re.match(r,linea)
if not m:
#print linea.encode('utf-8')
continue
demonymTriples.append((m.group(1), m.group(2), ir))
break
print len(demonymTriples), 'triples have been obtained '
with open(filename, 'w') as out_file:
for demonym in demonymTriples:
out_file.write(str(demonym[0] + ','+ demonym[1] + ',' + str(demonym[2])+'\n'))
''' PARSES THE RULE FILE AND EXTRACTS ADDING AND REPLACING RULES:
EXAMPLE:
adding rule: Italia + n = Italian (->n) // America + n = American (->n)
replacing rule: Catalonia - onia + an = Catalan (onia->an) // Spain - in + nish = Spanish (in -> nish)
'''
def demonyms_showHistogram(filename, THRESHOLD=1, quiet=True):
count_rules_diff, count_rules_end, total_words, n_end = demonyms_parseFile(filename, quiet)
with open('replace_rules.csv','w') as f:
d1 = {}
for rule in count_rules_diff:
if count_rules_diff[rule] > THRESHOLD:
d1[rule] = count_rules_diff[rule]
tmp = rule.split("-->")
f.write(tmp[0]+','+tmp[1]+'\n')
with open('add_rules.csv','w') as f:
d2 = {}
for rule in count_rules_end:
if count_rules_end[rule] > THRESHOLD:
d2[rule] = count_rules_end[rule]
tmp = rule.split("-->")
f.write(tmp[0]+','+tmp[1]+'\n')
if not quiet:
print "\nFinal rules\n------------\n", 'DIFF:\n', sorted(count_rules_diff.items(), key=lambda x:x[1], reverse=True), 'ENDING:\n', sorted(count_rules_end.items(), key=lambda x:x[1], reverse=True)
X = np.arange(len(d1)+len(d2))
plt.bar(X[:len(d1)], d1.values(), align='center', width=0.5, color='blue', label='SUBSTITUTION')
plt.hold('on')
plt.bar(X[len(d1):], d2.values(), align='center', width=0.5, color='green', label='ENDING->ADDING')
plt.xticks(X, d1.keys()+d2.keys(), rotation='vertical')
ymax = max(d1.values() + d2.values()) + 1
plt.ylim(0, ymax)
plt.legend(loc=2)
plt.savefig('rules.png', bbox_inches='tight')
print '\n\n####################################'
print 'Total number of rules: ', len(count_rules_diff)+len(count_rules_end)
print 'Number of rules (occurrences >', str(THRESHOLD)+') : ', len(d1)+len(d2), '\n\n'
''' PARSES THE DEMONYM FILE '''
def demonyms_parseFile(filename, quiet=True):
count_rules_diff={}
count_rules_end={}
total_words = 0
n_end = 4
try:
with open(filename, 'r') as csvfile:
data = csv.reader(csvfile, delimiter=',')
#Read csv lines
for row in data:
total_words += 1
count_rules_diff, count_rules_end, total_words, n_end = demonyms_generateRules(row, count_rules_diff, count_rules_end, total_words, n_end, quiet = quiet, method='diff')
count_rules_diff, count_rules_end, total_words, n_end = demonyms_generateRules(row, count_rules_diff, count_rules_end, total_words, n_end, quiet = quiet, method='ending')
except Exception as ex:
print ex
return count_rules_diff, count_rules_end, total_words, n_end
''' EXTRACTS THE RULE FOR A GIVEN PAIR COUNTRY,DEMONYM'''
def demonyms_generateRules(row, count_rules_diff, count_rules_end, total_words, n_end, quiet=True, method='diff'):
country = row[0]
denomyn = row[1]
addList = [] #Letters present in the demonym that must be added to the country word
delList = [] #Letters that must be removed from the country word
rule = {}
#Iterate over the longer word (normally the demonym)
for i in xrange(0,len(denomyn)):
if i<len(country):
if(country[i]!=denomyn[i]):
delList.append(country[i]) #Letter that needs to be removed
addList.append(denomyn[i]) #Letter that does not appear in the country
#Case where the country word is exhausted but the demonym still has letters left
else:
addList.append(denomyn[i])
#Case where no letter of the country needs to be removed
if len(delList)==0:
if method == 'diff':
delList.append(" ")
#return
else:
#record ending->suffix rules for several ending lengths of the country
for i in range(n_end):
cnt = country[-i:]
den = ''.join(addList)
try:
count_rules_end[cnt+"-->"+den] += 1
except:
count_rules_end[cnt+"-->"+den] = 1
#Prints
if not quiet:
print country
print denomyn
print "Del list:"+str(delList)
print "Add list:"+str(addList)
if method == 'diff':
key = ''.join(delList)
value = ''.join(addList)
if key+"-->"+value in count_rules_diff:
count_rules_diff[key+"-->"+value] += 1
else:
count_rules_diff[key+"-->"+value] = 1
return count_rules_diff, count_rules_end, total_words, n_end
''' DOWNLOADS A LIST OF CITIES AND A LIST OF COUNTRIES FROM WIKIPEDIA '''
def demonyms_parseCitiesAndCountries():
wikiAPI = {'en': "http://en.wikipedia.org/w/api.php"}
site = wiki.Wiki(wikiAPI['en'])
cities = []
countries = []
rule = re.compile(u'.*\[\[([\w\s]+)\]\].*',re.L)
r1 = re.compile(r'.*\[\[((List of )([A-Za-z]{1,}[\s]?)+)\]\].*')
r2 = re.compile(r'.*\[\[([A-Z]{1}([a-z]{1,}[\s]*)+)\]\].*')
re_country = re.compile(r'in\ *t*h*e*\ ([A-Z][a-z]+)')
lists = ['List_of_cities_in_Africa', 'List_of_cities_in_Asia', 'List_of_cities_in_Oceania', 'List_of_cities_in_Europe']
for l in lists:
p = page.Page(site, l, sectionnumber='1')
for line in p.getWikiText().split('\n'):
tmp = r1.findall(line)
if len(tmp) > 0:
link = tmp[0][0]
print link.encode('utf-8')
try:
if link.encode('utf-8').endswith(re_country.findall(link.encode('utf-8'))[0]):
countries.append(re_country.findall(link.encode('utf-8'))[0])
except:
pass
sc = page.Page(site, link, sectionnumber='1')
try:
text = sc.getWikiText().split('\n')
except:
continue
text = map(lambda x:demonyms_guessEncoding(x)[0],text)
#print text
for line in text:
if 'ref' in line:
continue
try:
tmp = rule.findall(line)
if len(tmp) > 0:
if tmp[0] not in cities:
if len(tmp[0].split(' ')) < 2:
cities.append(tmp[0])
except Exception, e:
pass
print len(cities)
with open("cities.csv", 'w') as f:
for city in cities:
f.write(str(city + '\n'))
with open("countries.csv", 'w') as f:
for country in countries:
f.write(str(country + '\n'))
''' GENERATES ALL THE POSSIBLE DEMONYMS FOR A GIVEN PLACE '''
def demonyms_generateDemonym(place, add, replace):
candidates = []
for rule in replace:
if len(rule[0]) > 0 and place.endswith(rule[0]):
candidates.append(place[:-len(rule[0])]+rule[1])
for rule in add:
if len(rule[0]) == 0 or place.endswith(rule[0]):
candidates.append(place+rule[1])
return candidates
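# Worked example (comment only, with hand-written rules in the same (ending, suffix)
# tuple format read back from the rule CSV files by demonyms_findDemonyms below):
#   demonyms_generateDemonym('Catalonia', add=[('', 'n')], replace=[('onia', 'an')])
# returns ['Catalan', 'Catalonian']; the candidates are later checked against the
# Wikipedia page text by demonyms_matchCandidates.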
''' FINDS THE GIVEN DEMONYM CANDIDATES IN THE WP PAGE OF THE GIVEN PLACE '''
def demonyms_matchCandidates(link, candidates):
wikiAPI = {
'en': "http://en.wikipedia.org/w/api.php"}
site = wiki.Wiki(wikiAPI['en'])
text = page.Page(site, link).getWikiText()
score = 0
rules = [0]*len(candidates)
pos = 0
for candidate in candidates:
if demonyms_findWholeWord(candidate.lower())(text.lower()):
score += 1
rules[pos] += 1
pos += 1
return score, rules
''' FINDS A WHOLE WORD '''
def demonyms_findWholeWord(w):
return re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE).search
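# Example (comment only): demonyms_findWholeWord('catalan') returns the bound `search`
# method of a compiled, case-insensitive whole-word pattern, so
# demonyms_findWholeWord('catalan')('The Catalan people') yields a match object,
# while demonyms_findWholeWord('catalan')('catalanism') yields None.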
''' EXTRACTS THE RULES FROM THE FILES, READS THE SAMPLES (i.e. PLACES) AND PERFORMS THE MATCHING '''
def demonyms_findDemonyms(filename):
add = []
replace = []
with open('add_rules.csv', 'r') as f:
for line in f.readlines():
line = line.replace('\n','')
tmp = line.split(',')
add.append((tmp[0],tmp[1]))
with open('replace_rules.csv', 'r') as f:
for line in f.readlines():
line = line.replace('\n','')
tmp = line.split(',')
replace.append((tmp[0],tmp[1]))
print 'There are', len(add), 'add rules and ', len(replace), 'replace rules.'
matchings = 0
test_len = 0
output_lines = []
f = open(filename, 'r')
for line in f.readlines():
line = line.replace('\n','')
try:
candidates = demonyms_generateDemonym(line, add, replace)
score, rules = demonyms_matchCandidates(line, candidates)
if score > 0:
matching_rules = []
for r in range(0, len(candidates)):
if rules[r]:
matching_rules.append(candidates[r])
output_lines.append(line + ',' + str(matching_rules)+'\n')
if score > 0:
matchings += 1
test_len += 1
except:
pass
f.close()
with open(filename.replace('.csv', '_res.csv'),'w') as f:
for line in output_lines:
f.write(line)
print '--> Results for', filename.replace('.csv', ''),':\n'
print 'Number of matchings:', matchings, '; number of samples:', test_len, '(',np.round((matchings/float(test_len))*10000)/100.0,'%)'
##For detecting coding and transforming to Unicode
##########################################################################
# Guess Character Encoding
##########################################################################
# adapted from io.py in the docutils extension module (http://docutils.sourceforge.net)
# http://www.pyzine.com/Issue008/Section_Articles/article_Encodings.html
def demonyms_guessEncoding(data):
"""
Given a byte string, attempt to decode it.
Tries the standard 'utf-8' and 'latin-1' encodings,
plus several gathered from locale information.
The calling program *must* first call::
locale.setlocale(locale.LC_ALL, '')
If successful it returns C{(decoded_unicode, successful_encoding)}.
If unsuccessful it raises a C{UnicodeError}.
"""
successful_encoding = None
# we make 'utf-8' the first encoding
encodings = ['utf-8']
#
# next we add anything we can learn from the locale
try:
encodings.append(locale.nl_langinfo(locale.CODESET))
except AttributeError:
pass
try:
encodings.append(locale.getlocale()[1])
except (AttributeError, IndexError):
pass
try:
encodings.append(locale.getdefaultlocale()[1])
except (AttributeError, IndexError):
pass
#
# we try 'latin-1' last
encodings.append('latin-1')
for enc in encodings:
# some of the locale calls
# may have returned None
if not enc:
continue
try:
decoded = unicode(data, enc)
successful_encoding = enc
except (UnicodeError, LookupError):
pass
else:
break
if not successful_encoding:
raise UnicodeError(
'Unable to decode input data. Tried the following encodings: %s.'
% ', '.join([repr(enc) for enc in encodings if enc]))
else:
return (decoded, successful_encoding)
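# Example (comment only): demonyms_guessEncoding('caf\xc3\xa9') returns
# (u'caf\xe9', 'utf-8'), since 'utf-8' is the first encoding tried.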
{
"content_hash": "1adc0d88f4efb509aa2676a5c01e2513",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 197,
"avg_line_length": 29.889908256880734,
"alnum_prop": 0.5950736648250461,
"repo_name": "alex-pardo/ANLP-PROJECT",
"id": "063c29df5f6be0f3323433e35ab05130f62366e5",
"size": "13034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/functions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "36843"
},
{
"name": "TeX",
"bytes": "18370"
}
],
"symlink_target": ""
}
import os
import numpy as np
import json
import itertools
from collections import defaultdict
from scipy.optimize import linear_sum_assignment
from ..utils import TrackEvalException
from ._base_dataset import _BaseDataset
from .. import utils
from .. import _timing
class TAO_OW(_BaseDataset):
"""Dataset class for TAO tracking"""
@staticmethod
def get_default_dataset_config():
"""Default class config values"""
code_path = utils.get_code_path()
default_config = {
'GT_FOLDER': os.path.join(code_path, 'data/gt/tao/tao_training'), # Location of GT data
'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/tao/tao_training'), # Trackers location
'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)
'CLASSES_TO_EVAL': None, # Classes to eval (if None, all classes)
'SPLIT_TO_EVAL': 'training', # Valid: 'training', 'val'
'PRINT_CONFIG': True, # Whether to print current config
'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL
'MAX_DETECTIONS': 300, # Number of maximal allowed detections per image (0 for unlimited)
'SUBSET': 'all'
}
return default_config
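# Illustrative configuration sketch (paths and values are placeholders): any subset of
# the defaults above can be overridden and passed to the constructor, e.g.
#
#     dataset = TAO_OW({
#         'GT_FOLDER': '/data/gt/tao/tao_training',
#         'TRACKERS_FOLDER': '/data/trackers/tao/tao_training',
#         'SUBSET': 'known',        # one of the splits handled by _split_known_unknown_distractor
#         'MAX_DETECTIONS': 100,
#     })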
def __init__(self, config=None):
"""Initialise dataset, checking that all required files are present"""
super().__init__()
# Fill non-given config values with defaults
self.config = utils.init_config(config, self.get_default_dataset_config(), self.get_name())
self.gt_fol = self.config['GT_FOLDER']
self.tracker_fol = self.config['TRACKERS_FOLDER']
self.should_classes_combine = True
self.use_super_categories = False
self.tracker_sub_fol = self.config['TRACKER_SUB_FOLDER']
self.output_fol = self.config['OUTPUT_FOLDER']
if self.output_fol is None:
self.output_fol = self.tracker_fol
self.output_sub_fol = self.config['OUTPUT_SUB_FOLDER']
gt_dir_files = [file for file in os.listdir(self.gt_fol) if file.endswith('.json')]
if len(gt_dir_files) != 1:
raise TrackEvalException(self.gt_fol + ' does not contain exactly one json file.')
with open(os.path.join(self.gt_fol, gt_dir_files[0])) as f:
self.gt_data = json.load(f)
self.subset = self.config['SUBSET']
if self.subset != 'all':
# Split GT data into `known`, `unknown` or `distractor`
self._split_known_unknown_distractor()
self.gt_data = self._filter_gt_data(self.gt_data)
# merge categories marked with a merged tag in TAO dataset
self._merge_categories(self.gt_data['annotations'] + self.gt_data['tracks'])
# Get sequences to eval and sequence information
self.seq_list = [vid['name'].replace('/', '-') for vid in self.gt_data['videos']]
self.seq_name_to_seq_id = {vid['name'].replace('/', '-'): vid['id'] for vid in self.gt_data['videos']}
# compute mappings from videos to annotation data
self.videos_to_gt_tracks, self.videos_to_gt_images = self._compute_vid_mappings(self.gt_data['annotations'])
# compute sequence lengths
self.seq_lengths = {vid['id']: 0 for vid in self.gt_data['videos']}
for img in self.gt_data['images']:
self.seq_lengths[img['video_id']] += 1
self.seq_to_images_to_timestep = self._compute_image_to_timestep_mappings()
self.seq_to_classes = {vid['id']: {'pos_cat_ids': list({track['category_id'] for track
in self.videos_to_gt_tracks[vid['id']]}),
'neg_cat_ids': vid['neg_category_ids'],
'not_exhaustively_labeled_cat_ids': vid['not_exhaustive_category_ids']}
for vid in self.gt_data['videos']}
# Get classes to eval
considered_vid_ids = [self.seq_name_to_seq_id[vid] for vid in self.seq_list]
seen_cats = set([cat_id for vid_id in considered_vid_ids for cat_id
in self.seq_to_classes[vid_id]['pos_cat_ids']])
# only classes with ground truth are evaluated in TAO
self.valid_classes = [cls['name'] for cls in self.gt_data['categories'] if cls['id'] in seen_cats]
# cls_name_to_cls_id_map = {cls['name']: cls['id'] for cls in self.gt_data['categories']}
if self.config['CLASSES_TO_EVAL']:
# self.class_list = [cls.lower() if cls.lower() in self.valid_classes else None
# for cls in self.config['CLASSES_TO_EVAL']]
self.class_list = ["object"] # class-agnostic
if not all(self.class_list):
raise TrackEvalException('Attempted to evaluate an invalid class. Only classes ' +
', '.join(self.valid_classes) +
' are valid (classes present in ground truth data).')
else:
# self.class_list = [cls for cls in self.valid_classes]
self.class_list = ["object"] # class-agnostic
# self.class_name_to_class_id = {k: v for k, v in cls_name_to_cls_id_map.items() if k in self.class_list}
self.class_name_to_class_id = {"object": 1} # class-agnostic
# Get trackers to eval
if self.config['TRACKERS_TO_EVAL'] is None:
self.tracker_list = os.listdir(self.tracker_fol)
else:
self.tracker_list = self.config['TRACKERS_TO_EVAL']
if self.config['TRACKER_DISPLAY_NAMES'] is None:
self.tracker_to_disp = dict(zip(self.tracker_list, self.tracker_list))
elif (self.config['TRACKERS_TO_EVAL'] is not None) and (
len(self.config['TRACKER_DISPLAY_NAMES']) == len(self.tracker_list)):
self.tracker_to_disp = dict(zip(self.tracker_list, self.config['TRACKER_DISPLAY_NAMES']))
else:
raise TrackEvalException('List of tracker files and tracker display names do not match.')
self.tracker_data = {tracker: dict() for tracker in self.tracker_list}
for tracker in self.tracker_list:
tr_dir_files = [file for file in os.listdir(os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol))
if file.endswith('.json')]
if len(tr_dir_files) != 1:
raise TrackEvalException(os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol)
+ ' does not contain exactly one json file.')
with open(os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol, tr_dir_files[0])) as f:
curr_data = json.load(f)
# limit detections if MAX_DETECTIONS > 0
if self.config['MAX_DETECTIONS']:
curr_data = self._limit_dets_per_image(curr_data)
# fill missing video ids
self._fill_video_ids_inplace(curr_data)
# make track ids unique over whole evaluation set
self._make_track_ids_unique(curr_data)
# merge categories marked with a merged tag in TAO dataset
self._merge_categories(curr_data)
# get tracker sequence information
curr_videos_to_tracker_tracks, curr_videos_to_tracker_images = self._compute_vid_mappings(curr_data)
self.tracker_data[tracker]['vids_to_tracks'] = curr_videos_to_tracker_tracks
self.tracker_data[tracker]['vids_to_images'] = curr_videos_to_tracker_images
def get_display_name(self, tracker):
return self.tracker_to_disp[tracker]
def _load_raw_file(self, tracker, seq, is_gt):
"""Load a file (gt or tracker) in the TAO format
If is_gt, this returns a dict which contains the fields:
[gt_ids, gt_classes] : list (for each timestep) of 1D NDArrays (for each det).
[gt_dets]: list (for each timestep) of lists of detections.
            [classes_to_gt_tracks]: dictionary with class values as keys and list of dictionaries (with image ids as
                                    keys and corresponding bounding boxes as values) for each track
[classes_to_gt_track_ids, classes_to_gt_track_areas, classes_to_gt_track_lengths]: dictionary with class values
as keys and lists (for each track) as values
if not is_gt, this returns a dict which contains the fields:
[tracker_ids, tracker_classes, tracker_confidences] : list (for each timestep) of 1D NDArrays (for each det).
[tracker_dets]: list (for each timestep) of lists of detections.
            [classes_to_dt_tracks]: dictionary with class values as keys and list of dictionaries (with image ids as
                                    keys and corresponding bounding boxes as values) for each track
[classes_to_dt_track_ids, classes_to_dt_track_areas, classes_to_dt_track_lengths]: dictionary with class values
as keys and lists as values
[classes_to_dt_track_scores]: dictionary with class values as keys and 1D numpy arrays as values
"""
seq_id = self.seq_name_to_seq_id[seq]
# File location
if is_gt:
imgs = self.videos_to_gt_images[seq_id]
else:
imgs = self.tracker_data[tracker]['vids_to_images'][seq_id]
# Convert data to required format
num_timesteps = self.seq_lengths[seq_id]
img_to_timestep = self.seq_to_images_to_timestep[seq_id]
data_keys = ['ids', 'classes', 'dets']
if not is_gt:
data_keys += ['tracker_confidences']
raw_data = {key: [None] * num_timesteps for key in data_keys}
for img in imgs:
            # some tracker data contains images without any ground truth information; these are ignored
try:
t = img_to_timestep[img['id']]
except KeyError:
continue
annotations = img['annotations']
raw_data['dets'][t] = np.atleast_2d([ann['bbox'] for ann in annotations]).astype(float)
raw_data['ids'][t] = np.atleast_1d([ann['track_id'] for ann in annotations]).astype(int)
raw_data['classes'][t] = np.atleast_1d([1 for _ in annotations]).astype(int) # class-agnostic
if not is_gt:
raw_data['tracker_confidences'][t] = np.atleast_1d([ann['score'] for ann in annotations]).astype(float)
for t, d in enumerate(raw_data['dets']):
if d is None:
raw_data['dets'][t] = np.empty((0, 4)).astype(float)
raw_data['ids'][t] = np.empty(0).astype(int)
raw_data['classes'][t] = np.empty(0).astype(int)
if not is_gt:
raw_data['tracker_confidences'][t] = np.empty(0)
if is_gt:
key_map = {'ids': 'gt_ids',
'classes': 'gt_classes',
'dets': 'gt_dets'}
else:
key_map = {'ids': 'tracker_ids',
'classes': 'tracker_classes',
'dets': 'tracker_dets'}
for k, v in key_map.items():
raw_data[v] = raw_data.pop(k)
# all_classes = [self.class_name_to_class_id[cls] for cls in self.class_list]
all_classes = [1] # class-agnostic
if is_gt:
classes_to_consider = all_classes
all_tracks = self.videos_to_gt_tracks[seq_id]
else:
# classes_to_consider = self.seq_to_classes[seq_id]['pos_cat_ids'] \
# + self.seq_to_classes[seq_id]['neg_cat_ids']
classes_to_consider = all_classes # class-agnostic
all_tracks = self.tracker_data[tracker]['vids_to_tracks'][seq_id]
# classes_to_tracks = {cls: [track for track in all_tracks if track['category_id'] == cls]
# if cls in classes_to_consider else [] for cls in all_classes}
classes_to_tracks = {cls: [track for track in all_tracks]
if cls in classes_to_consider else [] for cls in all_classes} # class-agnostic
# mapping from classes to track information
raw_data['classes_to_tracks'] = {cls: [{det['image_id']: np.atleast_1d(det['bbox'])
for det in track['annotations']} for track in tracks]
for cls, tracks in classes_to_tracks.items()}
raw_data['classes_to_track_ids'] = {cls: [track['id'] for track in tracks]
for cls, tracks in classes_to_tracks.items()}
raw_data['classes_to_track_areas'] = {cls: [track['area'] for track in tracks]
for cls, tracks in classes_to_tracks.items()}
raw_data['classes_to_track_lengths'] = {cls: [len(track['annotations']) for track in tracks]
for cls, tracks in classes_to_tracks.items()}
if not is_gt:
raw_data['classes_to_dt_track_scores'] = {cls: np.array([np.mean([float(x['score'])
for x in track['annotations']])
for track in tracks])
for cls, tracks in classes_to_tracks.items()}
if is_gt:
key_map = {'classes_to_tracks': 'classes_to_gt_tracks',
'classes_to_track_ids': 'classes_to_gt_track_ids',
'classes_to_track_lengths': 'classes_to_gt_track_lengths',
'classes_to_track_areas': 'classes_to_gt_track_areas'}
else:
key_map = {'classes_to_tracks': 'classes_to_dt_tracks',
'classes_to_track_ids': 'classes_to_dt_track_ids',
'classes_to_track_lengths': 'classes_to_dt_track_lengths',
'classes_to_track_areas': 'classes_to_dt_track_areas'}
for k, v in key_map.items():
raw_data[v] = raw_data.pop(k)
raw_data['num_timesteps'] = num_timesteps
raw_data['neg_cat_ids'] = self.seq_to_classes[seq_id]['neg_cat_ids']
raw_data['not_exhaustively_labeled_cls'] = self.seq_to_classes[seq_id]['not_exhaustively_labeled_cat_ids']
raw_data['seq'] = seq
return raw_data
@_timing.time
def get_preprocessed_seq_data(self, raw_data, cls):
""" Preprocess data for a single sequence for a single class ready for evaluation.
Inputs:
- raw_data is a dict containing the data for the sequence already read in by get_raw_seq_data().
- cls is the class to be evaluated.
Outputs:
- data is a dict containing all of the information that metrics need to perform evaluation.
It contains the following fields:
[num_timesteps, num_gt_ids, num_tracker_ids, num_gt_dets, num_tracker_dets] : integers.
[gt_ids, tracker_ids, tracker_confidences]: list (for each timestep) of 1D NDArrays (for each det).
[gt_dets, tracker_dets]: list (for each timestep) of lists of detections.
[similarity_scores]: list (for each timestep) of 2D NDArrays.
Notes:
General preprocessing (preproc) occurs in 4 steps. Some datasets may not use all of these steps.
1) Extract only detections relevant for the class to be evaluated (including distractor detections).
2) Match gt dets and tracker dets. Remove tracker dets that are matched to a gt det that is of a
distractor class, or otherwise marked as to be removed.
                3) Remove unmatched tracker dets if they fall within a crowd ignore region or don't meet certain
                    other criteria (e.g. are too small).
4) Remove gt dets that were only useful for preprocessing and not for actual evaluation.
After the above preprocessing steps, this function also calculates the number of gt and tracker detections
and unique track ids. It also relabels gt and tracker ids to be contiguous and checks that ids are
unique within each timestep.
TAO:
            In TAO, the 4 preproc steps are as follows:
1) All classes present in the ground truth data are evaluated separately.
2) No matched tracker detections are removed.
                3) Unmatched tracker detections are removed if there is no ground truth data and the class does not
belong to the categories marked as negative for this sequence. Additionally, unmatched tracker
detections for classes which are marked as not exhaustively labeled are removed.
4) No gt detections are removed.
Further, for TrackMAP computation track representations for the given class are accessed from a dictionary
and the tracks from the tracker data are sorted according to the tracker confidence.
"""
cls_id = self.class_name_to_class_id[cls]
is_not_exhaustively_labeled = cls_id in raw_data['not_exhaustively_labeled_cls']
is_neg_category = cls_id in raw_data['neg_cat_ids']
data_keys = ['gt_ids', 'tracker_ids', 'gt_dets', 'tracker_dets', 'tracker_confidences', 'similarity_scores']
data = {key: [None] * raw_data['num_timesteps'] for key in data_keys}
unique_gt_ids = []
unique_tracker_ids = []
num_gt_dets = 0
num_tracker_dets = 0
for t in range(raw_data['num_timesteps']):
# Only extract relevant dets for this class for preproc and eval (cls)
gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id)
            gt_class_mask = gt_class_mask.astype(bool)
gt_ids = raw_data['gt_ids'][t][gt_class_mask]
gt_dets = raw_data['gt_dets'][t][gt_class_mask]
tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id)
            tracker_class_mask = tracker_class_mask.astype(bool)
tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask]
tracker_dets = raw_data['tracker_dets'][t][tracker_class_mask]
tracker_confidences = raw_data['tracker_confidences'][t][tracker_class_mask]
similarity_scores = raw_data['similarity_scores'][t][gt_class_mask, :][:, tracker_class_mask]
# Match tracker and gt dets (with hungarian algorithm).
unmatched_indices = np.arange(tracker_ids.shape[0])
if gt_ids.shape[0] > 0 and tracker_ids.shape[0] > 0:
matching_scores = similarity_scores.copy()
matching_scores[matching_scores < 0.5 - np.finfo('float').eps] = 0
match_rows, match_cols = linear_sum_assignment(-matching_scores)
actually_matched_mask = matching_scores[match_rows, match_cols] > 0 + np.finfo('float').eps
match_cols = match_cols[actually_matched_mask]
unmatched_indices = np.delete(unmatched_indices, match_cols, axis=0)
if gt_ids.shape[0] == 0 and not is_neg_category:
to_remove_tracker = unmatched_indices
elif is_not_exhaustively_labeled:
to_remove_tracker = unmatched_indices
else:
                to_remove_tracker = np.array([], dtype=int)
# remove all unwanted unmatched tracker detections
data['tracker_ids'][t] = np.delete(tracker_ids, to_remove_tracker, axis=0)
data['tracker_dets'][t] = np.delete(tracker_dets, to_remove_tracker, axis=0)
data['tracker_confidences'][t] = np.delete(tracker_confidences, to_remove_tracker, axis=0)
similarity_scores = np.delete(similarity_scores, to_remove_tracker, axis=1)
data['gt_ids'][t] = gt_ids
data['gt_dets'][t] = gt_dets
data['similarity_scores'][t] = similarity_scores
unique_gt_ids += list(np.unique(data['gt_ids'][t]))
unique_tracker_ids += list(np.unique(data['tracker_ids'][t]))
num_tracker_dets += len(data['tracker_ids'][t])
num_gt_dets += len(data['gt_ids'][t])
# Re-label IDs such that there are no empty IDs
if len(unique_gt_ids) > 0:
unique_gt_ids = np.unique(unique_gt_ids)
gt_id_map = np.nan * np.ones((np.max(unique_gt_ids) + 1))
gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
for t in range(raw_data['num_timesteps']):
if len(data['gt_ids'][t]) > 0:
                    data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(int)
if len(unique_tracker_ids) > 0:
unique_tracker_ids = np.unique(unique_tracker_ids)
tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1))
tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids))
for t in range(raw_data['num_timesteps']):
if len(data['tracker_ids'][t]) > 0:
                    data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(int)
# Record overview statistics.
data['num_tracker_dets'] = num_tracker_dets
data['num_gt_dets'] = num_gt_dets
data['num_tracker_ids'] = len(unique_tracker_ids)
data['num_gt_ids'] = len(unique_gt_ids)
data['num_timesteps'] = raw_data['num_timesteps']
data['seq'] = raw_data['seq']
# get track representations
data['gt_tracks'] = raw_data['classes_to_gt_tracks'][cls_id]
data['gt_track_ids'] = raw_data['classes_to_gt_track_ids'][cls_id]
data['gt_track_lengths'] = raw_data['classes_to_gt_track_lengths'][cls_id]
data['gt_track_areas'] = raw_data['classes_to_gt_track_areas'][cls_id]
data['dt_tracks'] = raw_data['classes_to_dt_tracks'][cls_id]
data['dt_track_ids'] = raw_data['classes_to_dt_track_ids'][cls_id]
data['dt_track_lengths'] = raw_data['classes_to_dt_track_lengths'][cls_id]
data['dt_track_areas'] = raw_data['classes_to_dt_track_areas'][cls_id]
data['dt_track_scores'] = raw_data['classes_to_dt_track_scores'][cls_id]
data['not_exhaustively_labeled'] = is_not_exhaustively_labeled
data['iou_type'] = 'bbox'
# sort tracker data tracks by tracker confidence scores
if data['dt_tracks']:
idx = np.argsort([-score for score in data['dt_track_scores']], kind="mergesort")
data['dt_track_scores'] = [data['dt_track_scores'][i] for i in idx]
data['dt_tracks'] = [data['dt_tracks'][i] for i in idx]
data['dt_track_ids'] = [data['dt_track_ids'][i] for i in idx]
data['dt_track_lengths'] = [data['dt_track_lengths'][i] for i in idx]
data['dt_track_areas'] = [data['dt_track_areas'][i] for i in idx]
# Ensure that ids are unique per timestep.
self._check_unique_ids(data)
return data
def _calculate_similarities(self, gt_dets_t, tracker_dets_t):
similarity_scores = self._calculate_box_ious(gt_dets_t, tracker_dets_t)
return similarity_scores
def _merge_categories(self, annotations):
"""
Merges categories with a merged tag. Adapted from https://github.com/TAO-Dataset
:param annotations: the annotations in which the classes should be merged
:return: None
"""
merge_map = {}
for category in self.gt_data['categories']:
if 'merged' in category:
for to_merge in category['merged']:
merge_map[to_merge['id']] = category['id']
for ann in annotations:
ann['category_id'] = merge_map.get(ann['category_id'], ann['category_id'])
def _compute_vid_mappings(self, annotations):
"""
Computes mappings from Videos to corresponding tracks and images.
:param annotations: the annotations for which the mapping should be generated
:return: the video-to-track-mapping, the video-to-image-mapping
"""
vids_to_tracks = {}
vids_to_imgs = {}
vid_ids = [vid['id'] for vid in self.gt_data['videos']]
        # compute a mapping from image IDs to images
images = {}
for image in self.gt_data['images']:
images[image['id']] = image
for ann in annotations:
ann["area"] = ann["bbox"][2] * ann["bbox"][3]
vid = ann["video_id"]
if ann["video_id"] not in vids_to_tracks.keys():
vids_to_tracks[ann["video_id"]] = list()
if ann["video_id"] not in vids_to_imgs.keys():
vids_to_imgs[ann["video_id"]] = list()
# Fill in vids_to_tracks
tid = ann["track_id"]
exist_tids = [track["id"] for track in vids_to_tracks[vid]]
try:
index1 = exist_tids.index(tid)
except ValueError:
index1 = -1
            if index1 == -1:
curr_track = {"id": tid, "category_id": ann['category_id'],
"video_id": vid, "annotations": [ann]}
vids_to_tracks[vid].append(curr_track)
else:
vids_to_tracks[vid][index1]["annotations"].append(ann)
# Fill in vids_to_imgs
img_id = ann['image_id']
exist_img_ids = [img["id"] for img in vids_to_imgs[vid]]
try:
index2 = exist_img_ids.index(img_id)
except ValueError:
index2 = -1
if index2 == -1:
curr_img = {"id": img_id, "annotations": [ann]}
vids_to_imgs[vid].append(curr_img)
else:
vids_to_imgs[vid][index2]["annotations"].append(ann)
# sort annotations by frame index and compute track area
for vid, tracks in vids_to_tracks.items():
for track in tracks:
track["annotations"] = sorted(
track['annotations'],
key=lambda x: images[x['image_id']]['frame_index'])
                # Compute average area
track["area"] = (sum(x['area'] for x in track['annotations']) / len(track['annotations']))
# Ensure all videos are present
for vid_id in vid_ids:
if vid_id not in vids_to_tracks.keys():
vids_to_tracks[vid_id] = []
if vid_id not in vids_to_imgs.keys():
vids_to_imgs[vid_id] = []
return vids_to_tracks, vids_to_imgs
def _compute_image_to_timestep_mappings(self):
"""
Computes a mapping from images to the corresponding timestep in the sequence.
:return: the image-to-timestep-mapping
"""
images = {}
for image in self.gt_data['images']:
images[image['id']] = image
seq_to_imgs_to_timestep = {vid['id']: dict() for vid in self.gt_data['videos']}
for vid in seq_to_imgs_to_timestep:
curr_imgs = [img['id'] for img in self.videos_to_gt_images[vid]]
curr_imgs = sorted(curr_imgs, key=lambda x: images[x]['frame_index'])
seq_to_imgs_to_timestep[vid] = {curr_imgs[i]: i for i in range(len(curr_imgs))}
return seq_to_imgs_to_timestep
def _limit_dets_per_image(self, annotations):
"""
Limits the number of detections for each image to config['MAX_DETECTIONS']. Adapted from
https://github.com/TAO-Dataset/
:param annotations: the annotations in which the detections should be limited
:return: the annotations with limited detections
"""
max_dets = self.config['MAX_DETECTIONS']
img_ann = defaultdict(list)
for ann in annotations:
img_ann[ann["image_id"]].append(ann)
for img_id, _anns in img_ann.items():
if len(_anns) <= max_dets:
continue
_anns = sorted(_anns, key=lambda x: x["score"], reverse=True)
img_ann[img_id] = _anns[:max_dets]
return [ann for anns in img_ann.values() for ann in anns]
def _fill_video_ids_inplace(self, annotations):
"""
Fills in missing video IDs inplace. Adapted from https://github.com/TAO-Dataset/
        :param annotations: the annotations for which the video IDs should be filled inplace
:return: None
"""
missing_video_id = [x for x in annotations if 'video_id' not in x]
if missing_video_id:
image_id_to_video_id = {
x['id']: x['video_id'] for x in self.gt_data['images']
}
for x in missing_video_id:
x['video_id'] = image_id_to_video_id[x['image_id']]
@staticmethod
def _make_track_ids_unique(annotations):
"""
        Makes the track IDs unique over the whole annotation set. Adapted from https://github.com/TAO-Dataset/
:param annotations: the annotation set
:return: the number of updated IDs
"""
track_id_videos = {}
track_ids_to_update = set()
max_track_id = 0
for ann in annotations:
t = ann['track_id']
if t not in track_id_videos:
track_id_videos[t] = ann['video_id']
if ann['video_id'] != track_id_videos[t]:
# Track id is assigned to multiple videos
track_ids_to_update.add(t)
max_track_id = max(max_track_id, t)
if track_ids_to_update:
            print('Found track IDs shared across multiple videos; making them unique.')
next_id = itertools.count(max_track_id + 1)
new_track_ids = defaultdict(lambda: next(next_id))
for ann in annotations:
t = ann['track_id']
v = ann['video_id']
if t in track_ids_to_update:
ann['track_id'] = new_track_ids[t, v]
return len(track_ids_to_update)
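    # A small illustration (added comment, not part of the original file) of the
    # remapping above, assuming track_id 1 is reused by two different videos:
    #   anns = [{'track_id': 1, 'video_id': 10}, {'track_id': 1, 'video_id': 20}]
    #   _make_track_ids_unique(anns)
    #   # -> each (track_id, video_id) pair gets a fresh id (here 2 and 3),
    #   #    and the call returns 1, the number of conflicting track ids.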
def _split_known_unknown_distractor(self):
all_ids = set([i for i in range(1, 2000)]) # 2000 is larger than the max category id in TAO-OW.
        # `knowns` includes 78 TAO_category_ids that correspond to 78 COCO classes.
# (The other 2 COCO classes do not have corresponding classes in TAO).
self.knowns = {4, 13, 1038, 544, 1057, 34, 35, 36, 41, 45, 58, 60, 579, 1091, 1097, 1099, 78, 79, 81, 91, 1115,
1117, 95, 1122, 99, 1132, 621, 1135, 625, 118, 1144, 126, 642, 1155, 133, 1162, 139, 154, 174, 185,
699, 1215, 714, 717, 1229, 211, 729, 221, 229, 747, 235, 237, 779, 276, 805, 299, 829, 852, 347,
371, 382, 896, 392, 926, 937, 428, 429, 961, 452, 979, 980, 982, 475, 480, 993, 1001, 502, 1018}
# `distractors` is defined as in the paper "Opening up Open-World Tracking"
self.distractors = {20, 63, 108, 180, 188, 204, 212, 247, 303, 403, 407, 415, 490, 504, 507, 513, 529, 567,
569, 588, 672, 691, 702, 708, 711, 720, 736, 737, 798, 813, 815, 827, 831, 851, 877, 883,
912, 971, 976, 1130, 1133, 1134, 1169, 1184, 1220}
self.unknowns = all_ids.difference(self.knowns.union(self.distractors))
def _filter_gt_data(self, raw_gt_data):
"""
Filter out irrelevant data in the raw_gt_data
Args:
raw_gt_data: directly loaded from json.
Returns:
filtered gt_data
"""
valid_cat_ids = list()
if self.subset == "known":
valid_cat_ids = self.knowns
elif self.subset == "distractor":
valid_cat_ids = self.distractors
elif self.subset == "unknown":
valid_cat_ids = self.unknowns
# elif self.subset == "test_only_unknowns":
# valid_cat_ids = test_only_unknowns
else:
raise Exception("The parameter `SUBSET` is incorrect")
filtered = dict()
filtered["videos"] = raw_gt_data["videos"]
# filtered["videos"] = list()
unwanted_vid = set()
# for video in raw_gt_data["videos"]:
# datasrc = video["name"].split('/')[1]
# if datasrc in data_srcs:
# filtered["videos"].append(video)
# else:
# unwanted_vid.add(video["id"])
filtered["annotations"] = list()
for ann in raw_gt_data["annotations"]:
if (ann["video_id"] not in unwanted_vid) and (ann["category_id"] in valid_cat_ids):
filtered["annotations"].append(ann)
filtered["tracks"] = list()
for track in raw_gt_data["tracks"]:
if (track["video_id"] not in unwanted_vid) and (track["category_id"] in valid_cat_ids):
filtered["tracks"].append(track)
filtered["images"] = list()
for image in raw_gt_data["images"]:
if image["video_id"] not in unwanted_vid:
filtered["images"].append(image)
filtered["categories"] = list()
for cat in raw_gt_data["categories"]:
if cat["id"] in valid_cat_ids:
filtered["categories"].append(cat)
filtered["info"] = raw_gt_data["info"]
filtered["licenses"] = raw_gt_data["licenses"]
return filtered
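    # A hedged walk-through (added comment, not part of the original file) of the
    # filter above for SUBSET == "known": an annotation with category_id 4 (one of
    # the 78 known/COCO ids) is kept, one with category_id 20 (a distractor) is
    # dropped, and any id outside both sets (an unknown) is dropped as well.
    # Videos and images are kept untouched because `unwanted_vid` stays empty here.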
| {
"content_hash": "8f0d67030a40cea6590374d529fdbcfe",
"timestamp": "",
"source": "github",
"line_count": 652,
"max_line_length": 120,
"avg_line_length": 52.035276073619634,
"alnum_prop": 0.5707548560143838,
"repo_name": "JonathonLuiten/TrackEval",
"id": "40f80d7876ed9f27fb108b732315c4f8c6fc0984",
"size": "33927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trackeval/datasets/tao_ow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "625196"
}
],
"symlink_target": ""
} |
import os
hostname = "www.google.com" #example
response = os.system("ping -c 1 " + hostname)
#and then check the response...
if response == 0:
print (hostname, 'is up!')
else:
print (hostname, 'is down!')
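# A hedged alternative sketch (added, not part of the original snippet): calling
# ping through subprocess with an argument list avoids passing the hostname
# through a shell. The helper below is only defined, not called, and assumes a
# Unix-style `ping -c 1`.
import subprocess

def is_up(host):
    # One ICMP echo request; exit status 0 means the host answered.
    return subprocess.call(["ping", "-c", "1", host]) == 0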
| {
"content_hash": "478d0cfacf65a142a317e4b28e021f33",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 45,
"avg_line_length": 21.1,
"alnum_prop": 0.6587677725118484,
"repo_name": "mikestebbins/openapsdev",
"id": "e17754fa2e89e952fdaaa2b8af14dbf738d6278a",
"size": "439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/ping_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17852"
},
{
"name": "Shell",
"bytes": "4439"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
import sys
sys.exit(pytest.main(self.test_args))
setup(
name='disqus-python',
version='0.4.2',
author='DISQUS',
author_email='[email protected]',
url='https://github.com/disqus/disqus-python',
description='Disqus API Bindings',
packages=find_packages(),
zip_safe=False,
license='Apache License 2.0',
install_requires=[],
setup_requires=[],
tests_require=[
'pytest',
'mock',
],
cmdclass={'test': PyTest},
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python',
'Topic :: Software Development',
],
)
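# Usage note (added comment, not part of the original file): with the PyTest
# cmdclass registered above, running `python setup.py test` should first pull in
# the tests_require packages (pytest, mock) and then hand control to
# PyTest.run_tests(), which simply invokes pytest.main().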
| {
"content_hash": "43f191bf4cc5294d1ed731939db8233c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 55,
"avg_line_length": 29.224489795918366,
"alnum_prop": 0.6075418994413407,
"repo_name": "disqus/disqus-python",
"id": "61dfdd89ee6ed944cf64c9b004fb8bf3fbc95c9c",
"size": "1455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "23832"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import datetime as dt
import html
import random
import re
import typing as t
import aiosql
import bbcode
from htmlslacker import HTMLSlacker
from psycopg2.extensions import connection
from gargbot_3000 import config
forum_queries = aiosql.from_path("sql/post.sql", "psycopg2")
msn_queries = aiosql.from_path("sql/message.sql", "psycopg2")
def _sanitize_post(inp, bbcode_uid: str):
smls = re.compile(
r'<!-- s.*? --><img src=\\?"\{SMILIES_PATH\}/.*?\\?" '
'alt=\\?"(.*?)\\?" title=\\?".*?" /><!-- s.*? -->'
)
inp = re.sub(smls, r"\1", inp)
inp = html.unescape(inp)
inp = inp.replace(":" + bbcode_uid, "")
img_tags = re.compile(r"\[/?img\]")
inp = re.sub(img_tags, "", inp)
youtube_embeds = re.compile(
r'\[html\]<iframe width="\d+" height="\d+" '
'src="//www.youtube.com/embed/([^"]+)" frameborder='
r'"0" allowfullscreen></iframe>\[/html\]'
)
inp = re.sub(youtube_embeds, r"https://www.youtube.com/watch?v=\1", inp)
inp = bbcode.render_html(
inp, drop_unrecognized=True, escape_html=False, replace_links=False
)
inp = inp.replace('rel="nofollow"', "")
inp = HTMLSlacker(inp).get_output()
return inp
def forum(
conn: connection, args: t.Optional[list[str]]
) -> tuple[str, str, str, dt.datetime, str, str]:
user = args[0] if args else None
desc = " "
post = None
if user:
post = forum_queries.post_for_user(conn, slack_nick=user)
if not post:
desc = (
f"Gargling not found: {user}. Husk å bruke slack nick. "
"Her er et tilfeldig quote i stedet."
)
if not post:
post = forum_queries.random_post(conn)
text = _sanitize_post(post["content"], post["bbcode_uid"])
avatarurl = f"{config.forum_url}/download/file.php?avatar={post['avatar']}".strip()
url = f"{config.forum_url}/viewtopic.php?p={post['id']}#p{post['id']}"
return (text, post["slack_nick"], avatarurl, post["posted_at"], url, desc)
def msn(
conn: connection, args: t.Optional[list[str]]
) -> tuple[dt.datetime, list, t.Optional[str]]:
user = args[0] if args else None
desc = None
messages = None
if user:
messages = msn_queries.message_session_for_user_id(conn, slack_nick=user)
if messages:
first = next(i for i, message in enumerate(messages) if message["is_user"])
else:
desc = (
f"Gargling not found: {user}. Husk å bruke slack nick. "
"Her er en tilfeldig samtale i stedet."
)
if not messages:
messages = msn_queries.random_message_session(conn)
if len(messages) <= 10:
first = 0
else: # no test coverage
first = random.randint(0, len(messages) - 10)
conversation = messages[first : first + 10]
date = conversation[0]["sent_at"].strftime("%d.%m.%y %H:%M")
squashed: list[list[str]] = []
for message in conversation:
if squashed:
prev_from_user, prev_content, prev_color = squashed[-1]
if message["from_user"] == prev_from_user: # no test coverage
squashed[-1][1] = "\n".join([prev_content, message["content"]])
continue
squashed.append([message["from_user"], message["content"], message["color"]])
return date, squashed, desc
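# A small illustration (added comment, not part of the original file) of the
# squashing loop above: consecutive messages from the same sender are folded
# into one entry, so (from_user, content) pairs ("anna", "hi"), ("anna", "there"),
# ("bob", "yo") become [["anna", "hi\nthere", <color>], ["bob", "yo", <color>]];
# the sender names here are purely hypothetical.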
| {
"content_hash": "2850635700b4d97e8d881cdf834d813a",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 87,
"avg_line_length": 33.262135922330096,
"alnum_prop": 0.5846468184471687,
"repo_name": "eirki/gargbot_3000",
"id": "b704c2179ef54c74b3da924e6f2bec891b8839a9",
"size": "3468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gargbot_3000/quotes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2800"
},
{
"name": "Dockerfile",
"bytes": "1039"
},
{
"name": "HTML",
"bytes": "6032"
},
{
"name": "JavaScript",
"bytes": "36277"
},
{
"name": "Python",
"bytes": "254223"
},
{
"name": "Shell",
"bytes": "994"
}
],
"symlink_target": ""
} |
import BaseHTTPServer
import json
import urlparse
__author__ = 'umairghani'
class SimpleTCPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""
Base class for HTTP request handler
"""
def do_GET(self):
"""
Handle the HTTP GET request
:return: Serve a GET request
"""
url = urlparse.urlparse(self.path)
method = url.path.split("/")[1]
try:
result = self.server._dispatch(method, url.params)
response = self._dump(method, result)
self._send(response)
except TypeError, e:
self.send_error(400, str(e))
except NameError, e:
self.send_error(405, str(e))
except ValueError, e:
self.send_error(500, str(e))
def do_POST(self):
"""
Handles the HTTP Post request
:return:
"""
if self.headers.has_key("content-length"):
max_chunk_size = 10*1024*1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
chunk = self.rfile.read(chunk_size)
if not chunk:
break
L.append(chunk)
size_remaining -= len(L[-1])
data = ''.join(L)
        else:
            self.send_error(411)
            return
try:
method, params = self._load(data)
#params = [params] if params else ""
result = self.server._dispatch(method, params)
response = self._dump(method, result)
self._send(response)
except ValueError, e:
self.send_error(500, str(e))
except NameError, e:
self.send_error(405, str(e))
def _send(self, data):
"""
Writes data to the response
:param data:
:return: Nothing
"""
self.send_response(200, str(data))
self.send_header("Content-type", "application/json")
self.send_header("Accept", "application/json")
self.send_header("Content-length", str(len(data)))
self.end_headers()
self.wfile.write(data)
def _load(self, data):
"""
Parses the data
:param data:
:return: key, value
"""
r = json.loads(data)
return r.items()[0]
def _dump(self, method_name, data):
"""
converts it to json
:param data:
:return: json object
"""
result = {method_name: data}
return json.dumps(result)
def log_request(self, code='-', size='-'):
"""
Logging function
:param code:
:param size:
"""
if self.server.logging:
BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
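    # A hedged protocol sketch (added comment, not part of the original file),
    # inferred from _load/_dump above; the method name "echo" is hypothetical:
    #   POST body:      {"echo": ["hello"]}
    #   dispatched as:  self.server._dispatch("echo", ["hello"])
    #   response body:  {"echo": <whatever _dispatch returned>}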
| {
"content_hash": "b5c8c31ca587cd43d9050e19b70afb05",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 79,
"avg_line_length": 28.95959595959596,
"alnum_prop": 0.5235437739797698,
"repo_name": "umairghani/py-jrpc",
"id": "d0f2a51af665dd2c30b43137d71879d0276660ac",
"size": "2867",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/jrpc/jrpcServer/SimpleTCPRequestHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35594"
}
],
"symlink_target": ""
} |
'''
Created on 10 nov 2014
@author: rogerlarsson
The MIT License (MIT)
Copyright (c) 2014 rogerlarsson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import string, random
class PasswordGenerator:
def __init__(self):
random.seed()
self.symbols = [str(i) for i in range(10)] + list(string.ascii_letters)
def generate(self, length = 8):
password = ""
for i in range(length):
password += random.choice(self.symbols)
return password
if __name__ == '__main__':
pwg = PasswordGenerator()
print pwg.generate()
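# A hedged addition (not part of the original file): random.seed()/random.choice
# use a non-cryptographic PRNG, so for real passwords an os.urandom-backed source
# is preferable. A minimal sketch reusing the imports above; it is only defined,
# not called:
def generate_secure(length=8):
    rng = random.SystemRandom()
    return "".join(rng.choice(string.digits + string.ascii_letters) for _ in range(length))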
| {
"content_hash": "4fee8c1ff7563691edbdb19fef4029eb",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 34.26086956521739,
"alnum_prop": 0.7315989847715736,
"repo_name": "rogerlarsson/passwordGenerator",
"id": "a5e5729b1bdca696b8421547e4cd5cdaf57eab9f",
"size": "1576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/passwordGenerator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1576"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from collections import OrderedDict
from re import compile as re_compile
from sys import version_info
from unicodedata import category
from .compat import ustr
if version_info >= (3,):
number = (int, float)
string = (bytes, str)
unicode = str
unichr = chr
else:
number = (int, long, float)
string = (str, unicode)
unicode = unicode
unichr = unichr
NULL = u"null"
TRUE = u"true"
FALSE = u"false"
ID_START = {u"_"} | {unichr(x) for x in range(0xFFFF)
if category(unichr(x)) in ("LC", "Ll", "Lm", "Lo", "Lt", "Lu", "Nl")}
ID_CONTINUE = ID_START | {unichr(x) for x in range(0xFFFF)
if category(unichr(x)) in ("Mn", "Mc", "Nd", "Pc", "Sc")}
DOUBLE_QUOTE = u'"'
SINGLE_QUOTE = u"'"
ESCAPED_DOUBLE_QUOTE = u'\\"'
ESCAPED_SINGLE_QUOTE = u"\\'"
X_ESCAPE = re_compile(r"(\\x([0-9a-f]{2}))")
DOUBLE_QUOTED_SAFE = re_compile(r"([ -!#-\[\]-~]+)")
SINGLE_QUOTED_SAFE = re_compile(r"([ -&(-\[\]-~]+)")
class LabelSetView(object):
def __init__(self, elements=(), selected=(), **kwargs):
self.__elements = frozenset(elements)
self.__selected = tuple(selected)
self.__kwargs = kwargs
def __repr__(self):
if self.__selected:
return "".join(":%s" % cypher_escape(e, **self.__kwargs) for e in self.__selected if e in self.__elements)
else:
return "".join(":%s" % cypher_escape(e, **self.__kwargs) for e in sorted(self.__elements))
def __getattr__(self, element):
if element in self.__selected:
return self.__class__(self.__elements, self.__selected)
else:
return self.__class__(self.__elements, self.__selected + (element,))
def __len__(self):
return len(self.__elements)
def __iter__(self):
return iter(self.__elements)
def __contains__(self, element):
return element in self.__elements
def __and__(self, other):
return self.__elements & set(other)
def __or__(self, other):
return self.__elements | set(other)
def __sub__(self, other):
return self.__elements - set(other)
def __xor__(self, other):
return self.__elements ^ set(other)
class PropertyDictView(object):
def __init__(self, items=(), selected=(), **kwargs):
self.__items = dict(items)
self.__selected = tuple(selected)
self.__kwargs = kwargs
def __repr__(self):
if self.__selected:
properties = OrderedDict((key, self.__items[key]) for key in self.__selected if key in self.__items)
else:
properties = OrderedDict((key, self.__items[key]) for key in sorted(self.__items))
return cypher_repr(properties, **self.__kwargs)
def __getattr__(self, key):
if key in self.__selected:
return self.__class__(self.__items, self.__selected)
else:
return self.__class__(self.__items, self.__selected + (key,))
def __len__(self):
return len(self.__items)
def __iter__(self):
return iter(self.__items)
def __contains__(self, key):
return key in self.__items
class PropertySelector(object):
def __init__(self, items=(), default_value=None, **kwargs):
self.__items = dict(items)
self.__default_value = default_value
self.__kwargs = kwargs
def __getattr__(self, key):
return cypher_str(self.__items.get(key, self.__default_value), **self.__kwargs)
class CypherEncoder(object):
__default_instance = None
def __new__(cls, *args, **kwargs):
if not kwargs:
if cls.__default_instance is None:
cls.__default_instance = super(CypherEncoder, cls).__new__(cls)
return cls.__default_instance
return super(CypherEncoder, cls).__new__(cls)
encoding = "utf-8"
quote = None
sequence_separator = u", "
key_value_separator = u": "
node_template = u"#{id}{labels} {properties}"
related_node_template = u"#{id}"
relationship_template = u"{type} {properties}"
def __init__(self, encoding=None, quote=None, sequence_separator=None, key_value_separator=None,
node_template=None, related_node_template=None, relationship_template=None):
if encoding:
self.encoding = encoding
if quote:
self.quote = quote
if sequence_separator:
self.sequence_separator = sequence_separator
if key_value_separator:
self.key_value_separator = key_value_separator
if node_template:
self.node_template = node_template
if related_node_template:
self.related_node_template = related_node_template
if relationship_template:
self.relationship_template = relationship_template
def encode_key(self, key):
if isinstance(key, bytes):
key = key.decode(self.encoding)
assert isinstance(key, unicode)
if not key:
raise ValueError("Keys cannot be empty")
if key[0] in ID_START and all(key[i] in ID_CONTINUE for i in range(1, len(key))):
return key
else:
return u"`" + key.replace(u"`", u"``") + u"`"
def encode_value(self, value):
if value is None:
return NULL
if value is True:
return TRUE
if value is False:
return FALSE
if isinstance(value, number):
return unicode(value)
if isinstance(value, string):
return self.encode_string(value)
if hasattr(value, "nodes"):
if hasattr(value, "relationships"):
return self.encode_path(value)
else:
return self.encode_relationship(value)
if hasattr(value, "labels"):
return self.encode_node(value)
if isinstance(value, list):
return self.encode_list(value)
if isinstance(value, dict):
return self.encode_map(value)
raise TypeError("Values of type %s are not supported" % value.__class__.__name__)
def encode_string(self, value):
if isinstance(value, bytes):
value = value.decode(self.encoding)
assert isinstance(value, unicode)
quote = self.quote
if quote is None:
num_single = value.count(u"'")
num_double = value.count(u'"')
quote = SINGLE_QUOTE if num_single <= num_double else DOUBLE_QUOTE
if quote == SINGLE_QUOTE:
escaped_quote = ESCAPED_SINGLE_QUOTE
safe = SINGLE_QUOTED_SAFE
elif quote == DOUBLE_QUOTE:
escaped_quote = ESCAPED_DOUBLE_QUOTE
safe = DOUBLE_QUOTED_SAFE
else:
raise ValueError("Unsupported quote character %r" % quote)
if not value:
return quote + quote
parts = safe.split(value)
for i in range(0, len(parts), 2):
parts[i] = (X_ESCAPE.sub(u"\\\\u00\\2", parts[i].encode("unicode-escape").decode("utf-8")).
replace(quote, escaped_quote).replace(u"\\u0008", u"\\b").replace(u"\\u000c", u"\\f"))
return quote + u"".join(parts) + quote
def encode_list(self, values):
return u"[" + self.sequence_separator.join(map(self.encode_value, values)) + u"]"
def encode_map(self, values):
return u"{" + self.sequence_separator.join(self.encode_key(key) + self.key_value_separator + self.encode_value(value)
for key, value in values.items()) + u"}"
def encode_node(self, node):
return self._encode_node(node, self.node_template)
def encode_relationship(self, relationship):
nodes = relationship.nodes()
return u"{}-{}->{}".format(
self._encode_node(nodes[0], self.related_node_template),
self._encode_relationship_detail(relationship, self.relationship_template),
self._encode_node(nodes[-1], self.related_node_template),
)
def encode_path(self, path):
encoded = []
append = encoded.append
nodes = path.nodes()
for i, relationship in enumerate(path.relationships()):
append(self._encode_node(nodes[i], self.related_node_template))
related_nodes = relationship.nodes()
if self._node_id(related_nodes[0]) == self._node_id(nodes[i]):
append(u"-")
append(self._encode_relationship_detail(relationship, self.relationship_template))
append(u"->")
else:
append(u"<-")
append(self._encode_relationship_detail(relationship, self.relationship_template))
append(u"-")
append(self._encode_node(nodes[-1], self.related_node_template))
return u"".join(encoded)
@classmethod
def _node_id(cls, node):
return node.id if hasattr(node, "id") else node
def _encode_node(self, node, template):
return u"(" + template.format(
id=node.id,
labels=LabelSetView(node.labels(), encoding=self.encoding, quote=self.quote),
properties=PropertyDictView(node, encoding=self.encoding, quote=self.quote),
property=PropertySelector(node, u""),
).strip() + u")"
def _encode_relationship_detail(self, relationship, template):
return u"[" + template.format(
id=relationship.id,
type=u":" + ustr(type(relationship).__name__),
properties=PropertyDictView(relationship, encoding=self.encoding, quote=self.quote),
property=PropertySelector(relationship, u""),
).strip() + u"]"
def cypher_escape(identifier, **kwargs):
""" Escape a Cypher identifier in backticks.
::
>>> cypher_escape("simple_identifier")
'simple_identifier'
>>> cypher_escape("identifier with spaces")
'`identifier with spaces`'
>>> cypher_escape("identifier with `backticks`")
'`identifier with ``backticks```'
:arg identifier: any non-empty string
"""
if not isinstance(identifier, string):
raise TypeError(type(identifier).__name__)
encoder = CypherEncoder(**kwargs)
return encoder.encode_key(identifier)
def cypher_repr(value, **kwargs):
""" Return the Cypher representation of a value.
"""
encoder = CypherEncoder(**kwargs)
return encoder.encode_value(value)
def cypher_str(value, **kwargs):
""" Convert a Cypher value to a Python Unicode string.
"""
if isinstance(value, unicode):
return value
elif isinstance(value, bytes):
return value.decode(kwargs.get("encoding", "utf-8"))
else:
return cypher_repr(value, **kwargs)
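# A minimal usage sketch (added, not part of the original module), assuming plain
# Python values; running this file directly should print roughly:
#   {name: 'Alice'}   ['a', 1, null]   42
if __name__ == "__main__":
    print(cypher_repr({"name": "Alice"}))
    print(cypher_repr(["a", 1, None]))
    print(cypher_str(42))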
| {
"content_hash": "160c911240c538fa1d9e8b61821e3a41",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 125,
"avg_line_length": 34.2246835443038,
"alnum_prop": 0.5815071659731854,
"repo_name": "technige/cypy",
"id": "95fa772c12f483931bf5849c83d4a515f3daa509",
"size": "11441",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cypy/encoding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "164557"
},
{
"name": "Shell",
"bytes": "254"
}
],
"symlink_target": ""
} |
import m5
from m5.objects import *
from m5.defines import buildEnv
from .Ruby import create_topology
def define_options(parser):
parser.add_argument("--chi-config", action="store", type=str,
default=None,
help="NoC config. parameters and bindings. "
"Required for CustomMesh topology")
parser.add_argument("--enable-dvm", default=False, action="store_true")
def read_config_file(file):
''' Read file as a module and return it '''
import types
import importlib.machinery
loader = importlib.machinery.SourceFileLoader('chi_configs', file)
chi_configs = types.ModuleType(loader.name)
loader.exec_module(chi_configs)
return chi_configs
def create_system(options, full_system, system, dma_ports, bootmem,
ruby_system, cpus):
if buildEnv['PROTOCOL'] != 'CHI':
m5.panic("This script requires the CHI build")
if options.num_dirs < 1:
m5.fatal('--num-dirs must be at least 1')
if options.num_l3caches < 1:
m5.fatal('--num-l3caches must be at least 1')
if full_system and options.enable_dvm:
if len(cpus) <= 1:
m5.fatal("--enable-dvm can't be used with a single CPU")
for cpu in cpus:
for decoder in cpu.decoder:
decoder.dvm_enabled = True
# read specialized classes from config file if provided
if options.chi_config:
chi_defs = read_config_file(options.chi_config)
elif options.topology == 'CustomMesh':
m5.fatal('--noc-config must be provided if topology is CustomMesh')
else:
# Use the defaults from CHI_config
from . import CHI_config as chi_defs
# NoC params
params = chi_defs.NoC_Params
# Node types
CHI_RNF = chi_defs.CHI_RNF
CHI_HNF = chi_defs.CHI_HNF
CHI_MN = chi_defs.CHI_MN
CHI_SNF_MainMem = chi_defs.CHI_SNF_MainMem
CHI_SNF_BootMem = chi_defs.CHI_SNF_BootMem
CHI_RNI_DMA = chi_defs.CHI_RNI_DMA
CHI_RNI_IO = chi_defs.CHI_RNI_IO
# Declare caches and controller types used by the protocol
    # Notice tag and data accesses are not concurrent, so a cache hit
# latency = tag + data + response latencies.
# Default response latencies are 1 cy for all controllers.
# For L1 controllers the mandatoryQueue enqueue latency is always 1 cy and
# this is deducted from the initial tag read latency for sequencer requests
# dataAccessLatency may be set to 0 if one wants to consider parallel
# data and tag lookups
class L1ICache(RubyCache):
dataAccessLatency = 1
tagAccessLatency = 1
size = options.l1i_size
assoc = options.l1i_assoc
class L1DCache(RubyCache):
dataAccessLatency = 2
tagAccessLatency = 1
size = options.l1d_size
assoc = options.l1d_assoc
class L2Cache(RubyCache):
dataAccessLatency = 6
tagAccessLatency = 2
size = options.l2_size
assoc = options.l2_assoc
class HNFCache(RubyCache):
dataAccessLatency = 10
tagAccessLatency = 2
size = options.l3_size
assoc = options.l3_assoc
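    # Worked example (added comment, not in the original): with the defaults above
    # and the 1-cycle response latency noted earlier, an L1D hit costs
    # tag (1) + data (2) + response (1) = 4 cycles and an HNF (L3) hit costs
    # 2 + 10 + 1 = 13 cycles; sequencer requests see one cycle less on the first
    # tag read because of the mandatoryQueue enqueue latency mentioned above.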
# other functions use system.cache_line_size assuming it has been set
assert(system.cache_line_size.value == options.cacheline_size)
cpu_sequencers = []
mem_cntrls = []
mem_dests = []
network_nodes = []
network_cntrls = []
hnf_dests = []
all_cntrls = []
    # Creates one RNF per CPU with private L2 caches
assert(len(cpus) == options.num_cpus)
ruby_system.rnf = [ CHI_RNF([cpu], ruby_system, L1ICache, L1DCache,
system.cache_line_size.value)
for cpu in cpus ]
for rnf in ruby_system.rnf:
rnf.addPrivL2Cache(L2Cache)
cpu_sequencers.extend(rnf.getSequencers())
all_cntrls.extend(rnf.getAllControllers())
network_nodes.append(rnf)
network_cntrls.extend(rnf.getNetworkSideControllers())
# Creates one Misc Node
ruby_system.mn = [ CHI_MN(ruby_system, [cpu.l1d for cpu in cpus]) ]
for mn in ruby_system.mn:
all_cntrls.extend(mn.getAllControllers())
network_nodes.append(mn)
network_cntrls.extend(mn.getNetworkSideControllers())
assert(mn.getAllControllers() == mn.getNetworkSideControllers())
# Look for other memories
other_memories = []
if bootmem:
other_memories.append(bootmem)
if getattr(system, 'sram', None):
other_memories.append(getattr(system, 'sram', None))
on_chip_mem_ports = getattr(system, '_on_chip_mem_ports', None)
if on_chip_mem_ports:
other_memories.extend([p.simobj for p in on_chip_mem_ports])
    # Create the LLC controllers
sysranges = [] + system.mem_ranges
for m in other_memories:
sysranges.append(m.range)
hnf_list = [i for i in range(options.num_l3caches)]
CHI_HNF.createAddrRanges(sysranges, system.cache_line_size.value,
hnf_list)
ruby_system.hnf = [ CHI_HNF(i, ruby_system, HNFCache, None)
for i in range(options.num_l3caches) ]
for hnf in ruby_system.hnf:
network_nodes.append(hnf)
network_cntrls.extend(hnf.getNetworkSideControllers())
assert(hnf.getAllControllers() == hnf.getNetworkSideControllers())
all_cntrls.extend(hnf.getAllControllers())
hnf_dests.extend(hnf.getAllControllers())
# Create the memory controllers
# Notice we don't define a Directory_Controller type so we don't use
# create_directories shared by other protocols.
ruby_system.snf = [ CHI_SNF_MainMem(ruby_system, None, None)
for i in range(options.num_dirs) ]
for snf in ruby_system.snf:
network_nodes.append(snf)
network_cntrls.extend(snf.getNetworkSideControllers())
assert(snf.getAllControllers() == snf.getNetworkSideControllers())
mem_cntrls.extend(snf.getAllControllers())
all_cntrls.extend(snf.getAllControllers())
mem_dests.extend(snf.getAllControllers())
if len(other_memories) > 0:
ruby_system.rom_snf = [ CHI_SNF_BootMem(ruby_system, None, m)
for m in other_memories ]
for snf in ruby_system.rom_snf:
network_nodes.append(snf)
network_cntrls.extend(snf.getNetworkSideControllers())
all_cntrls.extend(snf.getAllControllers())
mem_dests.extend(snf.getAllControllers())
# Creates the controller for dma ports and io
if len(dma_ports) > 0:
ruby_system.dma_rni = [ CHI_RNI_DMA(ruby_system, dma_port, None)
for dma_port in dma_ports ]
for rni in ruby_system.dma_rni:
network_nodes.append(rni)
network_cntrls.extend(rni.getNetworkSideControllers())
all_cntrls.extend(rni.getAllControllers())
if full_system:
ruby_system.io_rni = CHI_RNI_IO(ruby_system, None)
network_nodes.append(ruby_system.io_rni)
network_cntrls.extend(ruby_system.io_rni.getNetworkSideControllers())
all_cntrls.extend(ruby_system.io_rni.getAllControllers())
# Assign downstream destinations
for rnf in ruby_system.rnf:
rnf.setDownstream(hnf_dests)
if len(dma_ports) > 0:
for rni in ruby_system.dma_rni:
rni.setDownstream(hnf_dests)
if full_system:
ruby_system.io_rni.setDownstream(hnf_dests)
for hnf in ruby_system.hnf:
hnf.setDownstream(mem_dests)
# Setup data message size for all controllers
for cntrl in all_cntrls:
cntrl.data_channel_size = params.data_width
# Network configurations
# virtual networks: 0=request, 1=snoop, 2=response, 3=data
ruby_system.network.number_of_virtual_networks = 4
ruby_system.network.control_msg_size = params.cntrl_msg_size
ruby_system.network.data_msg_size = params.data_width
ruby_system.network.buffer_size = params.router_buffer_size
    # Incorporate the params into options so they are propagated to
    # makeTopology and create_topology in the parent scripts
for k in dir(params):
if not k.startswith('__'):
setattr(options, k, getattr(params, k))
if options.topology == 'CustomMesh':
topology = create_topology(network_nodes, options)
elif options.topology in ['Crossbar', 'Pt2Pt']:
topology = create_topology(network_cntrls, options)
else:
m5.fatal("%s not supported!" % options.topology)
return (cpu_sequencers, mem_cntrls, topology)
| {
"content_hash": "05edff0657b3f887aa163cfa3434dc9a",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 79,
"avg_line_length": 37.24242424242424,
"alnum_prop": 0.643728931767988,
"repo_name": "gem5/gem5",
"id": "c94dc943f9ea424660435c815a3a57cd888d8ba3",
"size": "10673",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "configs/ruby/CHI.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "145626"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "C",
"bytes": "3927153"
},
{
"name": "C++",
"bytes": "42960484"
},
{
"name": "CMake",
"bytes": "133888"
},
{
"name": "Dockerfile",
"bytes": "34102"
},
{
"name": "Emacs Lisp",
"bytes": "1914"
},
{
"name": "Forth",
"bytes": "354"
},
{
"name": "Fortran",
"bytes": "15436"
},
{
"name": "HTML",
"bytes": "146414"
},
{
"name": "Hack",
"bytes": "139769"
},
{
"name": "Java",
"bytes": "6966"
},
{
"name": "M4",
"bytes": "42624"
},
{
"name": "Makefile",
"bytes": "39573"
},
{
"name": "Perl",
"bytes": "23784"
},
{
"name": "Python",
"bytes": "8079781"
},
{
"name": "Roff",
"bytes": "8754"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "5328"
},
{
"name": "Shell",
"bytes": "95638"
},
{
"name": "Starlark",
"bytes": "25668"
},
{
"name": "SuperCollider",
"bytes": "8869"
},
{
"name": "Vim Script",
"bytes": "4343"
},
{
"name": "sed",
"bytes": "3897"
}
],
"symlink_target": ""
} |
from django.conf.urls import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^fake-login$', 'tests.views.fake_login', name='sentry-fake-login'),
url(r'^trigger-500$', 'tests.views.raise_exc', name='sentry-raise-exc'),
url(r'^trigger-500-decorated$', 'tests.views.decorated_raise_exc', name='sentry-raise-exc-decor'),
url(r'^trigger-500-django$', 'tests.views.django_exc', name='sentry-django-exc'),
url(r'^trigger-500-template$', 'tests.views.template_exc', name='sentry-template-exc'),
url(r'^trigger-500-log-request$', 'tests.views.logging_request_exc', name='sentry-log-request-exc'),
url(r'', include('sentry.web.urls')),
) | {
"content_hash": "42ba748e9f11ec6bf23eb7e1a83b24e1",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 104,
"avg_line_length": 49.666666666666664,
"alnum_prop": 0.6859060402684564,
"repo_name": "primepix/django-sentry",
"id": "2a45d69b6b8b8530923f49d336468fc8426dfdf8",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "20952"
},
{
"name": "JavaScript",
"bytes": "10544"
},
{
"name": "Python",
"bytes": "300510"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
} |
r"""Infers detections on a TFRecord of TFExamples given an inference graph.
Example usage:
./infer_detections \
--input_tfrecord_paths=/path/to/input/tfrecord1,/path/to/input/tfrecord2 \
--output_tfrecord_path_prefix=/path/to/output/detections.tfrecord \
--inference_graph=/path/to/frozen_weights_inference_graph.pb
The output is a TFRecord of TFExamples. Each TFExample from the input is first
augmented with detections from the inference graph and then copied to the
output.
The input and output nodes of the inference graph are expected to have the same
types, shapes, and semantics, as the input and output nodes of graphs produced
by export_inference_graph.py, when run with --input_type=image_tensor.
The script can also discard the image pixels in the output. This greatly
reduces the output size and can potentially accelerate reading data in
subsequent processing steps that don't require the images (e.g. computing
metrics).
"""
import itertools
import tensorflow as tf
from object_detection.inference import detection_inference
tf.flags.DEFINE_string('input_tfrecord_paths', None,
'A comma separated list of paths to input TFRecords.')
tf.flags.DEFINE_string('output_tfrecord_path', None,
'Path to the output TFRecord.')
tf.flags.DEFINE_string('inference_graph', None,
'Path to the inference graph with embedded weights.')
tf.flags.DEFINE_boolean('discard_image_pixels', False,
'Discards the images in the output TFExamples. This'
' significantly reduces the output size and is useful'
' if the subsequent tools don\'t need access to the'
' images (e.g. when computing evaluation measures).')
FLAGS = tf.flags.FLAGS
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
required_flags = ['input_tfrecord_paths', 'output_tfrecord_path',
'inference_graph']
for flag_name in required_flags:
if not getattr(FLAGS, flag_name):
raise ValueError('Flag --{} is required'.format(flag_name))
with tf.Session() as sess:
input_tfrecord_paths = [
v for v in FLAGS.input_tfrecord_paths.split(',') if v]
tf.logging.info('Reading input from %d files', len(input_tfrecord_paths))
serialized_example_tensor, image_tensor = detection_inference.build_input(
input_tfrecord_paths)
tf.logging.info('Reading graph and building model...')
(detected_boxes_tensor, detected_scores_tensor,
detected_labels_tensor) = detection_inference.build_inference_graph(
image_tensor, FLAGS.inference_graph)
tf.logging.info('Running inference and writing output to {}'.format(
FLAGS.output_tfrecord_path))
sess.run(tf.local_variables_initializer())
tf.train.start_queue_runners()
with tf.python_io.TFRecordWriter(
FLAGS.output_tfrecord_path) as tf_record_writer:
try:
for counter in itertools.count():
tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 10,
counter)
tf_example = detection_inference.infer_detections_and_add_to_example(
serialized_example_tensor, detected_boxes_tensor,
detected_scores_tensor, detected_labels_tensor,
FLAGS.discard_image_pixels)
tf_record_writer.write(tf_example.SerializeToString())
except tf.errors.OutOfRangeError:
tf.logging.info('Finished processing records')
if __name__ == '__main__':
tf.app.run()
| {
"content_hash": "648cdafc4c4ce1cd4d7ee838ae25817f",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 43.58536585365854,
"alnum_prop": 0.682988248461108,
"repo_name": "cshallue/models",
"id": "a251009ef0d415e08395be038dbc4ed42d804ff7",
"size": "4263",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "research/object_detection/inference/infer_detections.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "2829707"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "13149300"
},
{
"name": "Shell",
"bytes": "146035"
}
],
"symlink_target": ""
} |
import numpy as np
def load(fileno, training=True):
"""
    Takes the training file number and returns the training and test data.
Ex. fileno = 1 for u1.base and u1.test
fileno = 5 for u5.base and u5.test
"""
# userid songid user-rating
basedir = "ml-100k/u%s." % (fileno)
with open(basedir + 'base') as f:
training = np.loadtxt(f)
with open(basedir + 'test') as f:
test = np.loadtxt(f)
with open('ml-100k/u.info') as f:
metafile = np.genfromtxt(f)
metafile = np.delete(metafile, len(metafile[0])-1, 1)
metadata = {}
metadata['users'] = metafile[0][0]
metadata['items'] = metafile[1][0]
metadata['ratings'] = metafile[2][0]
return training[:, :-1], test[:, :-1], metadata
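# A minimal usage sketch (added, not part of the original file); assumes the
# ml-100k files sit next to this script as in the repository layout.
if __name__ == '__main__':
    train, test, meta = load(1)
    # Each row is [user_id, item_id, rating]; the timestamp column was dropped.
    print(train.shape, test.shape, meta['users'], meta['items'])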
| {
"content_hash": "10f6e4e4a12112b3a62a35e1eb6d0aea",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 64,
"avg_line_length": 34.31818181818182,
"alnum_prop": 0.5947019867549669,
"repo_name": "chandan5/Music-Recommendation-System",
"id": "cc928a49d6fcaec0d7c9bef8876f3cb8a7e5b10c",
"size": "755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "716"
},
{
"name": "Python",
"bytes": "4547"
},
{
"name": "Shell",
"bytes": "643"
}
],
"symlink_target": ""
} |
import hackarena.constants
from hackarena.constants import Classes
from hackarena.game_objects import BaseGameObject
class Player(BaseGameObject):
def __init__(
self,
username,
character_class,
team,
hp=0,
last_death=0,
):
self.MAX_HP = Classes.MAX_HP[character_class]
self.username = username
self.character_class = character_class
self.available_spells = Classes.AVAILABLE_SPELLS[character_class]
self.spell_cast_times = dict((spell, 0) for spell in self.available_spells)
self.team = team
self.reset()
self.last_death = last_death
def reset(self):
self.hp = Classes.MAX_HP[self.character_class]
self.position = {
'x': 7 if self.team == 'blue' else hackarena.constants.MAP_TILES_WIDTH - 8,
'y': 8 if self.team == 'blue' else hackarena.constants.MAP_TILES_HEIGHT - 9,
}
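# Spawn points are mirrored: the blue team starts a few tiles in from one corner of
# the map, and the opposing team the same distance in from the opposite corner.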
| {
"content_hash": "709383255985cde0704662dba94cdacc",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 88,
"avg_line_length": 30.580645161290324,
"alnum_prop": 0.6170886075949367,
"repo_name": "verekia/hackarena",
"id": "8ea08fd6b1ed9b9ae2942e6f142e280e71f6bc8b",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hackarena/player.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "36"
},
{
"name": "CSS",
"bytes": "2815"
},
{
"name": "HTML",
"bytes": "3430"
},
{
"name": "JavaScript",
"bytes": "2809068"
},
{
"name": "Python",
"bytes": "46304"
},
{
"name": "Shell",
"bytes": "35"
}
],
"symlink_target": ""
} |
import logging
import numpy as np
import paddle
__all__ = []
def _simple_network():
"""
Define a simple network composed by a single linear layer.
"""
input = paddle.static.data(
name="input", shape=[None, 2, 2], dtype="float32"
)
weight = paddle.create_parameter(
shape=[2, 3],
dtype="float32",
attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.1)),
)
bias = paddle.create_parameter(shape=[3], dtype="float32")
linear_out = paddle.nn.functional.linear(x=input, weight=weight, bias=bias)
out = paddle.tensor.sum(linear_out)
return input, out, weight
def _prepare_data(device_count):
"""
Prepare feeding data for simple network. The shape is [device_count, 2, 2].
Args:
device_count (int): The number of devices.
"""
# Prepare the feeding data.
np_input_single = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
if device_count == 1:
return np_input_single.reshape(device_count, 2, 2)
else:
input_list = []
for i in range(device_count):
input_list.append(np_input_single)
np_input_muti = np.array(input_list)
np_input_muti = np_input_muti.reshape(device_count, 2, 2)
return np_input_muti
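# For example, _prepare_data(2) stacks the single 2x2 sample twice and returns an
# array of shape (2, 2, 2) -- one slice per device.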
def _is_cuda_available():
"""
    Check whether CUDA is available.
"""
try:
assert len(paddle.static.cuda_places()) > 0
return True
except Exception as e:
logging.warning(
"You are using GPU version PaddlePaddle, but there is no GPU "
"detected on your machine. Maybe CUDA devices is not set properly."
"\n Original Error is {}".format(e)
)
return False
def _is_npu_available():
"""
    Check whether NPU is available.
"""
try:
assert len(paddle.static.npu_places()) > 0
return True
except Exception as e:
logging.warning(
"You are using NPU version PaddlePaddle, but there is no NPU "
"detected on your machine. Maybe NPU devices is not set properly."
"\n Original Error is {}".format(e)
)
return False
def _is_xpu_available():
"""
    Check whether XPU is available.
"""
try:
assert len(paddle.static.xpu_places()) > 0
return True
except Exception as e:
logging.warning(
"You are using XPU version PaddlePaddle, but there is no XPU "
"detected on your machine. Maybe XPU devices is not set properly."
"\n Original Error is {}".format(e)
)
return False
def _run_dygraph_single(use_cuda, use_xpu, use_npu):
"""
Testing the simple network in dygraph mode using one CPU/GPU/XPU/NPU.
Args:
use_cuda (bool): Whether running with CUDA.
use_xpu (bool): Whether running with XPU.
use_npu (bool): Whether running with NPU.
"""
paddle.disable_static()
if use_cuda:
paddle.set_device('gpu')
elif use_xpu:
paddle.set_device('xpu')
elif use_npu:
paddle.set_device('npu')
else:
paddle.set_device('cpu')
weight_attr = paddle.ParamAttr(
name="weight", initializer=paddle.nn.initializer.Constant(value=0.5)
)
bias_attr = paddle.ParamAttr(
name="bias", initializer=paddle.nn.initializer.Constant(value=1.0)
)
linear = paddle.nn.Linear(
2, 4, weight_attr=weight_attr, bias_attr=bias_attr
)
input_np = _prepare_data(1)
input_tensor = paddle.to_tensor(input_np)
linear_out = linear(input_tensor)
out = paddle.tensor.sum(linear_out)
out.backward()
opt = paddle.optimizer.Adam(
learning_rate=0.001, parameters=linear.parameters()
)
opt.step()
def _run_static_single(use_cuda, use_xpu, use_npu):
"""
Testing the simple network with executor running directly, using one CPU/GPU/XPU/NPU.
Args:
use_cuda (bool): Whether running with CUDA.
use_xpu (bool): Whether running with XPU.
use_npu (bool): Whether running with NPU.
"""
paddle.enable_static()
with paddle.static.scope_guard(paddle.static.Scope()):
train_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
startup_prog.random_seed = 1
with paddle.static.program_guard(train_prog, startup_prog):
input, out, weight = _simple_network()
param_grads = paddle.static.append_backward(
out, parameter_list=[weight.name]
)[0]
if use_cuda:
place = paddle.CUDAPlace(0)
elif use_xpu:
place = paddle.XPUPlace(0)
elif use_npu:
place = paddle.NPUPlace(0)
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
exe.run(
train_prog,
feed={input.name: _prepare_data(1)},
fetch_list=[out.name, param_grads[1].name],
)
paddle.disable_static()
def _run_static_parallel(use_cuda, use_xpu, use_npu, device_list):
"""
Testing the simple network in data parallel mode, using multiple CPU/GPU.
Args:
use_cuda (bool): Whether running with CUDA.
use_xpu (bool): Whether running with XPU.
use_npu (bool): Whether running with NPU.
device_list (int): The specified devices.
"""
paddle.enable_static()
with paddle.static.scope_guard(paddle.static.Scope()):
train_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(train_prog, startup_prog):
input, out, _ = _simple_network()
loss = paddle.tensor.mean(out)
loss.persistable = True
paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)
compiled_prog = paddle.static.CompiledProgram(
train_prog
).with_data_parallel(loss_name=loss.name, places=device_list)
if use_cuda:
place = paddle.CUDAPlace(0)
elif use_xpu:
place = paddle.XPUPlace(0)
compiled_prog = train_prog
elif use_npu:
place = paddle.NPUPlace(0)
compiled_prog = train_prog
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
exe.run(
compiled_prog,
feed={input.name: _prepare_data(len(device_list))},
fetch_list=[loss.name],
)
paddle.disable_static()
def run_check():
"""
Check whether PaddlePaddle is installed correctly and running successfully
on your system.
Examples:
.. code-block:: python
import paddle
paddle.utils.run_check()
# Running verify PaddlePaddle program ...
# W1010 07:21:14.972093 8321 device_context.cc:338] Please NOTE: device: 0, CUDA Capability: 70, Driver API Version: 11.0, Runtime API Version: 10.1
# W1010 07:21:14.979770 8321 device_context.cc:346] device: 0, cuDNN Version: 7.6.
# PaddlePaddle works well on 1 GPU.
# PaddlePaddle works well on 8 GPUs.
# PaddlePaddle is installed successfully! Let's start deep learning with PaddlePaddle now.
"""
print("Running verify PaddlePaddle program ... ")
use_cuda = False
use_xpu = False
use_npu = False
if paddle.is_compiled_with_cuda():
use_cuda = _is_cuda_available()
elif paddle.is_compiled_with_xpu():
use_xpu = _is_xpu_available()
elif paddle.is_compiled_with_npu():
use_npu = _is_npu_available()
if use_cuda:
device_str = "GPU"
device_list = paddle.static.cuda_places()
elif use_xpu:
device_str = "XPU"
device_list = paddle.static.xpu_places()
elif use_npu:
device_str = "NPU"
device_list = paddle.static.npu_places()
else:
device_str = "CPU"
device_list = paddle.static.cpu_places(device_count=2)
device_count = len(device_list)
_run_static_single(use_cuda, use_xpu, use_npu)
_run_dygraph_single(use_cuda, use_xpu, use_npu)
print("PaddlePaddle works well on 1 {}.".format(device_str))
try:
_run_static_parallel(use_cuda, use_xpu, use_npu, device_list)
print(
"PaddlePaddle works well on {} {}s.".format(
device_count, device_str
)
)
print(
"PaddlePaddle is installed successfully! Let's start deep learning with PaddlePaddle now."
)
except Exception as e:
logging.warning(
"PaddlePaddle meets some problem with {} {}s. This may be caused by:"
"\n 1. There is not enough GPUs visible on your system"
"\n 2. Some GPUs are occupied by other process now"
"\n 3. NVIDIA-NCCL2 is not installed correctly on your system. Please follow instruction on https://github.com/NVIDIA/nccl-tests "
"\n to test your NCCL, or reinstall it following https://docs.nvidia.com/deeplearning/sdk/nccl-install-guide/index.html".format(
device_count, device_str
)
)
logging.warning("\n Original Error is: {}".format(e))
print(
"PaddlePaddle is installed successfully ONLY for single {}! "
"Let's start deep learning with PaddlePaddle now.".format(
device_str
)
)
| {
"content_hash": "e40a57985bf2ec5ef6a601f049c4d888",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 161,
"avg_line_length": 32.37074829931973,
"alnum_prop": 0.5953556793107072,
"repo_name": "luotao1/Paddle",
"id": "c9cc1bb7a49e274e45f4ed07803ea21e6f416ada",
"size": "10128",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/utils/install_check.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import frappe
import os, base64, re
import hashlib
import mimetypes
from frappe.utils import get_hook_method, get_files_path, random_string, encode, cstr, call_hook_method, cint
from frappe import _
from frappe import conf
from copy import copy
from six.moves.urllib.parse import unquote
from six import text_type
class MaxFileSizeReachedError(frappe.ValidationError): pass
def get_file_url(file_data_name):
data = frappe.db.get_value("File", file_data_name, ["file_name", "file_url"], as_dict=True)
return data.file_url or data.file_name
def upload():
# get record details
dt = frappe.form_dict.doctype
dn = frappe.form_dict.docname
file_url = frappe.form_dict.file_url
filename = frappe.form_dict.filename
frappe.form_dict.is_private = cint(frappe.form_dict.is_private)
if not filename and not file_url:
frappe.msgprint(_("Please select a file or url"),
raise_exception=True)
file_doc = get_file_doc()
comment = {}
if dt and dn:
comment = frappe.get_doc(dt, dn).add_comment("Attachment",
_("added {0}").format("<a href='{file_url}' target='_blank'>{file_name}</a>{icon}".format(**{
"icon": ' <i class="fa fa-lock text-warning"></i>' \
if file_doc.is_private else "",
"file_url": file_doc.file_url.replace("#", "%23") \
if file_doc.file_name else file_doc.file_url,
"file_name": file_doc.file_name or file_doc.file_url
})))
return {
"name": file_doc.name,
"file_name": file_doc.file_name,
"file_url": file_doc.file_url,
"is_private": file_doc.is_private,
"comment": comment.as_dict() if comment else {}
}
def get_file_doc(dt=None, dn=None, folder=None, is_private=None):
'''returns File object (Document) from given parameters or form_dict'''
r = frappe.form_dict
if dt is None: dt = r.doctype
if dn is None: dn = r.docname
if folder is None: folder = r.folder
if is_private is None: is_private = r.is_private
if r.filedata:
file_doc = save_uploaded(dt, dn, folder, is_private)
elif r.file_url:
file_doc = save_url(r.file_url, r.filename, dt, dn, folder, is_private)
return file_doc
def save_uploaded(dt, dn, folder, is_private):
fname, content = get_uploaded_content()
if content:
		return save_file(fname, content, dt, dn, folder, is_private=is_private)
else:
raise Exception
def save_url(file_url, filename, dt, dn, folder, is_private):
# if not (file_url.startswith("http://") or file_url.startswith("https://")):
# frappe.msgprint("URL must start with 'http://' or 'https://'")
# return None, None
file_url = unquote(file_url)
f = frappe.get_doc({
"doctype": "File",
"file_url": file_url,
"file_name": filename,
"attached_to_doctype": dt,
"attached_to_name": dn,
"folder": folder,
"is_private": is_private
})
f.flags.ignore_permissions = True
try:
		f.insert()
except frappe.DuplicateEntryError:
return frappe.get_doc("File", f.duplicate_entry)
return f
def get_uploaded_content():
# should not be unicode when reading a file, hence using frappe.form
if 'filedata' in frappe.form_dict:
if "," in frappe.form_dict.filedata:
frappe.form_dict.filedata = frappe.form_dict.filedata.rsplit(",", 1)[1]
frappe.uploaded_content = base64.b64decode(frappe.form_dict.filedata)
frappe.uploaded_filename = frappe.form_dict.filename
return frappe.uploaded_filename, frappe.uploaded_content
else:
frappe.msgprint(_('No file attached'))
return None, None
def save_file(fname, content, dt, dn, folder=None, decode=False, is_private=0):
if decode:
if isinstance(content, text_type):
content = content.encode("utf-8")
if "," in content:
content = content.split(",")[1]
content = base64.b64decode(content)
file_size = check_max_file_size(content)
content_hash = get_content_hash(content)
content_type = mimetypes.guess_type(fname)[0]
fname = get_file_name(fname, content_hash[-6:])
file_data = get_file_data_from_hash(content_hash, is_private=is_private)
if not file_data:
call_hook_method("before_write_file", file_size=file_size)
write_file_method = get_hook_method('write_file', fallback=save_file_on_filesystem)
file_data = write_file_method(fname, content, content_type=content_type, is_private=is_private)
file_data = copy(file_data)
file_data.update({
"doctype": "File",
"attached_to_doctype": dt,
"attached_to_name": dn,
"folder": folder,
"file_size": file_size,
"content_hash": content_hash,
"is_private": is_private
})
f = frappe.get_doc(file_data)
f.flags.ignore_permissions = True
try:
f.insert()
except frappe.DuplicateEntryError:
return frappe.get_doc("File", f.duplicate_entry)
return f
def get_file_data_from_hash(content_hash, is_private=0):
for name in frappe.db.sql_list("select name from `tabFile` where content_hash=%s and is_private=%s", (content_hash, is_private)):
b = frappe.get_doc('File', name)
return {k:b.get(k) for k in frappe.get_hooks()['write_file_keys']}
return False
def save_file_on_filesystem(fname, content, content_type=None, is_private=0):
fpath = write_file(content, fname, is_private)
if is_private:
file_url = "/private/files/{0}".format(fname)
else:
file_url = "/files/{0}".format(fname)
return {
'file_name': os.path.basename(fpath),
'file_url': file_url
}
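# The upload limit defaults to 10485760 bytes (10 MB) unless max_file_size is set
# in the site config.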
def get_max_file_size():
return conf.get('max_file_size') or 10485760
def check_max_file_size(content):
max_file_size = get_max_file_size()
file_size = len(content)
if file_size > max_file_size:
frappe.msgprint(_("File size exceeded the maximum allowed size of {0} MB").format(
max_file_size / 1048576),
raise_exception=MaxFileSizeReachedError)
return file_size
def write_file(content, fname, is_private=0):
"""write file to disk with a random name (to compare)"""
file_path = get_files_path(is_private=is_private)
# create directory (if not exists)
frappe.create_folder(file_path)
# write the file
with open(os.path.join(file_path.encode('utf-8'), fname.encode('utf-8')), 'w+') as f:
f.write(content)
return get_files_path(fname, is_private=is_private)
def remove_all(dt, dn, from_delete=False):
"""remove all files in a transaction"""
try:
for fid in frappe.db.sql_list("""select name from `tabFile` where
attached_to_doctype=%s and attached_to_name=%s""", (dt, dn)):
remove_file(fid, dt, dn, from_delete)
except Exception as e:
		if e.args[0] != 1054: raise  # temporary, until patched (MySQL 1054: unknown column)
def remove_file_by_url(file_url, doctype=None, name=None):
if doctype and name:
fid = frappe.db.get_value("File", {"file_url": file_url,
"attached_to_doctype": doctype, "attached_to_name": name})
else:
fid = frappe.db.get_value("File", {"file_url": file_url})
if fid:
return remove_file(fid)
def remove_file(fid, attached_to_doctype=None, attached_to_name=None, from_delete=False):
"""Remove file and File entry"""
file_name = None
if not (attached_to_doctype and attached_to_name):
attached = frappe.db.get_value("File", fid,
["attached_to_doctype", "attached_to_name", "file_name"])
if attached:
attached_to_doctype, attached_to_name, file_name = attached
ignore_permissions, comment = False, None
if attached_to_doctype and attached_to_name and not from_delete:
doc = frappe.get_doc(attached_to_doctype, attached_to_name)
ignore_permissions = doc.has_permission("write") or False
if frappe.flags.in_web_form:
ignore_permissions = True
if not file_name:
file_name = frappe.db.get_value("File", fid, "file_name")
comment = doc.add_comment("Attachment Removed", _("Removed {0}").format(file_name))
frappe.delete_doc("File", fid, ignore_permissions=ignore_permissions)
return comment
def delete_file_data_content(doc, only_thumbnail=False):
method = get_hook_method('delete_file_data_content', fallback=delete_file_from_filesystem)
method(doc, only_thumbnail=only_thumbnail)
def delete_file_from_filesystem(doc, only_thumbnail=False):
"""Delete file, thumbnail from File document"""
if only_thumbnail:
delete_file(doc.thumbnail_url)
else:
delete_file(doc.file_url)
delete_file(doc.thumbnail_url)
def delete_file(path):
"""Delete file from `public folder`"""
if path:
if ".." in path.split("/"):
frappe.msgprint(_("It is risky to delete this file: {0}. Please contact your System Manager.").format(path))
parts = os.path.split(path.strip("/"))
if parts[0]=="files":
path = frappe.utils.get_site_path("public", "files", parts[-1])
else:
path = frappe.utils.get_site_path("private", "files", parts[-1])
path = encode(path)
if os.path.exists(path):
os.remove(path)
def get_file(fname):
"""Returns [`file_name`, `content`] for given file name `fname`"""
file_path = get_file_path(fname)
# read the file
with open(encode(file_path), 'r') as f:
content = f.read()
return [file_path.rsplit("/", 1)[-1], content]
def get_file_path(file_name):
"""Returns file path from given file name"""
f = frappe.db.sql("""select file_url from `tabFile`
where name=%s or file_name=%s""", (file_name, file_name))
if f:
file_name = f[0][0]
file_path = file_name
if "/" not in file_path:
file_path = "/files/" + file_path
if file_path.startswith("/private/files/"):
file_path = get_files_path(*file_path.split("/private/files/", 1)[1].split("/"), is_private=1)
elif file_path.startswith("/files/"):
file_path = get_files_path(*file_path.split("/files/", 1)[1].split("/"))
else:
frappe.throw(_("There is some problem with the file url: {0}").format(file_path))
return file_path
def get_content_hash(content):
if isinstance(content, text_type):
content = content.encode()
return hashlib.md5(content).hexdigest()
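# e.g. get_content_hash(b"hello") returns the MD5 hex digest
# "5d41402abc4b2a76b9719d911017c592"; unicode input is encoded to bytes first.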
def get_file_name(fname, optional_suffix):
# convert to unicode
fname = cstr(fname)
n_records = frappe.db.sql("select name from `tabFile` where file_name=%s", fname)
if len(n_records) > 0 or os.path.exists(encode(get_files_path(fname))):
f = fname.rsplit('.', 1)
if len(f) == 1:
partial, extn = f[0], ""
else:
partial, extn = f[0], "." + f[1]
return '{partial}{suffix}{extn}'.format(partial=partial, extn=extn, suffix=optional_suffix)
return fname
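# e.g. if "report.pdf" already exists, a hash-derived suffix such as "a1b2c3" is
# spliced in before the extension, giving "reporta1b2c3.pdf".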
@frappe.whitelist()
def download_file(file_url):
"""
Download file using token and REST API. Valid session or
token is required to download private files.
Method : GET
Endpoint : frappe.utils.file_manager.download_file
	URL Params : file_url = /path/to/file relative to site path
"""
file_doc = frappe.get_doc("File", {"file_url":file_url})
file_doc.check_permission("read")
with open(getattr(frappe.local, "site_path", None) + file_url, "rb") as fileobj:
filedata = fileobj.read()
frappe.local.response.filename = file_url[file_url.rfind("/")+1:]
frappe.local.response.filecontent = filedata
frappe.local.response.type = "download"
def extract_images_from_doc(doc, fieldname):
content = doc.get(fieldname)
content = extract_images_from_html(doc, content)
if frappe.flags.has_dataurl:
doc.set(fieldname, content)
def extract_images_from_html(doc, content):
frappe.flags.has_dataurl = False
def _save_file(match):
data = match.group(1)
data = data.split("data:")[1]
headers, content = data.split(",")
if "filename=" in headers:
filename = headers.split("filename=")[-1]
# decode filename
if not isinstance(filename, text_type):
filename = text_type(filename, 'utf-8')
else:
mtype = headers.split(";")[0]
filename = get_random_filename(content_type=mtype)
doctype = doc.parenttype if doc.parent else doc.doctype
name = doc.parent or doc.name
# TODO fix this
file_url = save_file(filename, content, doctype, name, decode=True).get("file_url")
if not frappe.flags.has_dataurl:
frappe.flags.has_dataurl = True
return '<img src="{file_url}"'.format(file_url=file_url)
if content:
		content = re.sub(r'<img[^>]*src\s*=\s*["\'](?=data:)(.*?)["\']', _save_file, content)
return content
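# The regex above matches <img> tags whose src is an inline data URL; each match is
# saved as a File document via save_file() and the tag is rewritten to reference the
# stored file_url instead of the embedded base64 payload.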
def get_random_filename(extn=None, content_type=None):
if extn:
if not extn.startswith("."):
extn = "." + extn
elif content_type:
extn = mimetypes.guess_extension(content_type)
return random_string(7) + (extn or "")
| {
"content_hash": "e85369653effb60a34e3692da77f85d2",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 130,
"avg_line_length": 31.303896103896104,
"alnum_prop": 0.6913375373382011,
"repo_name": "maxtorete/frappe",
"id": "822b63e240ca96384eda2c04dbdb633e95977b26",
"size": "12153",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "frappe/utils/file_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "406441"
},
{
"name": "HTML",
"bytes": "213724"
},
{
"name": "JavaScript",
"bytes": "1742788"
},
{
"name": "Makefile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "1966420"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
"""Tests for normalization layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
class NormalizationLayersTest(test.TestCase):
def test_basic_batchnorm(self):
with self.cached_session():
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'momentum': 0.9,
'epsilon': 0.1,
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
def test_batchnorm_weights(self):
with self.cached_session():
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 2)
layer = keras.layers.BatchNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
def test_batchnorm_regularization(self):
with self.cached_session():
layer = keras.layers.BatchNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.BatchNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
def test_batchnorm_correctness(self):
with self.cached_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
def test_batchnorm_mixed_precision(self):
with self.cached_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(
loc=5.0, scale=10.0, size=(1000, 10)).astype(np.float16)
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
def test_batchnorm_convnet(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=1, input_shape=(3, 4, 4), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
def test_batchnorm_convnet_channel_last(self):
with self.cached_session():
# keras.backend.set_learning_phase(True)
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=-1, input_shape=(4, 4, 3), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
def test_shared_batchnorm(self):
"""Test that a BN layer can be shared across different data streams.
"""
with self.cached_session():
# Test single layer reuse
bn = keras.layers.BatchNormalization()
x1 = keras.layers.Input(shape=(10,))
_ = bn(x1)
x2 = keras.layers.Input(shape=(10,))
y2 = bn(x2)
x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
model = keras.models.Model(x2, y2)
model.compile('sgd', 'mse')
model.train_on_batch(x, x)
self.assertEqual(len(bn.updates), 4)
self.assertEqual(len(model.updates), 2)
self.assertEqual(len(model.get_updates_for(x1)), 0)
self.assertEqual(len(model.get_updates_for(x2)), 2)
# Test model-level reuse
x3 = keras.layers.Input(shape=(10,))
y3 = model(x3)
new_model = keras.models.Model(x3, y3, name='new_model')
self.assertEqual(len(new_model.updates), 2)
self.assertEqual(len(model.updates), 4)
self.assertEqual(len(new_model.get_updates_for(x3)), 2)
new_model.compile('sgd', 'mse')
new_model.train_on_batch(x, x)
def test_that_trainable_disables_updates(self):
with self.cached_session():
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
a = keras.layers.Input(shape=(4,))
layer = keras.layers.BatchNormalization(input_shape=(4,))
b = layer(a)
model = keras.models.Model(a, b)
model.trainable = False
assert not model.updates
model.compile('sgd', 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile('sgd', 'mse')
assert model.updates
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
layer.trainable = False
model.compile('sgd', 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
def test_batchnorm_trainable(self):
"""Tests that batchnorm layer is trainable when learning phase is enabled.
Computes mean and std for current inputs then
applies batch normalization using them.
"""
with self.cached_session():
bn_mean = 0.5
bn_std = 10.
val_a = np.expand_dims(np.arange(10.), axis=1)
def get_model(bn_mean, bn_std):
inp = keras.layers.Input(shape=(1,))
x = keras.layers.BatchNormalization()(inp)
model1 = keras.models.Model(inp, x)
model1.set_weights([
np.array([1.]),
np.array([0.]),
np.array([bn_mean]),
np.array([bn_std**2])
])
return model1
# Simulates training-mode with trainable layer.
# Should use mini-batch statistics.
keras.backend.set_learning_phase(1)
model = get_model(bn_mean, bn_std)
model.compile(loss='mse', optimizer='rmsprop')
out = model.predict(val_a)
self.assertAllClose(
(val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3)
if __name__ == '__main__':
test.main()
| {
"content_hash": "e5e0ce9ebe66608892b9a741271e958b",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 80,
"avg_line_length": 34.61943319838057,
"alnum_prop": 0.6123260437375746,
"repo_name": "girving/tensorflow",
"id": "ff705183ef1aa5298030c0dbd630c23baffb4203",
"size": "9240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/normalization_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "343258"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50036869"
},
{
"name": "CMake",
"bytes": "196127"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254086"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867313"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58787"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "42041620"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "477299"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from nose.tools import * # flake8: noqa
from urlparse import urlparse
from framework.auth.core import Auth
from website.models import NodeLog
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory
)
from tests.utils import assert_logs
node_url_for = lambda n_id: '/{}nodes/{}/'.format(API_BASE, n_id)
class TestNodeLinksList(ApiTestCase):
def setUp(self):
super(TestNodeLinksList, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=False, creator=self.user)
self.pointer_project = ProjectFactory(is_public=False, creator=self.user)
self.project.add_pointer(self.pointer_project, auth=Auth(self.user))
self.private_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.project._id)
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
self.public_project.add_pointer(self.public_pointer_project, auth=Auth(self.user))
self.public_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.public_project._id)
self.user_two = AuthUserFactory()
def test_return_public_node_pointers_logged_out(self):
res = self.app.get(self.public_url)
res_json = res.json['data']
assert_equal(len(res_json), 1)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
expected_path = node_url_for(self.public_pointer_project._id)
actual_path = urlparse(res_json[0]['relationships']['target_node']['links']['related']['href']).path
assert_equal(expected_path, actual_path)
def test_return_public_node_pointers_logged_in(self):
res = self.app.get(self.public_url, auth=self.user_two.auth)
res_json = res.json['data']
assert_equal(len(res_json), 1)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
expected_path = node_url_for(self.public_pointer_project._id)
actual_path = urlparse(res_json[0]['relationships']['target_node']['links']['related']['href']).path
assert_equal(expected_path, actual_path)
def test_return_private_node_pointers_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
def test_return_private_node_pointers_logged_in_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res_json), 1)
expected_path = node_url_for(self.pointer_project._id)
actual_path = urlparse(res_json[0]['relationships']['target_node']['links']['related']['href']).path
assert_equal(expected_path, actual_path)
def test_return_private_node_pointers_logged_in_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_deleted_links_not_returned(self):
res = self.app.get(self.public_url, expect_errors=True)
res_json = res.json['data']
original_length = len(res_json)
self.public_pointer_project.is_deleted = True
self.public_pointer_project.save()
res = self.app.get(self.public_url)
res_json = res.json['data']
assert_equal(len(res_json), original_length - 1)
class TestNodeLinkCreate(ApiTestCase):
def setUp(self):
super(TestNodeLinkCreate, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=False, creator=self.user)
self.pointer_project = ProjectFactory(is_public=False, creator=self.user)
self.private_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.project._id)
self.private_payload = {
'data': {
"type": "node_links",
"relationships": {
'nodes': {
'data': {
'id': self.pointer_project._id,
'type': 'nodes'
}
}
}
}
}
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
self.public_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.public_project._id)
self.public_payload = {
'data': {
"type": "node_links",
"relationships": {
'nodes': {
'data': {
'id': self.public_pointer_project._id,
'type': 'nodes'
}
}
}
}
}
self.fake_url = '/{}nodes/{}/node_links/'.format(API_BASE, 'fdxlq')
self.fake_payload = {
'data': {
"type": "node_links",
"relationships": {
'nodes': {
'data': {
'id': 'fdxlq',
'type': 'nodes'
}
}
}
}
}
self.point_to_itself_payload = {
'data': {
"type": "node_links",
"relationships": {
'nodes': {
'data': {
'id': self.public_project._id,
'type': 'nodes'
}
}
}
}
}
self.user_two = AuthUserFactory()
self.user_two_project = ProjectFactory(is_public=True, creator=self.user_two)
self.user_two_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.user_two_project._id)
self.user_two_payload = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'id': self.user_two_project._id,
'type': 'nodes'
}
}
}
}
}
def test_add_node_link_relationships_is_a_list(self):
data = {
'data': {
'type': 'node_links',
'relationships': [{'target_node_id': self.public_pointer_project._id}]
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
def test_create_node_link_invalid_data(self):
res = self.app.post_json_api(self.public_url, "Incorrect data", auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
def test_add_node_link_no_relationships(self):
data = {
'data': {
'type': 'node_links',
'attributes': {
'id': self.public_pointer_project._id
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')
def test_add_node_links_empty_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')
def test_add_node_links_no_nodes_key_in_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {
'data': {
'id': self.public_pointer_project._id,
'type': 'nodes'
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Malformed request.')
def test_add_node_links_no_data_in_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'id': self.public_pointer_project._id,
'type': 'nodes'
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data.')
def test_add_node_links_no_target_type_in_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'id': self.public_pointer_project._id
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /type.')
def test_add_node_links_no_target_id_in_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'type': 'nodes'
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/id')
def test_add_node_links_incorrect_target_id_in_relationships(self):
data = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'type': 'nodes',
'id': '12345'
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_add_node_links_incorrect_target_type_in_relationships(self):
data = {
'data': {
'type': 'nodes',
'relationships': {
'nodes': {
'data': {
'type': 'Incorrect!',
'id': self.public_pointer_project._id
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_creates_node_link_target_not_nested(self):
payload = {
'data': {
'type': 'node_links',
'id': self.pointer_project._id
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data/relationships.')
def test_creates_public_node_pointer_logged_out(self):
res = self.app.post_json_api(self.public_url, self.public_payload, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
def test_creates_public_node_pointer_logged_in(self):
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
expected_path = node_url_for(self.public_pointer_project._id)
actual_path = urlparse(res_json['relationships']['target_node']['links']['related']['href']).path
assert_equal(expected_path, actual_path)
def test_creates_private_node_pointer_logged_out(self):
res = self.app.post_json_api(self.private_url, self.private_payload, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
def test_creates_private_node_pointer_logged_in_contributor(self):
res = self.app.post_json_api(self.private_url, self.private_payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
res_json = res.json['data']
expected_path = node_url_for(self.pointer_project._id)
actual_path = urlparse(res_json['relationships']['target_node']['links']['related']['href']).path
assert_equal(expected_path, actual_path)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_creates_private_node_pointer_logged_in_non_contributor(self):
res = self.app.post_json_api(self.private_url, self.private_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_create_node_pointer_non_contributing_node_to_contributing_node(self):
res = self.app.post_json_api(self.private_url, self.user_two_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'project')
def test_create_node_pointer_contributing_node_to_non_contributing_node(self):
res = self.app.post_json_api(self.private_url, self.user_two_payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
expected_path = node_url_for(self.user_two_project._id)
actual_path = urlparse(res_json['relationships']['target_node']['links']['related']['href']).path
assert_equal(expected_path, actual_path)
def test_create_pointer_non_contributing_node_to_fake_node(self):
res = self.app.post_json_api(self.private_url, self.fake_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_create_pointer_contributing_node_to_fake_node(self):
res = self.app.post_json_api(self.private_url, self.fake_payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
def test_create_fake_node_pointing_to_contributing_node(self):
res = self.app.post_json_api(self.fake_url, self.private_payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
res = self.app.post_json_api(self.fake_url, self.private_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
def test_create_node_pointer_to_itself(self):
res = self.app.post_json_api(self.public_url, self.point_to_itself_payload, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
expected_path = node_url_for(self.public_project._id)
actual_path = urlparse(res_json['relationships']['target_node']['links']['related']['href']).path
assert_equal(expected_path, actual_path)
def test_create_node_pointer_to_itself_unauthorized(self):
res = self.app.post_json_api(self.public_url, self.point_to_itself_payload, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
def test_create_node_pointer_already_connected(self):
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
expected_path = node_url_for(self.public_pointer_project._id)
actual_path = urlparse(res_json['relationships']['target_node']['links']['related']['href']).path
assert_equal(expected_path, actual_path)
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_in('detail', res.json['errors'][0])
def test_cannot_add_link_to_registration(self):
registration = RegistrationFactory(creator=self.user)
url = '/{}nodes/{}/node_links/'.format(API_BASE, registration._id)
payload = {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'id': self.public_pointer_project._id,
'type': 'nodes'
}
}
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_create_node_pointer_no_type(self):
payload = {
'data': {
'relationships': {
'nodes': {
'data': {
'id': self.user_two_project._id,
'type': 'nodes'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/type')
def test_create_node_pointer_incorrect_type(self):
payload = {
'data': {
'type': 'Wrong type.',
'relationships': {
'nodes': {
'data': {
'id': self.user_two_project._id,
'type': 'nodes'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(res.json['errors'][0]['detail'], 'Resource identifier does not match server endpoint.')
| {
"content_hash": "51f41fc56df5d55d724115c5f21d6f50",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 128,
"avg_line_length": 42.01449275362319,
"alnum_prop": 0.5458532498891243,
"repo_name": "haoyuchen1992/osf.io",
"id": "34ac186da35bf571f0edf28ccbf40eb530ffdfc2",
"size": "20317",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api_tests/nodes/views/test_node_links_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "119433"
},
{
"name": "HTML",
"bytes": "34310"
},
{
"name": "JavaScript",
"bytes": "1214045"
},
{
"name": "Mako",
"bytes": "542037"
},
{
"name": "Python",
"bytes": "3730523"
},
{
"name": "Shell",
"bytes": "1927"
}
],
"symlink_target": ""
} |
import sys
import os
import shlex
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
from pygments.lexers.data import YamlLexer
from pygments.lexers.data import JsonLexer
from pygments.lexers.templates import TwigHtmlLexer
from pygments.lexers.templates import CheetahHtmlLexer
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
lexers['yaml'] = YamlLexer()
lexers['twig'] = TwigHtmlLexer()
lexers['html'] = CheetahHtmlLexer()
lexers['json'] = JsonLexer()
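# These registrations let ``.. code-block:: php-annotations`` (and the other aliases
# above) highlight with the intended Pygments lexers in the rendered docs.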
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.ifconfig',
'sphinxcontrib.inlinesyntaxhighlight'
]
project = u'Polyphonic Polymer Bundle'
copyright = u'2015, Sean Hickey'
author = u'Sean Hickey'
version = '0.0.4'
release = '0.0.4'
language = "en"
templates_path = ['_templates']
exclude_patterns = ['_build']
source_suffix = '.rst'
master_doc = 'index'
pygments_style = 'sphinx'
html_title = "Polyphonic Polymer Bundle"
html_short_title = "Polyphonic"
html_static_path = ['_static']
htmlhelp_basename = 'PolyphonicPolymerBundledoc'
latex_documents = [
(master_doc, 'PolyphonicPolymerBundle.tex', u'Polyphonic Polymer Bundle Documentation',
u'Sean Hickey', 'manual'),
]
man_pages = [
(master_doc, 'polyphonicpolymerbundle', u'Polyphonic Polymer Bundle Documentation',
[u'Sean Hickey'], 1)
]
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| {
"content_hash": "34fed612f46fcd67b67f308b3f368665",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 89,
"avg_line_length": 28.362068965517242,
"alnum_prop": 0.7319148936170212,
"repo_name": "robfrawley/polymer-bundle",
"id": "7da22bf619b5608c583f53d332be0216bd9ba372",
"size": "1645",
"binary": false,
"copies": "3",
"ref": "refs/heads/polymer_1",
"path": "Resources/doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1790"
},
{
"name": "Makefile",
"bytes": "7477"
},
{
"name": "PHP",
"bytes": "73696"
},
{
"name": "Python",
"bytes": "1645"
}
],
"symlink_target": ""
} |
from django.conf import settings
FTS_BACKEND = getattr(settings, 'FTS_BACKEND', 'simple://')
| {
"content_hash": "810708f051cff3e91a8414f00f3deb9f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 59,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.711340206185567,
"repo_name": "ryszard/django-fts",
"id": "04c854fa63a903de2272fa4df8aa7c642b16d8bc",
"size": "97",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "fts/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "59297"
}
],
"symlink_target": ""
} |
import collections
import time
import pytest
import requests
import verta
from verta.dataset import Path
class TestMetadata:
def test_description(self, client, created_entities, strs):
first_desc, second_desc = strs[:2]
dataset = client.create_dataset(desc=first_desc)
created_entities.append(dataset)
assert dataset.get_description() == first_desc
dataset.set_description(second_desc)
assert dataset.get_description() == second_desc
assert client.get_dataset(id=dataset.id).get_description() == second_desc
def test_tags(self, client, created_entities, strs):
tag1, tag2, tag3, tag4 = strs[:4]
dataset = client.create_dataset(tags=[tag1])
created_entities.append(dataset)
assert set(dataset.get_tags()) == {tag1}
dataset.add_tag(tag2)
assert set(dataset.get_tags()) == {tag1, tag2}
dataset.add_tags([tag3, tag4])
assert set(dataset.get_tags()) == {tag1, tag2, tag3, tag4}
dataset.del_tag(tag3)
dataset.del_tag(tag3) # no error if nonexistent
assert set(dataset.get_tags()) == {tag1, tag2, tag4}
assert set(client.get_dataset(id=dataset.id).get_tags()) == {tag1, tag2, tag4}
def test_attributes(self, client, created_entities):
Attr = collections.namedtuple('Attr', ['key', 'value'])
attr1 = Attr('key1', {'a': 1})
attr2 = Attr('key2', ['a', 1])
attr3 = Attr('key3', 'a')
attr4 = Attr('key4', 1)
dataset = client.create_dataset(attrs=dict([attr1]))
created_entities.append(dataset)
assert dataset.get_attributes() == dict([attr1])
dataset.add_attribute(*attr2)
assert dataset.get_attributes() == dict([attr1, attr2])
dataset.add_attributes(dict([attr3, attr4]))
assert dataset.get_attributes() == dict([attr1, attr2, attr3, attr4])
dataset.del_attribute(attr3.key)
dataset.del_attribute(attr3.key) # no error if nonexistent
assert dataset.get_attributes() == dict([attr1, attr2, attr4])
assert client.get_dataset(id=dataset.id).get_attributes() == dict([attr1, attr2, attr4])
for attr in [attr1, attr2, attr4]:
assert dataset.get_attribute(attr.key) == attr.value
# overwrite
new_val = 'b'
dataset.add_attribute(attr1.key, new_val)
assert dataset.get_attribute(attr1.key) == new_val
class TestCreateGet:
def test_creation_updates_dataset_timestamp(self, client, dataset):
"""Version creation should update its dataset's time_updated field."""
time_updated = dataset._msg.time_updated
dataset_version = dataset.create_version(Path(["conftest.py"]))
time.sleep(60) # wait for reconciler
dataset._fetch_with_no_cache()
assert dataset._msg.time_updated > time_updated
assert dataset._msg.time_updated == dataset_version._msg.time_logged
def test_create(self, client, created_entities):
dataset = client.set_dataset()
assert dataset
created_entities.append(dataset)
name = verta._internal_utils._utils.generate_default_name()
dataset = client.create_dataset(name)
assert dataset
created_entities.append(dataset)
with pytest.raises(requests.HTTPError, match="already exists"):
assert client.create_dataset(name)
with pytest.warns(UserWarning, match="already exists"):
client.set_dataset(name=dataset.name, time_created=123)
def test_get(self, client, created_entities):
name = verta._internal_utils._utils.generate_default_name()
with pytest.raises(ValueError):
client.get_dataset(name)
dataset = client.set_dataset(name)
created_entities.append(dataset)
assert dataset.id == client.get_dataset(dataset.name).id
assert dataset.id == client.get_dataset(id=dataset.id).id
# Deleting non-existing key:
dataset.del_attribute("non-existing")
def test_find(self, client, created_entities, strs):
name1, name2, name3, name4, tag1, tag2 = (
s + str(verta._internal_utils._utils.now())
for s in strs[:6]
)
dataset1 = client.create_dataset(name1, tags=[tag1])
dataset2 = client.create_dataset(name2, tags=[tag1])
dataset3 = client.create_dataset(name3, tags=[tag2])
dataset4 = client.create_dataset(name4, tags=[tag2])
created_entities.extend([dataset1, dataset2, dataset3, dataset4])
datasets = client.datasets.find("name == {}".format(name3))
assert len(datasets) == 1
assert datasets[0].id == dataset3.id
datasets = client.datasets.find("tags ~= {}".format(tag1))
assert len(datasets) == 2
assert set(dataset.id for dataset in datasets) == {dataset1.id, dataset2.id}
def test_repr(self, client, created_entities):
description = "this is a cool dataset"
tags = [u"tag1", u"tag2"]
dataset = client.set_dataset(desc=description, tags=tags)
created_entities.append(dataset)
str_repr = repr(dataset)
assert "name: {}".format(dataset.name) in str_repr
assert "id: {}".format(dataset.id) in str_repr
assert "time created" in str_repr
assert "time updated" in str_repr
assert "description: {}".format(description) in str_repr
assert "tags: {}".format(tags) in str_repr
| {
"content_hash": "999998957b9f97a2110c3656871036ef",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 96,
"avg_line_length": 36.66,
"alnum_prop": 0.6330241862156756,
"repo_name": "mitdbg/modeldb",
"id": "713fa6f362ce22bb3f16ed91021229b10f14ef82",
"size": "5499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/verta/tests/test_dataset_versioning/test_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43352"
},
{
"name": "Dockerfile",
"bytes": "235"
},
{
"name": "HTML",
"bytes": "30924"
},
{
"name": "Java",
"bytes": "393927"
},
{
"name": "JavaScript",
"bytes": "1017682"
},
{
"name": "Python",
"bytes": "178774"
},
{
"name": "Scala",
"bytes": "251259"
},
{
"name": "Shell",
"bytes": "16870"
},
{
"name": "Thrift",
"bytes": "55683"
}
],
"symlink_target": ""
} |
import logging
import logging.handlers
import os
import shutil
import sys
import time
import traceback
from argparse import ArgumentParser
from collections import defaultdict
from Bio import Entrez, SeqIO
# Report the last exception as a string, for logging
def last_exception():
    """Return the last exception as a formatted string."""
    exc_type, exc_value, exc_traceback = sys.exc_info()
    return ''.join(traceback.format_exception(exc_type, exc_value,
                                              exc_traceback))
# Parse command-line
def parse_cmdline(args):
"""Parse command-line arguments"""
parser = ArgumentParser(prog="genbacnk_get_genomes_by_taxon.py")
parser.add_argument("-o", "--outdir", dest="outdirname",
action="store", default=None,
help="Output directory")
parser.add_argument("-t", "--taxon", dest="taxon",
action="store", default=None,
help="NCBI taxonomy ID")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", default=False,
help="Give verbose output")
parser.add_argument("-f", "--force", dest="force",
action="store_true", default=False,
help="Force file overwriting")
parser.add_argument("--noclobber", dest="noclobber",
action="store_true", default=False,
help="Don't nuke existing files")
parser.add_argument("-l", "--logfile", dest="logfile",
action="store", default=None,
help="Logfile location")
parser.add_argument("--format", dest="format",
action="store", default="gbk,fasta",
help="Output file format [gbk|fasta]")
parser.add_argument("--email", dest="email",
action="store", default=None,
help="Email associated with NCBI queries")
return parser.parse_args()
# Set contact email for NCBI
def set_ncbi_email():
"""Set contact email for NCBI."""
Entrez.email = args.email
logger.info("Set NCBI contact email to %s" % args.email)
Entrez.tool = "genbank_get_genomes_by_taxon.py"
# Create output directory if it doesn't exist
def make_outdir():
"""Make the output directory, if required.
This is a little involved. If the output directory already exists,
we take the safe option by default, and stop with an error. We can,
however, choose to force the program to go on, in which case we can
    either clobber the existing directory or not. If the directory exists,
    the options behave as follows:
DEFAULT: stop and report the collision
FORCE: continue, and remove the existing output directory
NOCLOBBER+FORCE: continue, but do not remove the existing output
"""
if os.path.exists(args.outdirname):
if not args.force:
logger.error("Output directory %s would " % args.outdirname +
"overwrite existing files (exiting)")
sys.exit(1)
else:
logger.info("--force output directory use")
if args.noclobber:
logger.warning("--noclobber: existing output directory kept")
else:
logger.info("Removing directory %s and everything below it" %
args.outdirname)
shutil.rmtree(args.outdirname)
logger.info("Creating directory %s" % args.outdirname)
try:
os.makedirs(args.outdirname) # We make the directory recursively
except OSError:
# This gets thrown if the directory exists. If we've forced overwrite/
# delete and we're not clobbering, we let things slide
if args.noclobber and args.force:
logger.info("NOCLOBBER+FORCE: not creating directory")
else:
            logger.error(traceback.format_exc())
sys.exit(1)
# Get assembly UIDs for the root taxon
def get_asm_uids(taxon_uid):
"""Returns a set of NCBI UIDs associated with the passed taxon.
This query at NCBI returns all assemblies for the taxon subtree
rooted at the passed taxon_uid.
"""
asm_ids = set() # Holds unique assembly UIDs
query = "txid%s[Organism:exp]" % taxon_uid
logger.info("ESearch for %s" % query)
# Perform initial search with usehistory
handle = Entrez.esearch(db="assembly", term=query, format="xml",
usehistory="y")
record = Entrez.read(handle)
result_count = int(record['Count'])
logger.info("Entrez ESearch returns %d assembly IDs" % result_count)
# Recover all child nodes
batch_size = 250
for start in range(0, result_count, batch_size):
tries, success = 0, False
while not success and tries < 20:
try:
batch_handle = Entrez.efetch(db="assembly", retmode="xml",
retstart=start,
retmax=batch_size,
webenv=record["WebEnv"],
query_key=record["QueryKey"])
batch_record = Entrez.read(batch_handle)
asm_ids = asm_ids.union(set(batch_record))
success = True
except:
tries += 1
logger.warning("Entrez batch query failed (#%d)" % tries)
if not success:
logger.error("Too many download attempt failures (exiting)")
sys.exit(1)
logger.info("Identified %d unique assemblies" % len(asm_ids))
return asm_ids
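# Hedged sketch, added for illustration only (not part of the original script):
# the try/except retry loop above, and the similar one in write_contigs() below,
# both follow the same "retry up to N times, then bail out" pattern. A generic
# helper version of that pattern would look roughly like this; the name _retry
# and its arguments are hypothetical and nothing in the script calls it.
def _retry(func, attempts=20, on_failure=None):
    """Illustrative only: call func() until it succeeds or attempts run out."""
    for attempt in range(1, attempts + 1):
        try:
            return func()
        except Exception:
            if on_failure is not None:
                on_failure(attempt)
    raise RuntimeError("Too many download attempt failures")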
# Get contig UIDs for a specified assembly UID
def get_contig_uids(asm_uid):
"""Returns a set of NCBI UIDs associated with the passed assembly.
    The UIDs returned are for assembly_nuccore_insdc sequences - the
assembly contigs."""
logger.info("Finding contig UIDs for assembly %s" % asm_uid)
contig_ids = set() # Holds unique contig UIDs
links = Entrez.read(Entrez.elink(dbfrom="assembly", db="nucleotide",
retmode="gb", from_uid=asm_uid))
contigs = [l for l in links[0]['LinkSetDb'] \
if l['LinkName'] == 'assembly_nuccore_insdc'][0]
contig_uids = set([e['Id'] for e in contigs['Link']])
logger.info("Identified %d contig UIDs" % len(contig_uids))
return contig_uids
# Write contigs for a single assembly out to file
def write_contigs(asm_uid, contig_uids):
"""Writes assembly contigs out to a single FASTA file in the script's
designated output directory.
FASTA records are returned, as GenBank and even GenBankWithParts format
records don't reliably give correct sequence in all cases.
The script returns two strings for each assembly, a 'class' and a 'label'
string - this is for use with, e.g. pyani.
"""
logger.info("Collecting contig data for %s" % asm_uid)
# Assembly record - get binomial and strain names
asm_record = Entrez.read(Entrez.esummary(db='assembly', id=asm_uid,
rettype='text'))
asm_organism = asm_record['DocumentSummarySet']['DocumentSummary']\
[0]['SpeciesName']
try:
asm_strain = asm_record['DocumentSummarySet']['DocumentSummary']\
[0]['Biosource']['InfraspeciesList'][0]['Sub_value']
except:
asm_strain = ""
# Assembly UID (long form) for the output filename
gname = asm_record['DocumentSummarySet']['DocumentSummary']\
[0]['AssemblyAccession'].split('.')[0]
outfilename = "%s.fasta" % os.path.join(args.outdirname, gname)
# Create label and class strings
genus, species = asm_organism.split(' ', 1)
ginit = genus[0] + '.'
labeltxt = "%s\t%s %s %s" % (gname, ginit, species, asm_strain)
classtxt = "%s\t%s" % (gname, asm_organism)
# Get FASTA records for contigs
logger.info("Downloading FASTA records for assembly %s (%s)" %
(asm_uid, ' '.join([ginit, species, asm_strain])))
query_uids = ','.join(contig_uids)
tries, success = 0, False
while not success and tries < 20:
try:
tries += 1
records = list(SeqIO.parse(Entrez.efetch(db='nucleotide',
id=query_uids,
rettype='fasta',
retmode='text'), 'fasta'))
# Check only that correct number of records returned.
if len(records) == len(contig_uids):
success = True
else:
logger.warning("FASTA download for assembly %s failed" %
asm_uid)
logger.warning("try %d/20" % tries)
# Could also check expected assembly sequence length?
totlen = sum([len(r) for r in records])
logger.info("Downloaded genome size: %d" % totlen)
except:
logger.warning("FASTA download for assembly %s failed" % asm_uid)
logger.warning("try %d/20" % tries)
if not success:
logger.error("Failed to download records for %s (exiting)" % asm_uid)
sys.exit(1)
# Write contigs to file
retval = SeqIO.write(records, outfilename, 'fasta')
logger.info("Wrote %d contigs to %s" % (retval, outfilename))
# Return labels for this genome
return classtxt, labeltxt
# Run as script
if __name__ == '__main__':
# Parse command-line
args = parse_cmdline(sys.argv)
# Set up logging
logger = logging.getLogger('genbank_get_genomes_by_taxon.py')
logger.setLevel(logging.DEBUG)
err_handler = logging.StreamHandler(sys.stderr)
err_formatter = logging.Formatter('%(levelname)s: %(message)s')
err_handler.setFormatter(err_formatter)
# Was a logfile specified? If so, use it
if args.logfile is not None:
try:
logstream = open(args.logfile, 'w')
err_handler_file = logging.StreamHandler(logstream)
err_handler_file.setFormatter(err_formatter)
err_handler_file.setLevel(logging.INFO)
logger.addHandler(err_handler_file)
except:
logger.error("Could not open %s for logging" %
args.logfile)
sys.exit(1)
# Do we need verbosity?
if args.verbose:
err_handler.setLevel(logging.INFO)
else:
err_handler.setLevel(logging.WARNING)
logger.addHandler(err_handler)
# Report arguments, if verbose
logger.info("genbank_get_genomes_by_taxon.py: %s" % time.asctime())
logger.info("command-line: %s" % ' '.join(sys.argv))
logger.info(args)
    # Have we got an email contact address? If not, exit.
if args.email is None:
logger.error("No email contact address provided (exiting)")
sys.exit(1)
set_ncbi_email()
# Have we got an output directory? If not, exit.
if args.outdirname is None:
logger.error("No output directory name (exiting)")
sys.exit(1)
make_outdir()
logger.info("Output directory: %s" % args.outdirname)
# We might have more than one taxon in a comma-separated list
taxon_ids = args.taxon.split(',')
logger.info("Passed taxon IDs: %s" % ', '.join(taxon_ids))
# Get all NCBI assemblies for each taxon UID
asm_dict = defaultdict(set)
for tid in taxon_ids:
asm_dict[tid] = get_asm_uids(tid)
for tid, asm_uids in asm_dict.items():
logger.info("Taxon %s: %d assemblies" % (tid, len(asm_uids)))
# Get links to the nucleotide database for each assembly UID
contig_dict = defaultdict(set)
for tid, asm_uids in asm_dict.items():
for asm_uid in asm_uids:
contig_dict[asm_uid] = get_contig_uids(asm_uid)
for asm_uid, contig_uids in contig_dict.items():
logger.info("Assembly %s: %d contigs" % (asm_uid, len(contig_uids)))
# Write each recovered assembly's contig set to a file in the
# specified output directory, and collect string labels to write in
# class and label files (e.g. for use with pyani).
classes, labels = [], []
for asm_uid, contig_uids in contig_dict.items():
classtxt, labeltxt = write_contigs(asm_uid, contig_uids)
classes.append(classtxt)
labels.append(labeltxt)
# Write class and label files
classfilename = os.path.join(args.outdirname, 'classes.txt')
labelfilename = os.path.join(args.outdirname, 'labels.txt')
logger.info("Writing classes file to %s" % classfilename)
with open(classfilename, 'w') as fh:
fh.write('\n'.join(classes))
logger.info("Writing labels file to %s" % labelfilename)
with open(labelfilename, 'w') as fh:
fh.write('\n'.join(labels))
| {
"content_hash": "0521a6aa33c6b0b688e858d1ba579611",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 79,
"avg_line_length": 41.11400651465798,
"alnum_prop": 0.5964189510378703,
"repo_name": "widdowquinn/scripts",
"id": "7979ea142a14815de65c8cb0bf02470194a6079f",
"size": "12998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bioinformatics/genbank_get_genomes_by_taxon.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "3147"
},
{
"name": "Gnuplot",
"bytes": "1911"
},
{
"name": "Python",
"bytes": "226696"
},
{
"name": "Shell",
"bytes": "9491"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from dragonflow._i18n import _
df_snat_app_opts = [
cfg.BoolOpt('enable_goto_flows',
default=True,
help=_("Enable install of common goto flows to ingress/egress "
"NAT tables or re-use goto flows installed by "
"other DF application"),
deprecated_for_removal=True),
cfg.StrOpt('external_network_bridge',
default='br-ex',
help=_("Name of bridge used for external network traffic")),
]
def register_opts():
cfg.CONF.register_opts(df_snat_app_opts, group='df_snat_app')
def list_opts():
return {'df_snat_app': df_snat_app_opts}
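# Hedged usage sketch, added for illustration (not part of the original module):
# a consumer would register the option group and then read options back from
# the global CONF object, roughly as below. The helper name is hypothetical and
# it is never called at import time.
def _example_usage():
    """Illustrative only: register the options and read one of them back."""
    register_opts()
    return cfg.CONF.df_snat_app.external_network_bridge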
| {
"content_hash": "69ed4ae0afbc19ac151a396951b99dc2",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 30.391304347826086,
"alnum_prop": 0.5851216022889842,
"repo_name": "openstack/dragonflow",
"id": "a6ae786baf3aa54b3a264e50166fb915825bb75e",
"size": "1340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dragonflow/conf/df_snat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2386"
},
{
"name": "Dockerfile",
"bytes": "690"
},
{
"name": "Mako",
"bytes": "1053"
},
{
"name": "Python",
"bytes": "1740942"
},
{
"name": "Ruby",
"bytes": "4449"
},
{
"name": "Shell",
"bytes": "70410"
}
],
"symlink_target": ""
} |
"""
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import Normalizer
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .imputation import Imputer
from ._weights import balance_weights
__all__ = [
'Binarizer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MinMaxScaler',
'Normalizer',
'OneHotEncoder',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'balance_weights',
'binarize',
'normalize',
'scale',
'label_binarize',
]
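# Hedged usage sketch, added for illustration (not part of the original module):
# the classes exported above share the estimator fit/transform API, shown here
# with StandardScaler and MinMaxScaler. The helper name is hypothetical and the
# function is never called at import time.
def _example_usage():
    """Illustrative only: the shared fit/transform pattern."""
    import numpy as np
    X = np.array([[1.0, -1.0], [2.0, 0.0], [0.0, 1.0]])
    scaled = StandardScaler().fit_transform(X)   # columns scaled to zero mean, unit variance
    bounded = MinMaxScaler().fit_transform(X)    # columns rescaled to the [0, 1] range
    return scaled, bounded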
| {
"content_hash": "85a0777d22506f7a0e84cfda60e908b6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 68,
"avg_line_length": 22.674418604651162,
"alnum_prop": 0.7323076923076923,
"repo_name": "treycausey/scikit-learn",
"id": "5585a976a83a30e4544e28e6db56525c4954bafd",
"size": "975",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/preprocessing/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18150950"
},
{
"name": "C++",
"bytes": "1807769"
},
{
"name": "JavaScript",
"bytes": "20564"
},
{
"name": "Python",
"bytes": "5083789"
},
{
"name": "Shell",
"bytes": "3768"
}
],
"symlink_target": ""
} |
""" Todoist to Airmail
"""
#!/usr/bin/python
# encoding: utf8
import sys
import os
from workflow import Workflow3
from Foundation import *
from ScriptingBridge import *
__version__ = '1.0.0'
LOG = None
API_KEY = None
def create_task(content):
"""
"""
todo = todoist.TodoistAPI(API_KEY)
if not wf.settings.get('inbox_id', None):
todo.sync()
inbox = [p for p in todo.state['projects'] if p['name'] == 'Inbox'][0]
wf.settings['inbox_id'] = inbox['id']
task = todo.items.add(content, wf.settings['inbox_id'])
# print task
airmail = SBApplication.applicationWithBundleIdentifier_("it.bloop.airmail2")
# Format note text from message subject
task_note_text = airmail.selectedMessageUrl() + " (" + airmail.selectedMessage().subject() + ")"
# print airmail.selectedMessage().subject()
todo.notes.add(task['id'], task_note_text)
todo.commit()
def main(wf):
"""
"""
if len(wf.args):
query = wf.args[0]
LOG.debug(query)
else:
query = None
LOG.debug(query)
create_task(query)
if __name__ == u"__main__":
wf = Workflow3(libraries=['./lib'], update_settings={
# Github repo
'github_slug': 'markgrovs/alfred-airmail-to-todoist',
'version': __version__,
'frequency': 7
})
if wf.update_available:
# Download new version and tell Alfred to install it
wf.start_update()
import todoist
LOG = wf.logger
API_KEY = os.environ['API_KEY']
sys.exit(wf.run(main))
| {
"content_hash": "f40ca278baad650a981e04e668326cc8",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 100,
"avg_line_length": 21.625,
"alnum_prop": 0.6017983301220295,
"repo_name": "markgrovs/alfred-airmail-to-todoist",
"id": "1d276de26e8cbdd346565361f40d6cf60243ec4f",
"size": "1557",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/airmail-to-todoist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1306242"
}
],
"symlink_target": ""
} |
from datetime import datetime
import pytest
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy_norm import Normalizable
@pytest.fixture
def User(Base):
class User(Base, Normalizable):
__tablename__ = 'users'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
nickname = sa.Column(sa.String)
point = sa.Column(sa.Integer)
created_at = sa.Column(sa.DateTime)
primary_address = relationship("Address", uselist=False)
__includes__ = ['addresses']
@property
def display_name(self):
return "%s (%s)" % (self.nickname, self.name)
def __init__(self, name, created_at):
self.name = name
self.created_at = created_at
return User
@pytest.fixture
def Address(Base):
class Address(Base, Normalizable):
__tablename__ = 'addresses'
id = sa.Column(sa.Integer, primary_key=True)
street = sa.Column(sa.String)
suburb = sa.Column(sa.String)
state = sa.Column(sa.String)
country = sa.Column(sa.String)
postcode = sa.Column(sa.String)
phone = sa.Column(sa.String)
user_id = sa.Column(sa.Integer, ForeignKey('users.id'))
user = relationship("User", backref=backref('addresses', order_by=id))
__excludes__ = ['phone']
def __init__(self, street=None, suburb=None,
state=None, country=None, postcode=None, phone=None):
self.street = street
self.suburb = suburb
self.state = state
self.country = country
self.postcode = postcode
self.phone = phone
return Address
@pytest.fixture
def UserWithinAddresses(User, Address):
me = User("Edward", datetime.now())
addr1 = Address(
street = '100 Flinders Ave',
suburb = 'Melburne',
state = 'Victoria',
country = 'Australia',
postcode = '3000',
phone = '0400000001'
)
addr2 = Address(
street = '20 Albert Ave',
suburb = 'South Melbourne',
state = 'Victoria',
country = 'Australia',
postcode = '3205',
phone = '0400000002'
)
me.addresses = [ addr1, addr2 ]
me.primary_address = addr1
return me
class TestDotNotation():
def test_type(self, UserWithinAddresses):
norm = UserWithinAddresses.vars()
assert isinstance(norm, dict)
def test_dot_notation_includes(self, UserWithinAddresses):
norm = UserWithinAddresses.vars(includes=[
'addresses.phone'
])
assert "addresses" in norm
assert isinstance(norm["addresses"], list)
assert len(norm["addresses"]) == len(UserWithinAddresses.addresses)
assert "phone" in norm["addresses"][0]
assert "country" in norm["addresses"][0]
assert "postcode" in norm["addresses"][0]
assert "suburb" in norm["addresses"][0]
assert "state" in norm["addresses"][0]
def test_dot_notation_excludes(self, UserWithinAddresses):
norm = UserWithinAddresses.vars(excludes=[
'addresses.suburb',
'addresses.state'
])
assert "addresses" in norm
assert isinstance(norm["addresses"], list)
assert len(norm["addresses"]) == len(UserWithinAddresses.addresses)
assert "street" in norm["addresses"][0]
assert "country" in norm["addresses"][0]
assert "postcode" in norm["addresses"][0]
assert "suburb" not in norm["addresses"][0]
assert "state" not in norm["addresses"][0]
assert "phone" not in norm["addresses"][0]
def test_dot_notation_complex(self, UserWithinAddresses):
norm = UserWithinAddresses.vars(
includes=['primary_address'],
excludes=[
'primary_address.suburb',
'primary_address.state'
]
)
assert "primary_address" in norm
assert isinstance(norm["primary_address"], dict)
assert "street" in norm["primary_address"]
assert "country" in norm["primary_address"]
assert "postcode" in norm["primary_address"]
assert "suburb" not in norm["primary_address"]
assert "state" not in norm["primary_address"]
assert "phone" not in norm["primary_address"]
def test_dot_notation_complex_includes_only(self, UserWithinAddresses):
norm = UserWithinAddresses.vars(
includes_only=[
'name',
'primary_address',
'primary_address.suburb',
'primary_address.state'
]
)
assert "name" in norm
assert "id" not in norm
assert "nickname" not in norm
assert "point" not in norm
assert "primary_address" in norm
assert isinstance(norm["primary_address"], dict)
assert "street" not in norm["primary_address"]
assert "country" not in norm["primary_address"]
assert "postcode" not in norm["primary_address"]
assert "suburb" in norm["primary_address"]
assert "state" in norm["primary_address"]
assert "phone" not in norm["primary_address"]
| {
"content_hash": "4f388a9144049c63d4ef36c283098543",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 78,
"avg_line_length": 31.21764705882353,
"alnum_prop": 0.5939325419257584,
"repo_name": "haruair/sqlalchemy-norm",
"id": "1920aa8a699f9383aabcfe9ea2d062cfb434ab2d",
"size": "5307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_dotnotation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18270"
}
],
"symlink_target": ""
} |
import amazon
from setuptools import setup, find_packages
try:
    long_description=open('README.md', 'r').read()
except IOError:
long_description=""
setup(name='python-amazon-simple-product-api',
version=amazon.__version__,
description="A simple Python wrapper for the Amazon.com Product Advertising API",
long_description=long_description,
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
],
keywords='amazon, product advertising, api',
author='Yoav Aviram',
author_email='[email protected]',
url='https://github.com/yoavaviram/python-amazon-simple-product-api',
license='Apache 2.0',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=["bottlenose", "lxml", "python-dateutil"],
)
| {
"content_hash": "57fb54c2fe8b019ed5962eb3c8441da8",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 87,
"avg_line_length": 38.38095238095238,
"alnum_prop": 0.6228287841191067,
"repo_name": "yoavaviram/python-amazon-simple-product-api",
"id": "1b77dcd42797aa31c63b1e84f34607a75c0aca57",
"size": "1612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "66850"
}
],
"symlink_target": ""
} |
import os
import random
import string
from ..models import ServerListResult
from ..models import Server
from ..models import MailosaurException
class ServersOperations(object):
"""ServersOperations operations.
"""
def __init__(self, session, base_url, handle_http_error):
self.session = session
self.base_url = base_url
self.handle_http_error = handle_http_error
def generate_email_address(self, server):
host = os.getenv('MAILOSAUR_SMTP_HOST', 'mailosaur.net')
randomString = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
return "%s@%s.%s" % (randomString, server, host)
def list(self):
"""List all servers.
Returns a list of your virtual SMTP servers. Servers are returned
sorted in alphabetical order.
:return: ServerListResult
:rtype: ~mailosaur.models.ServerListResult
:raises:
:class:`MailosaurException<mailosaur.models.MailosaurException>`
"""
url = "%sapi/servers" % (self.base_url)
response = self.session.get(url)
if response.status_code not in [200]:
self.handle_http_error(response)
return
data = response.json()
return ServerListResult(data)
def create(self, server_create_options):
"""Create a server.
Creates a new virtual SMTP server and returns it.
:param server_create_options:
:type server_create_options: ~mailosaur.models.ServerCreateOptions
:return: Server
:rtype: ~mailosaur.models.Server
:raises:
:class:`MailosaurException<mailosaur.models.MailosaurException>`
"""
url = "%sapi/servers" % (self.base_url)
response = self.session.post(url, json=server_create_options.to_json())
if response.status_code not in [200]:
self.handle_http_error(response)
return
data = response.json()
return Server(data)
def get(self, id):
"""Retrieve a server.
Retrieves the detail for a single server. Simply supply the unique
identifier for the required server.
:param id: The identifier of the server to be retrieved.
:type id: str
:return: Server
:rtype: ~mailosaur.models.Server
:raises:
:class:`MailosaurException<mailosaur.models.MailosaurException>`
"""
url = "%sapi/servers/%s" % (self.base_url, id)
response = self.session.get(url)
if response.status_code not in [200]:
self.handle_http_error(response)
return
data = response.json()
return Server(data)
def get_password(self, id):
"""Retrieve server password.
Retrieves the password for use with SMTP and POP3 for a single server.
Simply supply the unique identifier for the required server.
:param id: The identifier of the server.
:type id: str
:return: str
:rtype: str
:raises:
:class:`MailosaurException<mailosaur.models.MailosaurException>`
"""
url = "%sapi/servers/%s/password" % (self.base_url, id)
response = self.session.get(url)
if response.status_code not in [200]:
self.handle_http_error(response)
return
data = response.json()
return data.get('value', None)
def update(
self, id, server):
"""Update a server.
        Updates a single server and returns it.
:param id: The identifier of the server to be updated.
:type id: str
:param server:
:type server: ~mailosaur.models.Server
:param dict custom_headers: headers that will be added to the request
:return: Server
:rtype: ~mailosaur.models.Server
:raises:
:class:`MailosaurException<mailosaur.models.MailosaurException>`
"""
url = "%sapi/servers/%s" % (self.base_url, id)
response = self.session.put(url, json=server.to_json())
if response.status_code not in [200]:
self.handle_http_error(response)
return
data = response.json()
return Server(data)
def delete(
self, id):
"""Delete a server.
Permanently deletes a server. This operation cannot be undone. Also
deletes all messages and associated attachments within the server.
:param id: The identifier of the server to be deleted.
:type id: str
:return: None
:rtype: None
:raises:
:class:`MailosaurException<mailosaur.models.MailosaurException>`
"""
url = "%sapi/servers/%s" % (self.base_url, id)
response = self.session.delete(url)
if response.status_code not in [204]:
self.handle_http_error(response)
return
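# Hedged usage sketch, added for illustration (not part of the original module):
# the class above is normally reached through the library's top-level client,
# which supplies the session, base URL and error handler. The helper below only
# shows the call pattern; its name and the server id are placeholders.
def _example_usage(session, base_url, handle_http_error):
    """Illustrative only: list servers and build a random test address."""
    servers = ServersOperations(session, base_url, handle_http_error)
    servers.list()  # ServerListResult describing the account's servers
    return servers.generate_email_address("abcd1234")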
| {
"content_hash": "8efddeef663223e9cdf1d6951b42b2e2",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 104,
"avg_line_length": 30.869565217391305,
"alnum_prop": 0.6004024144869216,
"repo_name": "mailosaur/mailosaur-python",
"id": "123277310be6a2a875336772aaf668dbb9578e1b",
"size": "4970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailosaur/operations/servers_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1246"
},
{
"name": "Python",
"bytes": "80050"
}
],
"symlink_target": ""
} |
import datetime
from time import sleep
from flask import Flask
import model
from model import db
EXECUTOR_TIMEOUT_MINS = 1
def reset_overdue_runs():
"""give overdue runs another chance"""
runs = model.Run.query.filter(model.Run.finished_execing_time == None)\
.filter(model.Run.started_execing_time != None)\
.filter((datetime.datetime.utcnow() -
datetime.timedelta(minutes=EXECUTOR_TIMEOUT_MINS)) >
model.Run.started_execing_time).all()
for run in runs:
print("Returning overdue run: ", run.id)
run.started_execing_time = None
model.db.session.commit()
def event_loop():
"""loop to reset overdue runs"""
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:////tmp/code_court.db"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['model'] = model
db.init_app(app)
with app.app_context():
try:
while True:
reset_overdue_runs()
sleep(30)
except KeyboardInterrupt:
print("Event loop shutting down")
if __name__ == "__main__":
event_loop()
| {
"content_hash": "6331c23e351e9de44a4ea346993958f4",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 76,
"avg_line_length": 27.113636363636363,
"alnum_prop": 0.597652975691534,
"repo_name": "spacetag/code_court",
"id": "c7674665871390a5660b3a7fbbd1c16a612d90fc",
"size": "1193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code_court/courthouse/event_loop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "119040"
},
{
"name": "HTML",
"bytes": "523403"
},
{
"name": "JavaScript",
"bytes": "1912233"
},
{
"name": "Python",
"bytes": "160002"
},
{
"name": "Shell",
"bytes": "399"
},
{
"name": "Vue",
"bytes": "19720"
}
],
"symlink_target": ""
} |
import os
import shlex
from oslo_config import cfg
from oslo_privsep import priv_context
from neutron._i18n import _
from neutron.common import config
EXTERNAL_PROCESS_OPTS = [
cfg.StrOpt('external_pids',
default='$state_path/external/pids',
help=_('Location to store child pid files')),
]
PD_OPTS = [
cfg.StrOpt('pd_dhcp_driver',
default='dibbler',
help=_('Service to handle DHCPv6 Prefix delegation.')),
]
PD_DRIVER_OPTS = [
cfg.StrOpt('pd_confs',
default='$state_path/pd',
help=_('Location to store IPv6 PD files.')),
cfg.StrOpt('vendor_pen',
default='8888',
help=_("A decimal value as Vendor's Registered Private "
"Enterprise Number as required by RFC3315 DUID-EN.")),
]
INTERFACE_OPTS = [
cfg.StrOpt('ovs_integration_bridge',
default='br-int',
help=_('Name of Open vSwitch bridge to use')),
cfg.BoolOpt('ovs_use_veth',
default=False,
help=_("Uses veth for an OVS interface or not. "
"Support kernels with limited namespace support "
"(e.g. RHEL 6.5) and rate limiting on router's gateway "
"port so long as ovs_use_veth is set to "
"True.")),
]
RA_OPTS = [
cfg.StrOpt('ra_confs',
default='$state_path/ra',
help=_('Location to store IPv6 RA config files')),
cfg.IntOpt('min_rtr_adv_interval',
default=30,
help=_('MinRtrAdvInterval setting for radvd.conf')),
cfg.IntOpt('max_rtr_adv_interval',
default=100,
help=_('MaxRtrAdvInterval setting for radvd.conf')),
]
ROOT_HELPER_OPTS = [
cfg.StrOpt('root_helper', default='sudo',
help=_("Root helper application. "
"Use 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' "
"to use the real root filter facility. Change to 'sudo' "
"to skip the filtering and just run the command "
"directly.")),
cfg.BoolOpt('use_helper_for_ns_read',
default=True,
help=_("Use the root helper when listing the namespaces on a "
"system. This may not be required depending on the "
"security configuration. If the root helper is "
"not required, set this to False for a performance "
"improvement.")),
# We can't just use root_helper=sudo neutron-rootwrap-daemon $cfg because
# it isn't appropriate for long-lived processes spawned with create_process
# Having a bool use_rootwrap_daemon option precludes specifying the
# rootwrap daemon command, which may be necessary for Xen?
cfg.StrOpt('root_helper_daemon',
help=_("""
Root helper daemon application to use when possible.
Use 'sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf' to run rootwrap
in "daemon mode" which has been reported to improve performance at scale. For
more information on running rootwrap in "daemon mode", see:
https://docs.openstack.org/oslo.rootwrap/latest/user/usage.html#daemon-mode
For the agent which needs to execute commands in Dom0 in the hypervisor of
XenServer, this option should be set to 'xenapi_root_helper', so that it will
keep a XenAPI session to pass commands to Dom0.
""")),
]
AGENT_STATE_OPTS = [
cfg.FloatOpt('report_interval', default=30,
help=_('Seconds between nodes reporting state to server; '
'should be less than agent_down_time, best if it '
'is half or less than agent_down_time.')),
cfg.BoolOpt('log_agent_heartbeats', default=False,
help=_('Log agent heartbeats')),
]
INTERFACE_DRIVER_OPTS = [
cfg.StrOpt('interface_driver',
help=_("The driver used to manage the virtual interface.")),
]
IPTABLES_OPTS = [
cfg.BoolOpt('comment_iptables_rules', default=True,
help=_("Add comments to iptables rules. "
"Set to false to disallow the addition of comments to "
"generated iptables rules that describe each rule's "
"purpose. System must support the iptables comments "
"module for addition of comments.")),
cfg.BoolOpt('debug_iptables_rules', default=False,
help=_("Duplicate every iptables difference calculation to "
"ensure the format being generated matches the format "
"of iptables-save. This option should not be turned "
"on for production systems because it imposes a "
"performance penalty.")),
]
PROCESS_MONITOR_OPTS = [
cfg.StrOpt('check_child_processes_action', default='respawn',
choices=['respawn', 'exit'],
help=_('Action to be executed when a child process dies')),
cfg.IntOpt('check_child_processes_interval', default=60,
help=_('Interval between checks of child process liveness '
'(seconds), use 0 to disable')),
]
AVAILABILITY_ZONE_OPTS = [
# The default AZ name "nova" is selected to match the default
# AZ name in Nova and Cinder.
cfg.StrOpt('availability_zone', max_length=255, default='nova',
help=_("Availability zone of this node")),
]
EXT_NET_BRIDGE_OPTS = [
cfg.StrOpt('external_network_bridge', default='',
deprecated_for_removal=True,
help=_("Name of bridge used for external network "
"traffic. When this parameter is set, the L3 agent will "
"plug an interface directly into an external bridge "
"which will not allow any wiring by the L2 agent. Using "
"this will result in incorrect port statuses. This "
"option is deprecated and will be removed in Ocata."))
]
def get_log_args(conf, log_file_name, **kwargs):
cmd_args = []
if conf.debug:
cmd_args.append('--debug')
if (conf.log_dir or conf.log_file):
cmd_args.append('--log-file=%s' % log_file_name)
log_dir = None
if conf.log_dir and conf.log_file:
log_dir = os.path.dirname(
os.path.join(conf.log_dir, conf.log_file))
elif conf.log_dir:
log_dir = conf.log_dir
elif conf.log_file:
log_dir = os.path.dirname(conf.log_file)
if log_dir:
cmd_args.append('--log-dir=%s' % log_dir)
else:
if conf.use_syslog:
cmd_args.append('--use-syslog')
if conf.syslog_log_facility:
cmd_args.append(
'--syslog-log-facility=%s' % conf.syslog_log_facility)
return cmd_args
def register_external_process_opts(cfg=cfg.CONF):
cfg.register_opts(EXTERNAL_PROCESS_OPTS)
def register_pd_opts(cfg=cfg.CONF):
cfg.register_opts(PD_OPTS)
def register_pddriver_opts(cfg=cfg.CONF):
cfg.register_opts(PD_DRIVER_OPTS)
def register_interface_opts(cfg=cfg.CONF):
cfg.register_opts(INTERFACE_OPTS)
def register_ra_opts(cfg=cfg.CONF):
cfg.register_opts(RA_OPTS)
def register_root_helper(conf=cfg.CONF):
conf.register_opts(ROOT_HELPER_OPTS, 'AGENT')
def register_agent_state_opts_helper(conf):
conf.register_opts(AGENT_STATE_OPTS, 'AGENT')
def register_interface_driver_opts_helper(conf):
conf.register_opts(INTERFACE_DRIVER_OPTS)
def register_iptables_opts(conf):
conf.register_opts(IPTABLES_OPTS, 'AGENT')
def register_process_monitor_opts(conf):
conf.register_opts(PROCESS_MONITOR_OPTS, 'AGENT')
def register_availability_zone_opts_helper(conf):
conf.register_opts(AVAILABILITY_ZONE_OPTS, 'AGENT')
def get_root_helper(conf):
return conf.AGENT.root_helper
def setup_conf():
bind_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
conf = cfg.ConfigOpts()
conf.register_opts(bind_opts)
return conf
# add a logging setup method here for convenience
setup_logging = config.setup_logging
def setup_privsep():
priv_context.init(root_helper=shlex.split(get_root_helper(cfg.CONF)))
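# Hedged usage sketch, added for illustration (not part of the original module):
# an agent entry point typically wires the helpers above in roughly this order
# before spawning any external processes. The function name is hypothetical and
# nothing in this module calls it.
def _example_agent_setup(conf=cfg.CONF):
    """Illustrative only: register option groups, then initialise privsep."""
    register_root_helper(conf)
    register_agent_state_opts_helper(conf)
    register_interface_driver_opts_helper(conf)
    register_iptables_opts(conf)
    register_process_monitor_opts(conf)
    setup_privsep()
    return get_root_helper(conf)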
| {
"content_hash": "a0876a0f85474ff28fc43433f2214a48",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 79,
"avg_line_length": 34.89795918367347,
"alnum_prop": 0.6004678362573099,
"repo_name": "huntxu/neutron",
"id": "34027f99366ff019a8a5ed6051fa8a57e0366f2f",
"size": "9186",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/conf/agent/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11111676"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
import os
from a02a_word_count_evaluation import clean_text
from a01c_feature_engineering import tokenize_and_stem
from unidecode import unidecode
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from gensim.models.tfidfmodel import TfidfModel
def get_kernel_types():
return [
'cosine_similarity',
'linear',
'polynomial',
'sigmoid',
'rbf',
'laplacian',
]
def get_terms_from_tf_idf(tfidf_vectorizer):
terms = tfidf_vectorizer.get_feature_names()
return terms
def get_kernel(kernel_name):
options = {
'cosine_similarity': cosine_similarity,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'sigmoid': sigmoid_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel
}
return options[kernel_name]
def get_similarity_matrix(tfidf_matr, kernel):
dist = 1 - get_kernel(kernel)(tfidf_matr)
return dist
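# Hedged usage sketch, added for illustration (not part of the original script):
# the TF-IDF + kernel-distance pattern used in perform_tf_idf() below, applied
# to three toy documents. The helper name and the documents are made up, and
# the function is never called by the script.
def _example_similarity():
    """Illustrative only: distance of the first toy document to all three."""
    docs = ["red power drill", "cordless drill with battery", "garden hose"]
    tfidf = TfidfVectorizer().fit_transform(docs)
    return get_similarity_matrix(tfidf, 'cosine_similarity')[0]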
def perform_tf_idf(debug=False):
bow_matrix = pd.read_pickle('../../dataset/bow_per_product_tst.pickle')
max_features = None
# define vectorizer parameters
print("Setup TF-IDF Vectorizer")
tfidf_vectorizer = TfidfVectorizer(max_df=0.7, max_features=max_features,
min_df=0.2, stop_words=None,
use_idf=True, tokenizer=None)
print("Perform TF-IDF on the search results -- Max features = " + str(max_features))
kernel_type = 'rbf'
training_data = pd.read_csv('../../dataset/preprocessed_training_data_t.csv')
# # debug prints
# print("Bag of words matrix: ")
# print(bow_matrix)
# print("")
# print("Training Data: ")
# print(training_data)
all_feature_names = ['title_rate', 'desc_rate', 'attr_rate']
score_df = pd.DataFrame(
columns=all_feature_names,
index=training_data['id'].tolist()
)
counter = 0
for isearch in training_data.iterrows():
# get p_id, search_id and relevance from tr_data
p_id = isearch[1].product_uid
search_id = isearch[1].id
search_term_tokens = isearch[1].search_term
# # debug
# print search_term_set
test_matrix = [
search_term_tokens,
" ".join(bow_matrix.ix[np.int64(p_id), 'title']),
" ".join(bow_matrix.ix[np.int64(p_id), 'description']),
" ".join(bow_matrix.ix[np.int64(p_id), 'attributes']),
]
try:
            tfidf_matrix = tfidf_vectorizer.fit_transform(test_matrix) # fit the vectorizer to the documents
except:
test_matrix = map(clean_text, test_matrix)
            tfidf_matrix = tfidf_vectorizer.fit_transform(test_matrix) # fit the vectorizer to the documents
#print("ERROR!!!!! " + str(p_id))
#print(test_matrix)
# exit()
# Run all kernels for debug reasons (see print below)
#
# for kernel_type in get_kernel_types():
# print("Calculate similarity with - " + kernel_type + " kernel")
# sim_matrix = get_similarity_matrix(tfidf_matrix, kernel_type)[0]
# print(sim_matrix)
# break
# # Debug
# print("Calculate similarity with - " + kernel_type + " kernel")
sim_matrix = get_similarity_matrix(tfidf_matrix, kernel_type)[0]
title_score = sim_matrix[1]
desc_score = sim_matrix[2]
attr_score = sim_matrix[3]
# # Debug
# print(sim_matrix)
# print("Title score - " + str(title_score))
# print("Desc score - " + str(desc_score))
# print("Attrs score - " + str(attr_score))
score_row = {
'title_rate': title_score,
'desc_rate': desc_score,
'attr_rate': attr_score,
}
score_df.loc[search_id] = pd.Series(score_row)
counter += 1
        if counter != 0 and counter % 1000 == 0:
print(str(counter) + " searches processed")
# # Stop execution for debug reasons
# if counter == 1000:
# break
score_df.to_pickle('../../dataset/score_df_tfidf_tst.pickle')
if debug:
print(score_df)
print("Score Dataframe succesfully saved!")
return None
if __name__ == "__main__":
perform_tf_idf(debug=True)
# Calculate similarity with - cosine_similarity kernel
# [[ 0.00000000e+00 1.00000000e+00 1.00000000e+00 1.00000000e+00]
# Calculate similarity with - linear kernel
# [[ 0.00000000e+00 1.00000000e+00 1.00000000e+00 1.00000000e+00]
# Calculate similarity with - polynomial kernel
# [[-0.03946922 0. 0. 0. ]
# Calculate similarity with - sigmoid kernel
# [[ 0.23300535 0.23840584 0.23840584 0.23840584]
# Calculate similarity with - rbf kernel
# [[ 0. 0.01290305 0.0256396 0.0256396 ]
# Calculate similarity with - laplacian kernel
# [[ 0. 0.01290305 0.10028499 0.07684025]
| {
"content_hash": "260512a4c02f2791d3df0156e5b27abc",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 101,
"avg_line_length": 30.548571428571428,
"alnum_prop": 0.6041900486344931,
"repo_name": "Iolaum/Phi1337",
"id": "a1bc515ecbe89b7955e2cd5d86af456ac50758e7",
"size": "5346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/pipeline/a05b_tst_tf_idf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9714"
},
{
"name": "Python",
"bytes": "452804"
}
],
"symlink_target": ""
} |
'''
Created on 13.07.2015
@author: Aaron Klein
'''
import DIRECT
import numpy as np
from robo.maximizers.base_maximizer import BaseMaximizer
class Direct(BaseMaximizer):
def __init__(self, objective_function, X_lower, X_upper,
n_func_evals=400, n_iters=200):
"""
Interface for the DIRECT algorithm by D. R. Jones, C. D. Perttunen
        and B. E. Stuckman
Parameters
----------
objective_function: acquisition function
The acquisition function which will be maximized
X_lower: np.ndarray (D)
Lower bounds of the input space
X_upper: np.ndarray (D)
Upper bounds of the input space
n_func_evals: int
The maximum number of function evaluations
n_iters: int
The maximum number of iterations
"""
self.n_func_evals = n_func_evals
self.n_iters = n_iters
super(Direct, self).__init__(objective_function, X_lower, X_upper)
def _direct_acquisition_fkt_wrapper(self, acq_f):
def _l(x, user_data):
return -acq_f(np.array([x])), 0
return _l
def maximize(self):
"""
Maximizes the given acquisition function.
Returns
-------
np.ndarray(N,D)
Point with highest acquisition value.
"""
x, _, _ = DIRECT.solve(
self._direct_acquisition_fkt_wrapper(self.objective_func),
l=[self.X_lower],
u=[self.X_upper],
maxT=self.n_iters,
maxf=self.n_func_evals)
return np.array([x])
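# Hedged usage sketch, added for illustration (not part of the original module):
# constructing the maximizer above with a toy acquisition function. The internal
# wrapper calls the acquisition with a (1, D) array, so the toy function below
# reduces it to a scalar; all names in this helper are hypothetical and it is
# never called at import time.
def _example_usage():
    """Illustrative only: maximize a toy acquisition over [0, 1]."""
    def toy_acquisition(x):
        # x has shape (1, D); higher is better, peak at 0.5 in every dimension
        return -np.sum((x - 0.5) ** 2)
    x_lower = np.array([0.0])
    x_upper = np.array([1.0])
    maximizer = Direct(toy_acquisition, x_lower, x_upper,
                       n_func_evals=50, n_iters=50)
    return maximizer.maximize()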
| {
"content_hash": "192326a01a0af5d8423d1685d0a5ffb7",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 74,
"avg_line_length": 28.76271186440678,
"alnum_prop": 0.5380082498526813,
"repo_name": "aaronkl/RoBO",
"id": "584f30679f663076ad0657fdf7f29a7abc8de86a",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robo/maximizers/direct.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Matlab",
"bytes": "2054"
},
{
"name": "Python",
"bytes": "402978"
},
{
"name": "Shell",
"bytes": "318"
}
],
"symlink_target": ""
} |
"""mod_vnflcm_subscription
Revision ID: d25c7c865ce8
Revises: 2c5211036579
Create Date: 2020-10-15 14:27:04.946002
"""
# revision identifiers, used by Alembic.
revision = 'd25c7c865ce8'
down_revision = '2c5211036579'
from alembic import op
import sqlalchemy as sa
from tacker.db import types
from tacker.db import migration
def upgrade(active_plugins=None, options=None):
op.alter_column('vnf_lcm_filters', 'subscription_uuid',
type_=types.Uuid(length=36), existing_type=sa.String(length=255),
nullable=False)
sta_str = "json_unquote(json_extract('filter','$.operationTypes'))"
op.add_column(
'vnf_lcm_filters',
sa.Column('operation_types',
sa.LargeBinary(length=65536),
sa.Computed(sta_str)))
op.add_column(
'vnf_lcm_filters',
sa.Column('operation_types_len',
sa.Integer,
sa.Computed("ifnull(json_length('operation_types'),0)")))
op.drop_column('vnf_lcm_filters', 'operation_states')
op.drop_column('vnf_lcm_filters', 'operation_states_len')
op.alter_column('vnf_lcm_op_occs', 'operation_state',
type_=sa.String(length=16), existing_type=sa.String(length=255))
op.alter_column('vnf_lcm_op_occs', 'operation',
type_=sa.String(length=16),existing_type=sa.String(length=255))
op.add_column('vnf_lcm_op_occs',
        sa.Column('is_cancel_pending', sa.Boolean, nullable=False))
op.add_column('vnf_lcm_op_occs',
sa.Column('resource_changes', sa.JSON(), nullable=True))
op.add_column('vnf_lcm_op_occs',
sa.Column('error_point', sa.Integer, nullable=True))
op.add_column('vnf_lcm_op_occs',
sa.Column('changed_info', sa.JSON(), nullable=True))
op.add_column('vnf_lcm_op_occs',
sa.Column('created_at', sa.DateTime(), nullable=False))
op.add_column('vnf_lcm_op_occs',
sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('vnf_lcm_op_occs',
sa.Column('deleted_at', sa.DateTime(), nullable=True))
| {
"content_hash": "ae47a4d33c73dfc2d8b37da8b3e478bf",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 75,
"avg_line_length": 31.28787878787879,
"alnum_prop": 0.6430992736077482,
"repo_name": "openstack/tacker",
"id": "ff144d75fe38a914cf18608dc3bc2141d7fb1094",
"size": "2702",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tacker/db/migration/alembic_migrations/versions/d25c7c865ce8_mod_vnflcm_subscription.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "10809"
},
{
"name": "Mako",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "7648075"
},
{
"name": "Ruby",
"bytes": "2841"
},
{
"name": "Shell",
"bytes": "61750"
},
{
"name": "Smarty",
"bytes": "3624"
}
],
"symlink_target": ""
} |
"""
Views which allow users to create and activate accounts.
"""
from django.shortcuts import redirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from registration.backends import get_backend
def activate(request, backend,
template_name='registration/activate.html',
success_url=None, extra_context=None, **kwargs):
"""
Activate a user's account.
The actual activation of the account will be delegated to the
backend specified by the ``backend`` keyword argument (see below);
the backend's ``activate()`` method will be called, passing any
keyword arguments captured from the URL, and will be assumed to
return a ``User`` if activation was successful, or a value which
evaluates to ``False`` in boolean context if not.
Upon successful activation, the backend's
``post_activation_redirect()`` method will be called, passing the
``HttpRequest`` and the activated ``User`` to determine the URL to
redirect the user to. To override this, pass the argument
``success_url`` (see below).
On unsuccessful activation, will render the template
``registration/activate.html`` to display an error message; to
    override this, pass the argument ``template_name`` (see below).
**Arguments**
``backend``
The dotted Python import path to the backend class to
use. Required.
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context. Optional.
``success_url``
The name of a URL pattern to redirect to on successful
        activation. This is optional; if not specified, this will be
obtained by calling the backend's
``post_activation_redirect()`` method.
``template_name``
A custom template to use. This is optional; if not specified,
this will default to ``registration/activate.html``.
``\*\*kwargs``
Any keyword arguments captured from the URL, such as an
activation key, which will be passed to the backend's
``activate()`` method.
**Context:**
The context will be populated from the keyword arguments captured
in the URL, and any extra variables supplied in the
``extra_context`` argument (see above).
**Template:**
registration/activate.html or ``template_name`` keyword argument.
"""
backend = get_backend(backend)
account = backend.activate(request, **kwargs)
if account and not account.activated:
if success_url is None:
to, args, kwargs = backend.post_activation_redirect(request, account)
return redirect(to, *args, **kwargs)
else:
return redirect(success_url)
if extra_context is None:
extra_context = {}
context = RequestContext(request)
context['already_activated'] = account and account.activated or False
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name,
kwargs,
context_instance=context)
def register(request, backend, success_url=None, form_class=None,
disallowed_url='registration_disallowed',
template_name='registration/registration_form.html',
extra_context=None):
"""
Allow a new user to register an account.
The actual registration of the account will be delegated to the
backend specified by the ``backend`` keyword argument (see below);
it will be used as follows:
1. The backend's ``registration_allowed()`` method will be called,
passing the ``HttpRequest``, to determine whether registration
of an account is to be allowed; if not, a redirect is issued to
the view corresponding to the named URL pattern
``registration_disallowed``. To override this, see the list of
optional arguments for this view (below).
2. The form to use for account registration will be obtained by
calling the backend's ``get_form_class()`` method, passing the
``HttpRequest``. To override this, see the list of optional
arguments for this view (below).
3. If valid, the form's ``cleaned_data`` will be passed (as
keyword arguments, and along with the ``HttpRequest``) to the
backend's ``register()`` method, which should return the new
``User`` object.
4. Upon successful registration, the backend's
``post_registration_redirect()`` method will be called, passing
the ``HttpRequest`` and the new ``User``, to determine the URL
to redirect the user to. To override this, see the list of
optional arguments for this view (below).
**Required arguments**
None.
**Optional arguments**
``backend``
The dotted Python import path to the backend class to use.
``disallowed_url``
URL to redirect to if registration is not permitted for the
current ``HttpRequest``. Must be a value which can legally be
passed to ``django.shortcuts.redirect``. If not supplied, this
will be whatever URL corresponds to the named URL pattern
``registration_disallowed``.
``form_class``
The form class to use for registration. If not supplied, this
will be retrieved from the registration backend.
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``success_url``
URL to redirect to after successful registration. Must be a
value which can legally be passed to
``django.shortcuts.redirect``. If not supplied, this will be
retrieved from the registration backend.
``template_name``
A custom template to use. If not supplied, this will default
to ``registration/registration_form.html``.
**Context:**
``form``
The registration form.
Any extra variables supplied in the ``extra_context`` argument
(see above).
**Template:**
registration/registration_form.html or ``template_name`` keyword
argument.
"""
backend = get_backend(backend)
if not backend.registration_allowed(request):
return redirect(disallowed_url)
if form_class is None:
form_class = backend.get_form_class(request)
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES)
if form.is_valid():
new_user = backend.register(request, **form.cleaned_data)
if success_url is None:
to, args, kwargs = backend.post_registration_redirect(request, new_user)
return redirect(to, *args, **kwargs)
else:
return redirect(success_url)
else:
form = form_class()
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name,
{'form': form},
context_instance=context)
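# Hedged usage sketch, added for illustration (not part of the original module):
# both views above receive ``backend`` (and optionally ``success_url`` and
# ``template_name``) as extra keyword arguments from the URLconf, roughly as
# below. The dotted backend path and URL names are assumptions based on the
# app's default backend, not something this file defines.
#
#     from django.conf.urls import url
#     from registration.views import activate, register
#
#     urlpatterns = [
#         url(r'^register/$', register,
#             {'backend': 'registration.backends.default.DefaultBackend'},
#             name='registration_register'),
#         url(r'^activate/(?P<activation_key>\w+)/$', activate,
#             {'backend': 'registration.backends.default.DefaultBackend'},
#             name='registration_activate'),
#     ]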
| {
"content_hash": "22b0b73886cc6f1859143a69d52f3c59",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 88,
"avg_line_length": 36.229268292682924,
"alnum_prop": 0.6562542076208429,
"repo_name": "jnns/django-registration",
"id": "9fef172b36733833e03c7ab65fdf127b13c28cd7",
"size": "7427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registration/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "101077"
}
],
"symlink_target": ""
} |
def not_a_filter(data):
return data
| {
"content_hash": "2049b59de7619e190690bcb68eeb749e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 23,
"avg_line_length": 10.5,
"alnum_prop": 0.6428571428571429,
"repo_name": "jstacoder/flask-manage",
"id": "abc718f2cf8b02e8ece9d995c9f3db0f38202b3a",
"size": "97",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flask_mrbob/templates/blueprint/+blueprint.name+/filters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18183"
}
],
"symlink_target": ""
} |
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_AUTOML_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, provide_gcp_context, skip_gcp_system
from tests.test_utils.system_tests_class import SystemTest
@skip_gcp_system(GCP_AUTOML_KEY, require_local_executor=True, long_lasting=True)
class AutoMLDatasetOperationsSystemTest(SystemTest):
@provide_gcp_context(GCP_AUTOML_KEY)
def test_run_example_dag(self):
self.run_dag('example_automl_dataset', CLOUD_DAG_FOLDER)
@skip_gcp_system(GCP_AUTOML_KEY, require_local_executor=True, long_lasting=True)
class AutoMLModelOperationsSystemTest(SystemTest):
@provide_gcp_context(GCP_AUTOML_KEY)
def test_run_example_dag(self):
self.run_dag('example_create_and_deploy', CLOUD_DAG_FOLDER)
| {
"content_hash": "0d7257b78be5e580adc8fc3813b66a67",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 102,
"avg_line_length": 47,
"alnum_prop": 0.7759699624530664,
"repo_name": "wileeam/airflow",
"id": "e54f7f694888d7b23b5cfaf33f479db9c2f48dd9",
"size": "1588",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/providers/google/cloud/operators/test_automl_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17179"
},
{
"name": "HTML",
"bytes": "148281"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "9763694"
},
{
"name": "Shell",
"bytes": "221331"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
"""GATK variant calling -- HaplotypeCaller and UnifiedGenotyper.
"""
from distutils.version import LooseVersion
import toolz as tz
from bcbio import bam, broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline.shared import subset_variant_regions
from bcbio.pipeline import datadict as dd
from bcbio.variation import annotation, bamprep, ploidy
def _shared_gatk_call_prep(align_bams, items, ref_file, dbsnp, region, out_file):
"""Shared preparation work for GATK variant calling.
"""
data = items[0]
config = data["config"]
broad_runner = broad.runner_from_config(config)
broad_runner.run_fn("picard_index_ref", ref_file)
for x in align_bams:
bam.index(x, config)
params = ["-R", ref_file]
coverage_depth_min = tz.get_in(["algorithm", "coverage_depth_min"], config)
if coverage_depth_min and coverage_depth_min < 4:
confidence = "4.0"
params += ["--standard_min_confidence_threshold_for_calling", confidence,
"--standard_min_confidence_threshold_for_emitting", confidence]
for a in annotation.get_gatk_annotations(config):
params += ["--annotation", a]
for x in align_bams:
params += ["-I", x]
if dbsnp:
params += ["--dbsnp", dbsnp]
variant_regions = tz.get_in(["algorithm", "variant_regions"], config)
region = subset_variant_regions(variant_regions, region, out_file, items)
if region:
params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"]
return broad_runner, params
def unified_genotyper(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Perform SNP genotyping on the given alignment file.
"""
if out_file is None:
out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0]
if not utils.file_exists(out_file):
broad_runner, params = \
_shared_gatk_call_prep(align_bams, items,
ref_file, assoc_files.get("dbsnp"),
region, out_file)
with file_transaction(items[0], out_file) as tx_out_file:
params += ["-T", "UnifiedGenotyper",
"-o", tx_out_file,
"-ploidy", (str(ploidy.get_ploidy(items, region))
if broad_runner.gatk_type() == "restricted" else "2"),
"--genotype_likelihoods_model", "BOTH"]
broad_runner.run_gatk(params)
return out_file
def _joint_calling(items):
"""Determine if this call feeds downstream into joint calls.
"""
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), items[0])
if jointcaller:
assert len(items) == 1, "Can only do joint calling preparation with GATK with single samples"
assert tz.get_in(("metadata", "batch"), items[0]) is not None, \
"Joint calling requires batched samples, %s has no metadata batch." % dd.get_sample_name(items[0])
return jointcaller
def haplotype_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Call variation with GATK's HaplotypeCaller.
This requires the full non open-source version of GATK.
"""
if out_file is None:
out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0]
if not utils.file_exists(out_file):
broad_runner, params = \
_shared_gatk_call_prep(align_bams, items,
ref_file, assoc_files.get("dbsnp"),
region, out_file)
assert broad_runner.gatk_type() == "restricted", \
"Require full version of GATK 2.4+ for haplotype calling"
with file_transaction(items[0], out_file) as tx_out_file:
params += ["-T", "HaplotypeCaller",
"-o", tx_out_file,
"--annotation", "ClippingRankSumTest",
"--annotation", "DepthPerSampleHC"]
# Enable hardware based optimizations in GATK 3.1+
if LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.1"):
params += ["--pair_hmm_implementation", "VECTOR_LOGLESS_CACHING"]
# Enable non-diploid calling in GATK 3.3+
if LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.3"):
params += ["-ploidy", str(ploidy.get_ploidy(items, region))]
if _joint_calling(items): # Prepare gVCFs if doing joint calling
params += ["--emitRefConfidence", "GVCF", "--variant_index_type", "LINEAR",
"--variant_index_parameter", "128000"]
resources = config_utils.get_resources("gatk-haplotype", items[0]["config"])
if "options" in resources:
params += [str(x) for x in resources.get("options", [])]
broad_runner.new_resources("gatk-haplotype")
broad_runner.run_gatk(params)
return out_file
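# Illustrative sketch (not part of bcbio): a dependency-free rendition of how
# _shared_gatk_call_prep assembles the GATK argument list above.  The helper
# name and the example paths are invented for illustration only.
def _sketch_gatk_params(ref_file, bam_files, dbsnp=None, coverage_depth_min=None):
    params = ["-R", ref_file]
    if coverage_depth_min and coverage_depth_min < 4:
        confidence = "4.0"
        params += ["--standard_min_confidence_threshold_for_calling", confidence,
                   "--standard_min_confidence_threshold_for_emitting", confidence]
    for bam_file in bam_files:
        params += ["-I", bam_file]
    if dbsnp:
        params += ["--dbsnp", dbsnp]
    return params
# Example (hypothetical inputs):
#   _sketch_gatk_params("hg19.fa", ["sample1.bam"], dbsnp="dbsnp.vcf", coverage_depth_min=2)
#   -> ['-R', 'hg19.fa', '--standard_min_confidence_threshold_for_calling', '4.0', ...]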
| {
"content_hash": "7f402d3defdf1adfd8c85406b2e8372c",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 110,
"avg_line_length": 48.79047619047619,
"alnum_prop": 0.5988678508686317,
"repo_name": "guillermo-carrasco/bcbio-nextgen",
"id": "fdee8fe6b9101a89ffaebdd22d2d335c3a723de3",
"size": "5123",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bcbio/variation/gatk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1482215"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "13852"
}
],
"symlink_target": ""
} |
"""
__built_traceability_for_rule_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: levi
Modified: Sat Aug 24 20:49:41 2013
_________________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from MT_post__MetaModelElement_S import *
from MT_post__MetaModelElement_T import *
from MT_post__trace_link import *
from MT_pre__MetaModelElement_T import *
from MT_pre__MetaModelElement_S import *
from RHS import *
from LHS import *
from graph_MT_post__MetaModelElement_S import *
from graph_MT_post__MetaModelElement_T import *
from graph_LHS import *
from graph_MT_pre__MetaModelElement_S import *
from graph_MT_pre__MetaModelElement_T import *
from graph_RHS import *
from graph_MT_post__trace_link import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def built_traceability_for_rule_MDL(self, rootNode, MT_post__GM2AUTOSAR_MMRootNode=None, MT_pre__GM2AUTOSAR_MMRootNode=None, MoTifRuleRootNode=None):
# --- Generating attributes code for ASG MT_post__GM2AUTOSAR_MM ---
if( MT_post__GM2AUTOSAR_MMRootNode ):
# author
MT_post__GM2AUTOSAR_MMRootNode.author.setValue('Annonymous')
# description
MT_post__GM2AUTOSAR_MMRootNode.description.setValue('\n')
MT_post__GM2AUTOSAR_MMRootNode.description.setHeight(15)
# name
MT_post__GM2AUTOSAR_MMRootNode.name.setValue('')
MT_post__GM2AUTOSAR_MMRootNode.name.setNone()
# --- ASG attributes over ---
# --- Generating attributes code for ASG MT_pre__GM2AUTOSAR_MM ---
if( MT_pre__GM2AUTOSAR_MMRootNode ):
# author
MT_pre__GM2AUTOSAR_MMRootNode.author.setValue('Annonymous')
# description
MT_pre__GM2AUTOSAR_MMRootNode.description.setValue('\n')
MT_pre__GM2AUTOSAR_MMRootNode.description.setHeight(15)
# name
MT_pre__GM2AUTOSAR_MMRootNode.name.setValue('')
MT_pre__GM2AUTOSAR_MMRootNode.name.setNone()
# --- ASG attributes over ---
# --- Generating attributes code for ASG MoTifRule ---
if( MoTifRuleRootNode ):
# author
MoTifRuleRootNode.author.setValue('Annonymous')
# description
MoTifRuleRootNode.description.setValue('\n')
MoTifRuleRootNode.description.setHeight(15)
# name
MoTifRuleRootNode.name.setValue('BuildTraceabilityForRule')
# --- ASG attributes over ---
self.obj71=MT_post__MetaModelElement_S(self)
self.obj71.isGraphObjectVisual = True
if(hasattr(self.obj71, '_setHierarchicalLink')):
self.obj71._setHierarchicalLink(False)
# MT_label__
self.obj71.MT_label__.setValue('1')
# MT_pivotOut__
self.obj71.MT_pivotOut__.setValue('')
self.obj71.MT_pivotOut__.setNone()
# MT_post__cardinality
self.obj71.MT_post__cardinality.setValue('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n')
self.obj71.MT_post__cardinality.setHeight(15)
# MT_post__classtype
self.obj71.MT_post__classtype.setValue('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n')
self.obj71.MT_post__classtype.setHeight(15)
# MT_post__name
self.obj71.MT_post__name.setValue('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n')
self.obj71.MT_post__name.setHeight(15)
self.obj71.graphClass_= graph_MT_post__MetaModelElement_S
if self.genGraphics:
new_obj = graph_MT_post__MetaModelElement_S(763.0,226.0,self.obj71)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_post__MetaModelElement_S", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj71.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj71)
self.globalAndLocalPostcondition(self.obj71, rootNode)
self.obj71.postAction( rootNode.CREATE )
self.obj72=MT_post__MetaModelElement_T(self)
self.obj72.isGraphObjectVisual = True
if(hasattr(self.obj72, '_setHierarchicalLink')):
self.obj72._setHierarchicalLink(False)
# MT_label__
self.obj72.MT_label__.setValue('2')
# MT_pivotOut__
self.obj72.MT_pivotOut__.setValue('')
self.obj72.MT_pivotOut__.setNone()
# MT_post__cardinality
self.obj72.MT_post__cardinality.setValue('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n')
self.obj72.MT_post__cardinality.setHeight(15)
# MT_post__classtype
self.obj72.MT_post__classtype.setValue('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n')
self.obj72.MT_post__classtype.setHeight(15)
# MT_post__name
self.obj72.MT_post__name.setValue('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n')
self.obj72.MT_post__name.setHeight(15)
self.obj72.graphClass_= graph_MT_post__MetaModelElement_T
if self.genGraphics:
new_obj = graph_MT_post__MetaModelElement_T(763.0,406.0,self.obj72)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_post__MetaModelElement_T", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj72.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj72)
self.globalAndLocalPostcondition(self.obj72, rootNode)
self.obj72.postAction( rootNode.CREATE )
self.obj73=MT_post__trace_link(self)
self.obj73.isGraphObjectVisual = True
if(hasattr(self.obj73, '_setHierarchicalLink')):
self.obj73._setHierarchicalLink(False)
# MT_label__
self.obj73.MT_label__.setValue('3')
# MT_pivotOut__
self.obj73.MT_pivotOut__.setValue('')
self.obj73.MT_pivotOut__.setNone()
self.obj73.graphClass_= graph_MT_post__trace_link
if self.genGraphics:
new_obj = graph_MT_post__trace_link(932.0,389.5,self.obj73)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_post__trace_link", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj73.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj73)
self.globalAndLocalPostcondition(self.obj73, rootNode)
self.obj73.postAction( rootNode.CREATE )
self.obj75=MT_pre__MetaModelElement_T(self)
self.obj75.isGraphObjectVisual = True
if(hasattr(self.obj75, '_setHierarchicalLink')):
self.obj75._setHierarchicalLink(False)
# MT_pivotOut__
self.obj75.MT_pivotOut__.setValue('')
self.obj75.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj75.MT_subtypeMatching__.setValue(('True', 1))
self.obj75.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj75.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj75.MT_pre__classtype.setHeight(15)
# MT_pivotIn__
self.obj75.MT_pivotIn__.setValue('')
self.obj75.MT_pivotIn__.setNone()
# MT_label__
self.obj75.MT_label__.setValue('2')
# MT_pre__cardinality
self.obj75.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj75.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj75.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj75.MT_pre__name.setHeight(15)
self.obj75.graphClass_= graph_MT_pre__MetaModelElement_T
if self.genGraphics:
new_obj = graph_MT_pre__MetaModelElement_T(263.0,406.0,self.obj75)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__MetaModelElement_T", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj75.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj75)
self.globalAndLocalPostcondition(self.obj75, rootNode)
self.obj75.postAction( rootNode.CREATE )
self.obj77=MT_pre__MetaModelElement_S(self)
self.obj77.isGraphObjectVisual = True
if(hasattr(self.obj77, '_setHierarchicalLink')):
self.obj77._setHierarchicalLink(False)
# MT_pivotOut__
self.obj77.MT_pivotOut__.setValue('')
self.obj77.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj77.MT_subtypeMatching__.setValue(('True', 1))
self.obj77.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj77.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj77.MT_pre__classtype.setHeight(15)
# MT_pivotIn__
self.obj77.MT_pivotIn__.setValue('')
self.obj77.MT_pivotIn__.setNone()
# MT_label__
self.obj77.MT_label__.setValue('1')
# MT_pre__cardinality
self.obj77.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj77.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj77.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj77.MT_pre__name.setHeight(15)
self.obj77.graphClass_= graph_MT_pre__MetaModelElement_S
if self.genGraphics:
new_obj = graph_MT_pre__MetaModelElement_S(263.0,226.0,self.obj77)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__MetaModelElement_S", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj77.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj77)
self.globalAndLocalPostcondition(self.obj77, rootNode)
self.obj77.postAction( rootNode.CREATE )
self.obj80=RHS(self)
self.obj80.isGraphObjectVisual = True
if(hasattr(self.obj80, '_setHierarchicalLink')):
self.obj80._setHierarchicalLink(False)
# action
self.obj80.action.setValue('#===============================================================================\n# This code is executed after the rule has been applied.\n# You can access a node labelled n matched by this rule by: PostNode(\'n\').\n# To access attribute x of node n, use: PostNode(\'n\')[\'x\'].\n#===============================================================================\n\npass\n')
self.obj80.action.setHeight(15)
self.obj80.graphClass_= graph_RHS
if self.genGraphics:
new_obj = graph_RHS(583.0,166.0,self.obj80)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("RHS", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj80.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj80)
self.globalAndLocalPostcondition(self.obj80, rootNode)
self.obj80.postAction( rootNode.CREATE )
self.obj81=LHS(self)
self.obj81.isGraphObjectVisual = True
if(hasattr(self.obj81, '_setHierarchicalLink')):
self.obj81._setHierarchicalLink(False)
# constraint
self.obj81.constraint.setValue('#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n')
self.obj81.constraint.setHeight(15)
self.obj81.graphClass_= graph_LHS
if self.genGraphics:
new_obj = graph_LHS(143.0,166.0,self.obj81)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("LHS", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj81.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj81)
self.globalAndLocalPostcondition(self.obj81, rootNode)
self.obj81.postAction( rootNode.CREATE )
# Connections for obj71 (graphObject_: Obj0) of type MT_post__MetaModelElement_S
self.drawConnections(
)
# Connections for obj72 (graphObject_: Obj1) of type MT_post__MetaModelElement_T
self.drawConnections(
(self.obj72,self.obj73,[931.0, 479.0, 932.0, 389.5],"true", 2) )
# Connections for obj73 (graphObject_: Obj2) of type MT_post__trace_link
self.drawConnections(
(self.obj73,self.obj71,[932.0, 389.5, 933.0, 300.0],"true", 2) )
# Connections for obj75 (graphObject_: Obj4) of type MT_pre__MetaModelElement_T
self.drawConnections(
)
# Connections for obj77 (graphObject_: Obj6) of type MT_pre__MetaModelElement_S
self.drawConnections(
)
# Connections for obj80 (graphObject_: Obj9) of type RHS
self.drawConnections(
)
# Connections for obj81 (graphObject_: Obj10) of type LHS
self.drawConnections(
)
newfunction = built_traceability_for_rule_MDL
loadedMMName = ['MT_post__GM2AUTOSAR_MM_META', 'MT_pre__GM2AUTOSAR_MM_META', 'MoTifRule_META']
atom3version = '0.3'
| {
"content_hash": "ff3a493f4132d7239e0d240cf6b645b2",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 747,
"avg_line_length": 57.01634877384196,
"alnum_prop": 0.6277180406212665,
"repo_name": "levilucio/SyVOLT",
"id": "4b2c91a4546fce4c0c68363d4eee1062089b1aa8",
"size": "20925",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/traceability_construction/models/built_traceability_for_rule_MDL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
a = r'''this is some
more stuff
| {
"content_hash": "8aa2b57bf9c3ae1893560aab09e39edf",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 20,
"avg_line_length": 11,
"alnum_prop": 0.6363636363636364,
"repo_name": "ArcherSys/ArcherSys",
"id": "f0df304376c361c0f0250b0449b8c580d4eacf2b",
"size": "33",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skulpt/test/tokenize/t40.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def template_test():
return render_template("template.html", my_string="Wheeee!", my_list=[0, 1, 2, 3, 4, 5], my_dict=dict(name="Nitin",
age=32))
if __name__ == '__main__':
    app.run(debug=True)
| {
"content_hash": "0d3a553800d490297cca550694d2e857",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 119,
"avg_line_length": 27.928571428571427,
"alnum_prop": 0.45012787723785164,
"repo_name": "nitin-cherian/LifeLongLearning",
"id": "37ac079237fec14c672e5adf35f12dd8d17163d7",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Experiments/JINJA/RealPython/Examples/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32365"
},
{
"name": "CSS",
"bytes": "10259"
},
{
"name": "HTML",
"bytes": "55977"
},
{
"name": "JavaScript",
"bytes": "7368910"
},
{
"name": "Jupyter Notebook",
"bytes": "768879"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "17502534"
},
{
"name": "Shell",
"bytes": "7751"
},
{
"name": "Smarty",
"bytes": "30663"
}
],
"symlink_target": ""
} |
"""Serves content for "script" handlers using the Java runtime."""
import google
import os
import os.path
import threading
from google.appengine.api import appinfo
from google.appengine.tools.devappserver2 import http_runtime
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import java_application
# TODO: figure out what's needed to react to file changes
class JavaRuntimeInstanceFactory(instance.InstanceFactory):
"""A factory that creates new Java runtime Instances."""
START_URL_MAP = appinfo.URLMap(
url='/_ah/start',
script='_java_app',
login='admin')
WARMUP_URL_MAP = appinfo.URLMap(
url='/_ah/warmup',
script='_java_app',
login='admin')
FILE_CHANGE_INSTANCE_RESTART_POLICY = instance.ALWAYS
def __init__(self, request_data, runtime_config_getter, module_configuration):
"""Initializer for JavaRuntimeInstanceFactory.
Args:
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
runtime_config_getter: A function that can be called without arguments
and returns the runtime_config_pb2.RuntimeConfig containing the
configuration for the runtime.
module_configuration: An application_configuration.ModuleConfiguration
        instance representing the configuration of the module that owns the
runtime.
"""
super(JavaRuntimeInstanceFactory, self).__init__(request_data, 1)
self._runtime_config_getter = runtime_config_getter
self._module_configuration = module_configuration
self._application_lock = threading.Lock()
self._java_application = java_application.JavaApplication(
self._module_configuration)
def get_restart_directories(self):
"""Returns a list of directories where changes trigger a restart.
Returns:
A list of directories where changes trigger a restart.
"""
# TODO: implement
return []
def files_changed(self):
"""Called when a file relevant to the factory *might* have changed."""
# TODO: implement
def configuration_changed(self, config_changes):
"""Called when the configuration of the module has changed.
Args:
      config_changes: A set containing the changes that occurred. See the
*_CHANGED constants in the application_configuration module.
"""
# TODO: implement
def new_instance(self, instance_id, expect_ready_request=False):
"""Create and return a new Instance.
Args:
instance_id: A string or integer representing the unique (per module) id
of the instance.
expect_ready_request: If True then the instance will be sent a special
request (i.e. /_ah/warmup or /_ah/start) before it can handle external
requests.
Returns:
The newly created instance.Instance.
"""
def instance_config_getter():
runtime_config = self._runtime_config_getter()
runtime_config.instance_id = str(instance_id)
return runtime_config
env = self._java_application.get_environment()
instance_jar = os.path.abspath(os.path.join(
os.path.dirname(google.__file__),
'appengine/tools/devappserver2/java/lib/StandaloneInstance_deploy.jar'))
assert os.path.exists(instance_jar), instance_jar
# TODO: replace this with something smaller and releasable
with self._application_lock:
proxy = http_runtime.HttpRuntimeProxy(
['java', '-jar', instance_jar],
instance_config_getter,
self._module_configuration,
env=env,
start_process_flavor=http_runtime.START_PROCESS_FILE)
return instance.Instance(self.request_data,
instance_id,
proxy,
self.max_concurrent_requests,
self.max_background_threads,
expect_ready_request)
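# Illustrative sketch (not part of devappserver2): the closure pattern used by
# new_instance() above, where each instance gets a config getter that stamps
# the shared runtime config with its own instance_id.  SimpleNamespace stands
# in for runtime_config_pb2.RuntimeConfig; all names here are assumptions.
from types import SimpleNamespace
def make_instance_config_getter(runtime_config_getter, instance_id):
    def instance_config_getter():
        runtime_config = runtime_config_getter()
        runtime_config.instance_id = str(instance_id)
        return runtime_config
    return instance_config_getter
_getter = make_instance_config_getter(lambda: SimpleNamespace(instance_id=None), 7)
assert _getter().instance_id == '7'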
| {
"content_hash": "dc5fd0b12c704ca9717c17603083864f",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 80,
"avg_line_length": 35.57142857142857,
"alnum_prop": 0.6772088353413654,
"repo_name": "yencarnacion/jaikuengine",
"id": "6b4d31965e4b12438e2bc700a064b782b2f94ca4",
"size": "4585",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".google_appengine/google/appengine/tools/devappserver2/java_runtime.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "407860"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "CSS",
"bytes": "330328"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "JavaScript",
"bytes": "751903"
},
{
"name": "PHP",
"bytes": "1808240"
},
{
"name": "Python",
"bytes": "50134630"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "39632"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
import os, sys
from django.core.management import execute_manager
# dirty hack to get the backend working.
#sys.path.insert(0, os.path.abspath('./..'))
#sys.path.insert(0, os.path.abspath('./../..'))
#example_dir = os.path.dirname(__file__)
#sys.path.insert(0, os.path.join(example_dir, '..'))
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
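# Note (not part of this project): execute_manager was deprecated in Django 1.4
# and removed in later releases; a present-day manage.py does roughly the
# following instead.  The settings module name mirrors the `import settings`
# above and is otherwise an assumption.
#
#   import os, sys
#   from django.core.management import execute_from_command_line
#   os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
#   execute_from_command_line(sys.argv)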
| {
"content_hash": "182d4a51fe559fa3098b95c5fb2e80e5",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 299,
"avg_line_length": 47.9375,
"alnum_prop": 0.7001303780964798,
"repo_name": "aparo/django-elasticsearch",
"id": "fb48d9fbcdcd240f56e16fff13999be742d0e8c5",
"size": "789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testproj/manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "94880"
}
],
"symlink_target": ""
} |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(
{'learn_W': True, 'bias_term': False, 'bias_shape': None},
{'learn_W': True, 'bias_term': True, 'bias_shape': None},
{'learn_W': False, 'bias_term': False, 'bias_shape': None},
{'learn_W': False, 'bias_term': True, 'bias_shape': (2,)}
)
class TestScale(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
self.W = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
self.b = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
self.y_expected = numpy.copy(self.x)
for i, j, k in numpy.ndindex(self.y_expected.shape):
self.y_expected[i, j, k] *= self.W[j]
if self.bias_term:
self.y_expected[i, j, k] += self.b[j]
self.gy = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
bias_term = self.bias_term
bias_shape = self.bias_shape
axis = 1
if self.learn_W:
self.link = links.Scale(
axis, self.W.shape, bias_term, bias_shape)
self.link.W.data = self.W
if bias_term:
self.link.bias.b.data = self.b
else:
self.link = links.Scale(
axis, None, bias_term, bias_shape)
if bias_term:
self.link.bias.b.data = self.b
self.link.cleargrads()
def test_attribute_presence(self):
self.assertEqual(self.learn_W, hasattr(self.link, 'W'))
self.assertEqual(self.bias_term, hasattr(self.link, 'bias'))
def check_forward(self, x_data, W_data, y_expected):
x = chainer.Variable(x_data)
if W_data is None:
y = self.link(x)
testing.assert_allclose(y_expected, y.data)
else:
W = chainer.Variable(W_data)
y = self.link(x, W)
testing.assert_allclose(y_expected, y.data)
def test_forward_cpu(self):
if self.learn_W:
W = None
else:
W = self.W
self.check_forward(self.x, W, self.y_expected)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_W:
W = None
else:
W = cuda.to_gpu(self.W)
self.check_forward(x, W, self.y_expected)
def check_backward(self, x_data, W_data, y_grad):
if W_data is None:
params = [self.link.W]
gradient_check.check_backward(
self.link, x_data, y_grad, params, atol=1e-2)
else:
gradient_check.check_backward(
self.link, (x_data, W_data), y_grad, atol=1e-2)
@condition.retry(3)
def test_backward_cpu(self):
if self.learn_W:
W = None
else:
W = self.W
self.check_backward(self.x, W, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_W:
W = None
else:
W = cuda.to_gpu(self.W)
gy = cuda.to_gpu(self.gy)
self.check_backward(x, W, gy)
class TestScaleInvalidArgc(unittest.TestCase):
def setUp(self):
x_data = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
W_data = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
self.axis = 1
self.x = chainer.Variable(x_data)
self.W = chainer.Variable(W_data)
def test_scale_invalid_argc1(self):
func = links.Scale(self.axis, self.W.data.shape)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x, self.W)
def test_scale_invalid_argc2(self):
func = links.Scale(self.axis, None)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x)
class TestScaleNoBiasShape(unittest.TestCase):
def test_scale_no_bias_shape(self):
axis = 1
with self.assertRaises(ValueError):
links.Scale(axis, None, True, None)
testing.run_module(__name__, __file__)
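# Illustrative sketch (not part of the test suite): the triple loop in setUp
# computes the expected Scale output element-wise; the same values follow from
# NumPy broadcasting along axis=1, as shown here with freshly drawn arrays.
import numpy as _np
_x = _np.random.uniform(-1, 1, (3, 2, 3)).astype(_np.float32)
_W = _np.random.uniform(-1, 1, (2,)).astype(_np.float32)
_b = _np.random.uniform(-1, 1, (2,)).astype(_np.float32)
_y = _x * _W[None, :, None] + _b[None, :, None]  # equals the loop with bias_term=True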
| {
"content_hash": "d1b1620bff13887073ce1bef308c49c7",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 78,
"avg_line_length": 32.11971830985915,
"alnum_prop": 0.5698311773733831,
"repo_name": "pfnet/chainer",
"id": "bd75690deedaff8fc647fa51a433fbc4d029e534",
"size": "4561",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/links_tests/connection_tests/test_scale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2564338"
}
],
"symlink_target": ""
} |
import pytest
import numpy as np
import sklearn.datasets as datasets
import sklearn.cluster as cluster
import sklearn.preprocessing as pp
import sklearn.metrics as m
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestCluster(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.cluster.AffinityPropagation, cluster.AffinityPropagation)
self.assertIs(df.cluster.AgglomerativeClustering, cluster.AgglomerativeClustering)
self.assertIs(df.cluster.Birch, cluster.Birch)
self.assertIs(df.cluster.DBSCAN, cluster.DBSCAN)
self.assertIs(df.cluster.FeatureAgglomeration, cluster.FeatureAgglomeration)
self.assertIs(df.cluster.KMeans, cluster.KMeans)
self.assertIs(df.cluster.MiniBatchKMeans, cluster.MiniBatchKMeans)
self.assertIs(df.cluster.MeanShift, cluster.MeanShift)
self.assertIs(df.cluster.SpectralClustering, cluster.SpectralClustering)
self.assertIs(df.cluster.bicluster.SpectralBiclustering,
cluster.bicluster.SpectralBiclustering)
self.assertIs(df.cluster.bicluster.SpectralCoclustering,
cluster.bicluster.SpectralCoclustering)
def test_estimate_bandwidth(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.cluster.estimate_bandwidth(random_state=self.random_state)
expected = cluster.estimate_bandwidth(iris.data, random_state=self.random_state)
self.assertEqual(result, expected)
def test_k_means(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.cluster.k_means(3, random_state=self.random_state)
expected = cluster.k_means(iris.data, 3, random_state=self.random_state)
self.assertEqual(len(result), 3)
self.assert_numpy_array_almost_equal(result[0], expected[0])
self.assertIsInstance(result[1], pdml.ModelSeries)
tm.assert_index_equal(result[1].index, df.index)
tm.assert_numpy_array_equal(result[1].values, expected[1])
self.assertAlmostEqual(result[2], expected[2])
def test_ward_tree(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.cluster.ward_tree()
expected = cluster.ward_tree(iris.data)
self.assertEqual(len(result), 4)
self.assert_numpy_array_almost_equal(result[0], expected[0])
self.assertEqual(result[1], expected[1])
self.assertEqual(result[2], expected[2])
self.assertEqual(result[3], expected[3])
connectivity = np.ones((len(df), len(df)))
result = df.cluster.ward_tree(connectivity)
expected = cluster.ward_tree(iris.data, connectivity)
self.assert_numpy_array_almost_equal(result[0], expected[0])
self.assertEqual(result[1], expected[1])
self.assertEqual(result[2], expected[2])
self.assert_numpy_array_almost_equal(result[3], expected[3])
def test_affinity_propagation(self):
iris = datasets.load_iris()
similality = np.cov(iris.data)
df = pdml.ModelFrame(similality)
result = df.cluster.affinity_propagation()
expected = cluster.affinity_propagation(similality)
self.assertEqual(len(result), 2)
self.assert_numpy_array_almost_equal(result[0], expected[0])
self.assertIsInstance(result[1], pdml.ModelSeries)
tm.assert_index_equal(result[1].index, df.index)
tm.assert_numpy_array_equal(result[1].values, expected[1])
def test_affinity_propagation_class(self):
from sklearn.datasets.samples_generator import make_blobs
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers,
cluster_std=0.5, random_state=0)
df = pdml.ModelFrame(data=X, target=labels_true)
af = df.cluster.AffinityPropagation(preference=-50)
df.fit(af)
af2 = cluster.AffinityPropagation(preference=-50).fit(X)
tm.assert_numpy_array_equal(af.cluster_centers_indices_,
af2.cluster_centers_indices_)
tm.assert_numpy_array_equal(af.labels_, af2.labels_)
def test_dbscan(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.cluster.dbscan()
expected = cluster.dbscan(iris.data)
self.assertEqual(len(result), 2)
self.assert_numpy_array_almost_equal(result[0], expected[0])
self.assertIsInstance(result[1], pdml.ModelSeries)
tm.assert_index_equal(result[1].index, df.index)
tm.assert_numpy_array_equal(result[1].values, expected[1])
def test_mean_shift(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.cluster.mean_shift()
expected = cluster.mean_shift(iris.data)
self.assertEqual(len(result), 2)
self.assert_numpy_array_almost_equal(result[0], expected[0])
self.assertIsInstance(result[1], pdml.ModelSeries)
tm.assert_index_equal(result[1].index, df.index)
tm.assert_numpy_array_equal(result[1].values, expected[1])
def test_spectral_clustering(self):
N = 50
m = np.random.random_integers(1, 200, size=(N, N))
m = (m + m.T) / 2
df = pdml.ModelFrame(m)
result = df.cluster.spectral_clustering(random_state=self.random_state)
expected = cluster.spectral_clustering(m, random_state=self.random_state)
self.assertIsInstance(result, pdml.ModelSeries)
tm.assert_index_equal(result.index, df.index)
tm.assert_numpy_array_equal(result.values, expected)
@pytest.mark.parametrize("algo", ['KMeans', 'MiniBatchKMeans'])
def test_KMeans(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.cluster, algo)(3, random_state=self.random_state)
mod2 = getattr(cluster, algo)(3, random_state=self.random_state)
df.fit(mod1)
mod2.fit(iris.data)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
def test_KMeans_scores(self):
digits = datasets.load_digits()
df = pdml.ModelFrame(digits)
scaled = pp.scale(digits.data)
df.data = df.data.pp.scale()
self.assert_numpy_array_almost_equal(df.data.values, scaled)
clf1 = cluster.KMeans(init='k-means++', n_clusters=10,
n_init=10, random_state=self.random_state)
clf2 = df.cluster.KMeans(init='k-means++', n_clusters=10,
n_init=10, random_state=self.random_state)
clf1.fit(scaled)
df.fit_predict(clf2)
expected = m.homogeneity_score(digits.target, clf1.labels_)
self.assertEqual(df.metrics.homogeneity_score(), expected)
expected = m.completeness_score(digits.target, clf1.labels_)
self.assertEqual(df.metrics.completeness_score(), expected)
expected = m.v_measure_score(digits.target, clf1.labels_)
self.assertEqual(df.metrics.v_measure_score(), expected)
expected = m.adjusted_rand_score(digits.target, clf1.labels_)
self.assertEqual(df.metrics.adjusted_rand_score(), expected)
expected = m.homogeneity_score(digits.target, clf1.labels_)
self.assertEqual(df.metrics.homogeneity_score(), expected)
expected = m.silhouette_score(scaled, clf1.labels_, metric='euclidean',
sample_size=300, random_state=self.random_state)
result = df.metrics.silhouette_score(metric='euclidean', sample_size=300,
random_state=self.random_state)
self.assertAlmostEqual(result, expected)
def test_Classifications(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
models = ['AffinityPropagation', 'MeanShift']
for model in models:
mod1 = getattr(df.cluster, model)()
mod2 = getattr(cluster, model)()
df.fit(mod1)
mod2.fit(iris.data)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
@pytest.mark.parametrize("algo", ['KMeans', 'MiniBatchKMeans'])
def test_fit_predict(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.cluster, algo)(3, random_state=self.random_state)
mod2 = getattr(cluster, algo)(3, random_state=self.random_state)
result = df.fit_predict(mod1)
expected = mod2.fit_predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
result = df.score(mod1)
expected = mod2.score(iris.data)
self.assert_numpy_array_almost_equal(result, expected)
@pytest.mark.parametrize("algo", ['SpectralBiclustering',
'SpectralCoclustering'])
def test_Bicluster(self, algo):
data, rows, columns = datasets.make_checkerboard(
shape=(300, 300), n_clusters=5, noise=10,
shuffle=True, random_state=self.random_state)
df = pdml.ModelFrame(data)
mod1 = getattr(df.cluster.bicluster, algo)(3, random_state=self.random_state)
mod2 = getattr(cluster.bicluster, algo)(3, random_state=self.random_state)
df.fit(mod1)
mod2.fit(data)
self.assert_numpy_array_almost_equal(mod1.biclusters_, mod2.biclusters_)
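# Reference sketch (plain scikit-learn, not pandas_ml): the bare sklearn call
# that the ModelFrame accessor tests above wrap -- fit KMeans on the iris data
# and recover the cluster labels.
from sklearn import cluster as _cluster, datasets as _datasets
_iris = _datasets.load_iris()
_labels = _cluster.KMeans(n_clusters=3, random_state=0).fit_predict(_iris.data)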
| {
"content_hash": "c828ccc8b27d48a488e343648cf25905",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 90,
"avg_line_length": 40.18650793650794,
"alnum_prop": 0.6283203317863139,
"repo_name": "sinhrks/expandas",
"id": "c992a8f45baf5fa92a8cce1b73b9321c2751f3c3",
"size": "10150",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas_ml/skaccessors/test/test_cluster.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "387987"
},
{
"name": "Shell",
"bytes": "816"
}
],
"symlink_target": ""
} |
import json
from django.contrib.auth.models import Group
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from . import models
from .utils import Form
def form(request, form_slug, data=None):
form_spec = get_object_or_404(models.FormSpec, slug=form_slug)
if request.method == 'POST':
form = Form(request.user, form_spec, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect('.')
else:
form = Form(request.user, form_spec, data=data)
return TemplateResponse(
request,
'dittoforms/form.html',
{'form': form}
)
# @login_required
def response(request, form_slug):
form_spec = get_object_or_404(models.FormSpec, slug=form_slug)
# TODO need to configure if multiple submissions are required
submissions = models.FormSubmission.objects.filter(
form=form_spec,
user=request.user
)
return HttpResponse(submissions[0].data)
def edit(request, form_slug):
form_spec = get_object_or_404(models.FormSpec, slug=form_slug)
# TODO need to configure if multiple submissions are required
submissions = models.FormSubmission.objects.filter(
form=form_spec,
user=request.user
)
data = json.loads(submissions[0].data)
return form(request, form_slug, data)
# TODO full rest_framework goodness here, just hacking this together for now
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt # TODO remove this once we remember how to add the CSRF token to ajax requests
def api(request, role_name):
form_spec = get_object_or_404(
models.FormSpec, regform__role__name=role_name
)
if request.method == 'GET':
return HttpResponse(
form_spec.spec,
content_type='application/json'
)
else:
form_spec.spec = request.body
form_spec.save()
return HttpResponse() # 200 ok
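# Hypothetical URLconf sketch (not part of the app): one way the views above
# could be wired up.  The route patterns and names are assumptions for
# illustration only.
#
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('forms/<slug:form_slug>/', views.form, name='dittoforms-form'),
#       path('forms/<slug:form_slug>/edit/', views.edit, name='dittoforms-edit'),
#       path('api/<str:role_name>/', views.api, name='dittoforms-api'),
#   ]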
| {
"content_hash": "538386da80390e21e57a44adea05ffd7",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 82,
"avg_line_length": 31.353846153846153,
"alnum_prop": 0.6766437684003925,
"repo_name": "Kvoti/ditto",
"id": "509e9607471e43bbb4ac9d8ae039313bfabb0207",
"size": "2038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ditto/dittoforms/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "315024"
},
{
"name": "HTML",
"bytes": "1012831"
},
{
"name": "JavaScript",
"bytes": "539187"
},
{
"name": "Python",
"bytes": "200035"
},
{
"name": "Ruby",
"bytes": "1183"
},
{
"name": "Shell",
"bytes": "895"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
from django.db.models.fields import SlugField
import re
from django.template.loader import get_template
from django.template import Context
class Baker(object):
"""
Given a dictionary of apps and models, Baker will bake up a bunch of files that will help get your new app up
and running quickly.
"""
def bake(self, apps_and_models):
"""
Iterates a dictionary of apps and models and creates all the necessary files to get up and running quickly.
"""
for app_label, models in apps_and_models.iteritems():
model_names = {model.__name__: self.get_field_names_for_model(model) for model in models}
self.create_directories(app_label)
self.create_init_files(app_label, model_names.keys(), models)
self.remove_empty_startapp_files(app_label)
for file_name in ["forms", "admin"]:
file_path = "%s/%s.py" % (app_label, file_name)
template_path = "django_baker/%s" % (file_name)
self.create_file_from_template(file_path, template_path, {"model_names": model_names})
for model in models:
model_attributes = self.model_attributes(app_label, model)
self.create_files_from_templates(model_attributes)
def get_field_names_for_model(self, model):
"""
Returns fields other than id and uneditable fields (DateTimeField where auto_now or auto_now_add is True)
"""
return [field.name for field in model._meta.fields if field.name != "id" and not
(field.get_internal_type() == "DateTimeField" and
(field.auto_now is True or field.auto_now_add is True))]
def create_directories(self, app_label):
"""
If not already there, adds a directory for views, urls and templates.
"""
for folder_name in ["views", "urls", "templates/%s" % app_label]:
directory_path = "%s/%s" % (app_label, folder_name)
if not os.path.exists(directory_path):
os.makedirs(directory_path)
def create_init_files(self, app_label, model_names, models):
"""
If not already there, creates a new init file in views and urls directory. Init file imports from all
of the files within the directory.
"""
model_name_slugs = ["%s_views" % (self.camel_to_slug(model_name)) for model_name in model_names]
model_names_dict = {self.camel_to_slug(model.__name__): self.camel_to_slug(self.model_name_plural(model)) for
model in models}
for folder_name in ["views", "urls"]:
file_path = "%s/%s/__init__.py" % (app_label, folder_name)
template_path = "django_baker/__init__%s" % folder_name
self.create_file_from_template(file_path, template_path, {"app_label": app_label,
"model_name_slugs": model_name_slugs,
"model_names_dict": model_names_dict
})
def model_attributes(self, app_label, model):
"""
Creates a dictionary of model attributes that will be used in the templates.
"""
model_name = model.__name__
model_name_plural = self.model_name_plural(model)
slug_field = self.get_unique_slug_field_name(model)
slug_field_name = slug_field.name if slug_field else "slug"
lookup_field = slug_field_name if slug_field else "pk"
return {
'app_label': app_label,
'model': model,
'model_name': model_name,
'model_name_slug': self.camel_to_slug(model_name),
'model_name_plural': model_name_plural,
'model_name_plural_slug': self.camel_to_slug(model_name_plural),
'model_fields': self.get_field_names_for_model(model),
'slug_field': slug_field,
'slug_field_name': slug_field_name,
'lookup_field': lookup_field
}
def create_files_from_templates(self, model_attributes):
"""
Determines the correct path to put each file and then calls create file method.
"""
for folder_name in ["views", "urls"]:
file_path = "%s/%s/%s_%s.py" % (model_attributes['app_label'], folder_name,
model_attributes['model_name_slug'], folder_name)
template_path = "django_baker/%s" % (folder_name)
self.create_file_from_template(file_path, template_path, model_attributes)
for file_name in ["base", "list", "detail", "create", "update", "delete"]:
file_path = "%s/templates/%s/%s_%s.html" % (model_attributes['app_label'], model_attributes['app_label'],
model_attributes['model_name_slug'], file_name)
template_path = "django_baker/%s.html" % (file_name)
self.create_file_from_template(file_path, template_path, model_attributes)
def create_file_from_template(self, file_path, template_path, context_variables):
"""
Takes template file and context variables and uses django's render method to create new file.
"""
if os.path.exists(file_path):
print("\033[91m" + file_path + " already exists. Skipping." + "\033[0m")
return
with open(file_path, 'w') as new_file:
new_file.write(get_template(template_path).render(Context(context_variables)))
print("\033[92m" + "successfully baked " + file_path + "\033[0m")
def remove_empty_startapp_files(self, app_label):
"""
        Removes the 'empty' views, admin, and tests files that startapp generates (files with 4 lines or fewer,
        which is all they start with).
"""
for file_name in ["views", "admin", "tests"]:
file_path = "%s/%s.py" % (app_label, file_name)
if os.path.exists(file_path):
num_lines = sum(1 for line in open(file_path))
if num_lines <= 4:
os.remove(file_path)
def camel_to_slug(self, name):
"""
Helper method to convert camel case string (PumpernickelBread) to slug string (pumpernickel_bread)
"""
name = re.sub(r'([a-z])([A-Z])', r'\1 \2', name).title().replace(" ", "").replace("_", "")
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
slug = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
return slug
def model_name_plural(self, model):
"""
Gets the pluralized version of a model. Simply adds an 's' to model name if verbose_name_plural isn't set.
"""
if isinstance(model._meta.verbose_name_plural, str):
return model._meta.verbose_name_plural
return "%ss" % model.__name__
def get_unique_slug_field_name(self, model):
"""
Determines if model has exactly 1 SlugField that is unique. If so, returns it. Otherwise returns None.
"""
slug_fields = []
for field in model._meta.fields:
if field.unique and isinstance(field, SlugField):
slug_fields.append(field)
if len(slug_fields) == 1:
return slug_fields[0]
return None
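# Standalone sketch (not part of django-baker): the same regexes as
# camel_to_slug above, without the Django dependency, for quick checks.
# The function name is invented for illustration.
import re as _re
def _camel_to_slug_sketch(name):
    name = _re.sub(r'([a-z])([A-Z])', r'\1 \2', name).title().replace(" ", "").replace("_", "")
    s1 = _re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return _re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
assert _camel_to_slug_sketch("PumpernickelBread") == "pumpernickel_bread"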
| {
"content_hash": "2ebe5f5dfd17ea20eab8a4cfeb2859be",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 119,
"avg_line_length": 49.348684210526315,
"alnum_prop": 0.564591387814958,
"repo_name": "ojengwa/django-baker",
"id": "ff04ef3142f9cd91616e6d4b29563996ad52d04b",
"size": "7501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_baker/bakery.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6016"
},
{
"name": "Python",
"bytes": "19326"
}
],
"symlink_target": ""
} |
"""
Created on Mon Dec 14 22:25:49 2016
Preprocessing Utilities for cleaning and processing of data
@author: Rupak Chakraborty
"""
import pandas as pd
from nltk.corpus import stopwords
import string
from nltk.stem import PorterStemmer
stopword_list = set(stopwords.words("english"))
punctuation_list = list(string.punctuation)
ps = PorterStemmer()
months_list = ["january","february","march","april","may","june","july","august",
"september","october","november","december"]
digit_list = ["0","1","2","3","4","5","6","7","8","9"]
month_list_short = ["jan","feb","mar","apr","may","jun","jul","aug","sept","oct","nov","dec"]
emoticon_list = [":)",":(","^_^","-_-","<3",":D",":P",":/"]
html_tag_list = [" ","<",">","&",";","<strong>","<em>","[1]","</strong>","</em>","<div>","</div>","<b>","</b>","[2]","[3]","...","[img]","[/img]","<u>","</u>","<p>","</p>","\n","\\t","<span>",
"</span>","[Moved]","<br/>","<a>","</a>",""","<br>","<br />","Â","<a rel=\"nofollow\" class=\"ot-hashtag\"","'","<a","’","'"]
extend_punct_list = [' ',',',':',';','\'','\t','\n','?','-','$',"!!","?","w/","!","!!!","w/","'","RT","rt","@","#","/",":)",
":(",":D","^_^","^","...","&","\\",":","?","<",">","$","%","*","`","~","-","_",
"+","=","{","}","[","]","|","\"",",",";",")","(","r/","/u/","*","-"]
punctuation_list.extend(extend_punct_list)
#punctuation_list.remove(".")
months_list.extend(month_list_short)
"""
Given a string normalizes it, i.e. converts it to lowercase and strips it of extra spaces
Params:
--------
s - String which is to be normalized
Returns:
---------
String in the normalized form
"""
def normalize_string(s):
s = s.lower()
s = s.strip()
return s
"""
Given a list of strings normalizes the strings
Params:
-------
string_list - List containing the strings which are to be normalized
Returns:
---------
Returns a list containing the normalized string list
"""
def normalize_string_list(string_list):
normalized_list = []
for sentence in string_list:
normalized_list.append(normalize_string(sentence))
return normalized_list
"""
Given a string and a separator splits up the string in the tokens
Params:
--------
s - string which has to be tokenized
separator - separator based on which the string is to be tokenized
Returns:
---------
A list of words in the sentence based on the separator
"""
def tokenize_string(s,separator):
word_list = list([])
if isinstance(s,basestring):
word_list = s.split(separator)
return word_list
"""
Given a list of sentences tokenizes each sentence in the list
Params:
--------
string_list - List of sentences which have to be tokenized
separator - Separator based on which the sentences have to be tokenized
"""
def tokenize_string_list(string_list,separator):
tokenized_sentence_list = []
for sentence in string_list:
sentence = sentence.encode("ascii","ignore")
tokenized_sentence_list.append(tokenize_string(sentence,separator))
return tokenized_sentence_list
"""
Given a string containing stopwords removes all the stopwords
Params:
--------
s - String containing the stopwords which are to be removed
Returns:
---------
String sans the stopwords
"""
def remove_stopwords(s):
s = s.lower()
removed_string = ''
words = s.split()
for word in words:
if word not in stopword_list:
removed_string = removed_string + word.strip() + " "
return removed_string.strip()
"""
Given a list of sentences and a filename, writes the sentences to the file
Params:
--------
sentence_list - List of sentences which have to be written to the file
filename - File to which the sentences have to be written
Returns:
---------
Nothing; it just writes the sentences to the file
"""
def write_sentences_to_file(sentence_list,filename):
write_file = open(filename,'w')
for sentence in sentence_list:
write_file.write(encode_ascii(sentence) + '\n')
write_file.flush()
write_file.close()
"""
Removes all the punctuations from a given string
Params:
--------
s - String containing the possible punctuations
Returns:
--------
String without the punctuations (including new lines and tabs)
"""
def remove_punctuations(s):
s = s.lower()
s = s.strip()
for punctuation in punctuation_list:
s = s.replace(punctuation,' ')
return s.strip()
"""
Strips a given string of HTML tags
Params:
--------
s - String from which the HTML tags have to be removed
Returns:
---------
String sans the HTML tags
"""
def remove_html_tags(s):
for tag in html_tag_list:
s = s.replace(tag,' ')
return s
"""
Given a string removes all the digits from them
Params:
-------
s - String from which the digits need to be removed
Returns:
---------
String without occurrences of the digits
"""
def remove_digits(s):
for digit in digit_list:
s = s.replace(digit,'')
return s
"""
Given a string removes all occurrences of month names from it
Params:
--------
s - String containing possible month names
Returns:
--------
String without the occurrences of the months
"""
def remove_months(s):
s = s.lower()
words = s.split()
without_month_list = [word for word in words if word not in months_list]
month_clean_string = ""
for word in without_month_list:
month_clean_string = month_clean_string + word + " "
return month_clean_string.strip()
"""
Checks if a given string contains all ASCII characters
Params:
-------
s - String which is to be checked for ASCII characters
Returns:
--------
True if the string contains all ASCII characters, False otherwise
"""
def is_ascii(s):
if isinstance(s,basestring):
return all(ord(c) < 128 for c in s)
return False
"""
Given a string encodes it in ascii format
Params:
--------
s - String which is to be encoded
Returns:
--------
String encoded in ascii format
"""
def encode_ascii(s):
return s.encode('ascii','ignore')
"""
Stems each word of a given sentence to its root word using Porter's Stemmer
Params:
--------
sentence - String containing the sentence which is to be stemmed
Returns:
---------
Sentence where each word has been stemmed to its root word
"""
def stem_sentence(sentence):
words = sentence.split()
stemmed_sentence = ""
for word in words:
try:
if is_ascii(word):
stemmed_sentence = stemmed_sentence + ps.stem_word(word) + " "
except:
pass
return stemmed_sentence.strip()
"""
Given a string removes urls from the string
Params:
--------
s - String containing urls which have to be removed
Returns:
--------
String without the occurrences of the URLs
"""
def remove_url(s):
s = s.lower()
words = s.split()
without_url = ""
for word in words:
if word.count('http:') == 0 and word.count('https:') == 0 and word.count('ftp:') == 0 and word.count('www.') == 0 and word.count('.com') == 0 and word.count('.ly') == 0 and word.count('.st') == 0:
without_url = without_url + word + " "
return without_url.strip()
"""
Given a string removes all the words whose length is less than 3
Params:
--------
s - String from which small words have to be removed.
Returns:
---------
Returns a string without occurrence of small words
"""
def remove_small_words(s):
words = s.split()
clean_string = ""
for word in words:
if len(word) >= 3:
clean_string = clean_string + word + " "
return clean_string.strip()
"""
Defines the pipeline for cleaning and preprocessing of text
Params:
--------
s - String containing the text which has to be preprocessed
Returns:
---------
String which has been passed through the preprocessing pipeline
"""
def text_clean_pipeline(s):
s = remove_url(s)
s = remove_punctuations(s)
s = remove_html_tags(s)
s = remove_stopwords(s)
s = remove_months(s)
s = remove_digits(s)
#s = stem_sentence(s)
s = remove_small_words(s)
return s
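# Illustrative usage sketch (not part of the original script): shows the effect
# of text_clean_pipeline on a sample string. It assumes the module-level lists
# (stopword_list, punctuation_list, html_tag_list, digit_list, months_list)
# defined earlier in this file are available; the sample text is hypothetical.
def _example_text_clean_pipeline():
    raw = "Check out http://example.com <br> the 2 BEST deals in January!!!"
    cleaned = text_clean_pipeline(raw)
    # Expected to be roughly "check out best deals", depending on the exact
    # contents of the stopword, punctuation and tag lists defined above.
    return cleaned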
"""
Given a list of sentences, processes the list through the preprocessing pipeline and returns the cleaned list
Params:
--------
sentence_list - List of sentences which are to be cleaned
Returns:
---------
The cleaned and pre-processed sentence list
"""
def text_clean_pipeline_list(sentence_list):
clean_sentence_list = list([])
for s in sentence_list:
s = remove_digits(s)
s = remove_punctuations(s)
s = remove_stopwords(s)
s = remove_months(s)
s = remove_small_words(s)
#s = encode_ascii(s)
s = remove_url(s)
s = stem_sentence(s)
clean_sentence_list.append(s)
return clean_sentence_list
"""
Given an Excel filepath and a corresponding sheet name, reads it and converts it into a dataframe
Params:
--------
filename - Filepath containing the location and name of the file
sheetname - Name of the sheet containing the data
Returns:
---------
pandas dataframe containing the data from the excel file
"""
def get_dataframe_from_excel(filename,sheetname):
xl_file = pd.ExcelFile(filename)
data_frame = xl_file.parse(sheetname)
return data_frame
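# Illustrative usage sketch (not part of the original script): reads an Excel
# sheet and runs one of its text columns through the cleaning pipeline defined
# above. The file name, sheet name and column name are hypothetical.
def _example_clean_excel_column():
    frame = get_dataframe_from_excel('train.xlsx', 'Sheet1')
    descriptions = frame['product_description'].astype(str).tolist()
    return text_clean_pipeline_list(descriptions)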
| {
"content_hash": "bcd5d1f1261ba323bb35532b8fc86126",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 204,
"avg_line_length": 22.375598086124402,
"alnum_prop": 0.61434833743184,
"repo_name": "rupakc/Kaggle-Compendium",
"id": "1e3f166073469fa7fb4a77b5a2aca897df8ad907",
"size": "9383",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Crowdflower Search Results Relevance/preprocess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "24724"
},
{
"name": "Python",
"bytes": "612030"
}
],
"symlink_target": ""
} |
import numpy as np
from dipy.reconst.dti import (design_matrix, decompose_tensor,
from_lower_triangular, lower_triangular)
from dipy.reconst.vec_val_sum import vec_val_vect
from dipy.core.ndindex import ndindex
import scipy.optimize as opt
# -------------------------------------------------------------------------
# Weighted linear least squares fit procedure
# -------------------------------------------------------------------------
def wls_iter(design_matrix, sig, S0, Diso=3e-3, mdreg=1.5e-3,
min_signal=1.0e-6, piterations=3):
""" Applies weighted linear least squares fit of the water free elimination
model to single voxel signals.
Parameters
----------
design_matrix : array (g, 7)
Design matrix holding the covariants used to solve for the regression
coefficients.
sig : array (g, )
Diffusion-weighted signal for a single voxel data.
S0 : float
Non diffusion weighted signal (i.e. signal for b-value=0).
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
    mdreg : float, optional
Tissue compartment mean diffusivity regularization threshold.
If tissue's mean diffusivity is almost near the free water diffusion
value, the diffusion signal is assumed to be only free water diffusion
(i.e. volume fraction will be set to 1 and tissue's diffusion
parameters are set to zero). Default md_reg was set to
1.5e-3 $mm^{2}.s^{-1}$ according to [1]_.
min_signal : float
The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    piterations : int, optional
Number of iterations used to refine the precision of f. Default is set
to 3 corresponding to a precision of 0.01.
Returns
-------
All parameters estimated from the free water tensor model.
Parameters are ordered as follows:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) The volume fraction of the free water compartment
References
----------
.. [1] Henriques, R.N., Rokem, A., Garyfallidis, E., St-Jean, S., Peterson,
E.T., Correia, M.M., 2017. Re: Optimization of a free water
elimination two-compartmental model for diffusion tensor imaging.
ReScience
"""
W = design_matrix
# Define weights
S2 = np.diag(sig**2)
# Defining matrix to solve fwDTI wls solution
WTS2 = np.dot(W.T, S2)
inv_WT_S2_W = np.linalg.pinv(np.dot(WTS2, W))
invWTS2W_WTS2 = np.dot(inv_WT_S2_W, WTS2)
# Process voxel if it has significant signal from tissue
if np.mean(sig) > min_signal and S0 > min_signal:
# General free-water signal contribution
fwsig = np.exp(np.dot(design_matrix,
np.array([Diso, 0, Diso, 0, 0, Diso, 0])))
df = 1 # initialize precision
flow = 0 # lower f evaluated
fhig = 1 # higher f evaluated
ns = 9 # initial number of samples per iteration
for p in range(piterations):
df = df * 0.1
fs = np.linspace(flow+df, fhig-df, num=ns) # sampling f
SFW = np.array([fwsig, ]*ns) # repeat contributions for all values
FS, SI = np.meshgrid(fs, sig)
SA = SI - FS*S0*SFW.T
            # SA < 0 means that the signal contribution from the free water
            # compartment is larger than the total signal. These cases occur
            # for inappropriately large volume fractions (given the current S0
            # value estimated). To overcome this issue, negative SA values are
            # replaced by the data's minimum positive signal.
SA[SA <= 0] = min_signal
y = np.log(SA / (1-FS))
all_new_params = np.dot(invWTS2W_WTS2, y)
# Select params for lower F2
SIpred = (1-FS)*np.exp(np.dot(W, all_new_params)) + FS*S0*SFW.T
F2 = np.sum(np.square(SI - SIpred), axis=0)
Mind = np.argmin(F2)
params = all_new_params[:, Mind]
f = fs[Mind] # Updated f
flow = f - df # refining precision
fhig = f + df
ns = 19
if mdreg is None:
evals, evecs = decompose_tensor(from_lower_triangular(params))
fw_params = np.concatenate((evals, evecs[0], evecs[1], evecs[2],
np.array([f])), axis=0)
else:
# MD regularization - if tissue's md is larger than mdreg,
# the voxel will be classified as containing only free water
md = (params[0] + params[2] + params[5]) / 3
if md > mdreg:
fw_params = np.zeros(13)
fw_params[12] = 1.0
else:
evals, evecs = decompose_tensor(from_lower_triangular(params))
fw_params = np.concatenate((evals, evecs[0], evecs[1],
evecs[2], np.array([f])), axis=0)
else:
fw_params = np.zeros(13)
return fw_params
def wls_fit_tensor(gtab, data, Diso=3e-3, mask=None, min_signal=1.0e-6,
piterations=3, mdreg=1.5e-3):
r""" Computes weighted least squares (WLS) fit to calculate self-diffusion
tensor using a linear regression model [1]_.
Parameters
----------
gtab : a GradientTable class instance
The gradient table containing diffusion acquisition parameters.
data : ndarray ([X, Y, Z, ...], g)
Data or response variables holding the data. Note that the last
dimension should contain the data. It makes no copies of data.
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
mask : array, optional
A boolean array used to mark the coordinates in the data that should
be analyzed that has the shape data.shape[:-1]
min_signal : float
The minimum signal value. Needs to be a strictly positive
number. Default: 1.0e-6.
    piterations : int, optional
Number of iterations used to refine the precision of f. Default is set
to 3 corresponding to a precision of 0.01.
    mdreg : float, optional
Tissue compartment mean diffusivity regularization threshold.
If tissue's mean diffusivity is almost near the free water diffusion
value, the diffusion signal is assumed to be only free water diffusion
(i.e. volume fraction will be set to 1 and tissue's diffusion
parameters are set to zero). Default md_reg was set to
1.5e-3 $mm^{2}.s^{-1}$ according to [1]_.
Returns
-------
fw_params : ndarray (x, y, z, 13)
Matrix containing in the last dimension the free water model parameters
in the following order:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) The volume fraction of the free water compartment.
References
----------
.. [1] Henriques, R.N., Rokem, A., Garyfallidis, E., St-Jean, S., Peterson,
E.T., Correia, M.M., 2017. Re: Optimization of a free water
elimination two-compartmental model for diffusion tensor imaging.
ReScience
"""
fw_params = np.zeros(data.shape[:-1] + (13,))
W = design_matrix(gtab)
# Prepare mask
if mask is None:
mask = np.ones(data.shape[:-1], dtype=bool)
else:
if mask.shape != data.shape[:-1]:
raise ValueError("Mask is not the same shape as data.")
mask = np.array(mask, dtype=bool, copy=False)
# Prepare S0
S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)
index = ndindex(mask.shape)
for v in index:
if mask[v]:
params = wls_iter(W, data[v], S0[v], min_signal=min_signal,
                              Diso=Diso, piterations=piterations, mdreg=mdreg)
fw_params[v] = params
return fw_params
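# Illustrative usage sketch (not part of the original module): fits the free
# water elimination model with the WLS routine above on a tiny synthetic
# volume. The acquisition scheme and flat signal below are hypothetical; any
# dipy GradientTable with b=0 volumes and at least two non-zero shells can be
# passed in the same way.
def _example_wls_fit():
    from dipy.core.gradients import gradient_table
    bvals = np.array([0., 500., 500., 500., 1500., 1500., 1500.])
    bvecs = np.array([[0., 0., 0.],
                      [1., 0., 0.], [0., 1., 0.], [0., 0., 1.],
                      [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
    gtab = gradient_table(bvals, bvecs)
    data = 100. * np.ones((2, 2, 1, len(bvals)))  # flat synthetic signal
    fw_params = wls_fit_tensor(gtab, data)
    # fw_params has shape (2, 2, 1, 13): eigenvalues, eigenvectors and the
    # free water volume fraction for every voxel.
    return fw_params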
# -------------------------------------------------------------------------
# non-linear least squares fit procedure
# -------------------------------------------------------------------------
def _nls_err_func(tensor_elements, design_matrix, data, Diso=3e-3,
cholesky=False, f_transform=False):
""" Error function for the non-linear least-squares fit of the tensor water
elimination model.
Parameters
----------
tensor_elements : array (8, )
The six independent elements of the diffusion tensor followed by
-log(S0) and the volume fraction f of the water elimination
compartment. Note that if cholesky is set to true, tensor elements are
assumed to be written as Cholesky's decomposition elements. If
f_transform is true, volume fraction f has to be converted to
ft = arcsin(2*f - 1) + pi/2
design_matrix : array
The design matrix
data : array
The voxel signal in all gradient directions
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
cholesky : bool, optional
If true, the diffusion tensor elements were decomposed using cholesky
decomposition. See fwdti.nls_fit_tensor
Default: False
    f_transform : bool, optional
        If true, the water volume fraction was converted to
        ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between 0 and 1.
        See fwdti.nls_fit_tensor
        Default: False
"""
tensor = np.copy(tensor_elements)
if cholesky:
tensor[:6] = cholesky_to_lower_triangular(tensor[:6])
if f_transform:
f = 0.5 * (1 + np.sin(tensor[7] - np.pi/2))
else:
f = tensor[7]
# This is the predicted signal given the params:
y = (1-f) * np.exp(np.dot(design_matrix, tensor[:7])) + \
f * np.exp(np.dot(design_matrix,
np.array([Diso, 0, Diso, 0, 0, Diso, tensor[6]])))
# Compute the residuals
return data - y
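# The residual above is driven by the two-compartment free water model,
#     S(b, g) = S0 * [(1 - f) * exp(-b g^T D_tissue g) + f * exp(-b * Diso)],
# where f is the free water volume fraction. A minimal numeric sketch of that
# equation (independent of the design-matrix conventions used above, with
# hypothetical values, and where adc_tissue stands for g^T D_tissue g along
# the measured direction) is:
def _example_two_compartment_signal(S0=100., f=0.3, b=1000.,
                                    adc_tissue=0.7e-3, Diso=3e-3):
    tissue = (1. - f) * np.exp(-b * adc_tissue)
    water = f * np.exp(-b * Diso)
    return S0 * (tissue + water)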
def _nls_jacobian_func(tensor_elements, design_matrix, data, Diso=3e-3,
cholesky=False, f_transform=False):
"""The Jacobian is the first derivative of the least squares error
function.
Parameters
----------
tensor_elements : array (8, )
The six independent elements of the diffusion tensor followed by
-log(S0) and the volume fraction f of the water elimination
compartment. Note that if f_transform is true, volume fraction f is
converted to ft = arcsin(2*f - 1) + pi/2
design_matrix : array
The design matrix
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
    f_transform : bool, optional
        If true, the water volume fraction was converted to
        ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between 0 and 1.
        See fwdti.nls_fit_tensor
        Default: False
"""
tensor = np.copy(tensor_elements)
if f_transform:
f = 0.5 * (1 + np.sin(tensor[7] - np.pi/2))
else:
f = tensor[7]
t = np.exp(np.dot(design_matrix, tensor[:7]))
s = np.exp(np.dot(design_matrix,
np.array([Diso, 0, Diso, 0, 0, Diso, tensor[6]])))
T = (f-1.0) * t[:, None] * design_matrix
S = np.zeros(design_matrix.shape)
S[:, 6] = f * s
if f_transform:
df = (t-s) * (0.5*np.cos(tensor[7]-np.pi/2))
else:
df = (t-s)
return np.concatenate((T - S, df[:, None]), axis=1)
def nls_iter(design_matrix, sig, S0, Diso=3e-3, mdreg=2.7e-3,
min_signal=1.0e-6, cholesky=False, f_transform=True,
jac=True):
""" Applies non linear least squares fit of the water free elimination
model to single voxel signals.
Parameters
----------
design_matrix : array (g, 7)
Design matrix holding the covariants used to solve for the regression
coefficients.
sig : array (g, )
Diffusion-weighted signal for a single voxel data.
S0 : float
Non diffusion weighted signal (i.e. signal for b-value=0).
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
min_signal : float
The minimum signal value. Needs to be a strictly positive
number.
    cholesky : bool, optional
        If true it uses Cholesky decomposition to ensure that the diffusion
        tensor is positive definite.
        Default: False
    f_transform : bool, optional
        If true, the water volume fraction is converted during the convergence
        procedure to ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between
        0 and 1.
        Default: True
    jac : bool
        Use the Jacobian? Default: True
Returns
-------
All parameters estimated from the free water tensor model.
Parameters are ordered as follows:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) The volume fraction of the free water compartment.
"""
# Initial guess
params = wls_iter(design_matrix, sig, S0, min_signal=min_signal, Diso=Diso)
# Process voxel if it has significant signal from tissue
if np.mean(sig) > min_signal and S0 > min_signal:
# converting evals and evecs to diffusion tensor elements
evals = params[:3]
evecs = params[3:12].reshape((3, 3))
dt = lower_triangular(vec_val_vect(evecs, evals))
# Cholesky decomposition if requested
if cholesky:
dt = lower_triangular_to_cholesky(dt)
# f transformation if requested
if f_transform:
f = np.arcsin(2*params[12] - 1) + np.pi/2
else:
f = params[12]
# Use the Levenberg-Marquardt algorithm wrapped in opt.leastsq
start_params = np.concatenate((dt, [-np.log(S0), f]), axis=0)
if jac:
this_tensor, status = opt.leastsq(_nls_err_func, start_params[:8],
args=(design_matrix, sig, Diso,
cholesky, f_transform),
Dfun=_nls_jacobian_func)
else:
this_tensor, status = opt.leastsq(_nls_err_func, start_params[:8],
args=(design_matrix, sig, Diso,
cholesky, f_transform))
# Invert the cholesky decomposition if this was requested
if cholesky:
this_tensor[:6] = cholesky_to_lower_triangular(this_tensor[:6])
# Invert f transformation if this was requested
if f_transform:
this_tensor[7] = 0.5 * (1 + np.sin(this_tensor[7] - np.pi/2))
# The parameters are the evals and the evecs:
evals, evecs = decompose_tensor(from_lower_triangular(this_tensor[:6]))
params = np.concatenate((evals, evecs[0], evecs[1], evecs[2],
np.array([this_tensor[7]])), axis=0)
return params
def nls_fit_tensor(gtab, data, mask=None, Diso=3e-3,
min_signal=1.0e-6, f_transform=True, cholesky=False,
jac=True):
"""
Fit the water elimination tensor model using the non-linear least-squares.
Parameters
----------
gtab : a GradientTable class instance
The gradient table containing diffusion acquisition parameters.
data : ndarray ([X, Y, Z, ...], g)
Data or response variables holding the data. Note that the last
dimension should contain the data. It makes no copies of data.
mask : array, optional
A boolean array used to mark the coordinates in the data that should
be analyzed that has the shape data.shape[:-1]
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
min_signal : float
The minimum signal value. Needs to be a strictly positive
number. Default: 1.0e-6.
    f_transform : bool, optional
        If true, the water volume fraction is converted during the convergence
        procedure to ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between
        0 and 1.
        Default: True
    cholesky : bool, optional
        If true it uses Cholesky decomposition to ensure that the diffusion
        tensor is positive definite.
        Default: False
    jac : bool
        Use the Jacobian? Default: True
Returns
-------
fw_params : ndarray (x, y, z, 13)
Matrix containing in the last dimension the free water model parameters
in the following order:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) The volume fraction of the free water compartment
References
----------
.. [1] Henriques, R.N., Rokem, A., Garyfallidis, E., St-Jean, S., Peterson,
E.T., Correia, M.M., 2017. Re: Optimization of a free water
elimination two-compartmental model for diffusion tensor imaging.
ReScience
"""
    # Check for incompatible input options
if jac is True and cholesky is True:
raise ValueError("Cholesky decomposition is not compatible with jac.")
fw_params = np.zeros(data.shape[:-1] + (13,))
W = design_matrix(gtab)
# Prepare mask
if mask is None:
mask = np.ones(data.shape[:-1], dtype=bool)
else:
if mask.shape != data.shape[:-1]:
raise ValueError("Mask is not the same shape as data.")
mask = np.array(mask, dtype=bool, copy=False)
# Prepare S0
S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)
# Loop data fitting through all voxels
index = ndindex(mask.shape)
for v in index:
if mask[v]:
params = nls_iter(W, data[v], S0[v], Diso=Diso,
min_signal=min_signal,
f_transform=f_transform,
cholesky=cholesky, jac=jac)
fw_params[v] = params
return fw_params
def lower_triangular_to_cholesky(tensor_elements):
""" Perfoms Cholesky decompostion of the diffusion tensor
Parameters
----------
tensor_elements : array (6,)
Array containing the six elements of diffusion tensor's lower
triangular.
Returns
-------
cholesky_elements : array (6,)
Array containing the six Cholesky's decomposition elements
(R0, R1, R2, R3, R4, R5) [1]_.
References
----------
.. [1] Koay, C.G., Carew, J.D., Alexander, A.L., Basser, P.J.,
Meyerand, M.E., 2006. Investigation of anomalous estimates of
tensor-derived quantities in diffusion tensor imaging. Magnetic
Resonance in Medicine, 55(4), 930-936. doi:10.1002/mrm.20832
"""
R0 = np.sqrt(tensor_elements[0])
R3 = tensor_elements[1] / R0
R1 = np.sqrt(tensor_elements[2] - R3**2)
R5 = tensor_elements[3] / R0
R4 = (tensor_elements[4] - R3*R5) / R1
R2 = np.sqrt(tensor_elements[5] - R4**2 - R5**2)
return np.array([R0, R1, R2, R3, R4, R5])
def cholesky_to_lower_triangular(R):
""" Convert Cholesky decompostion elements to the diffusion tensor elements
Parameters
----------
R : array (6,)
Array containing the six Cholesky's decomposition elements
(R0, R1, R2, R3, R4, R5) [1]_.
Returns
-------
tensor_elements : array (6,)
Array containing the six elements of diffusion tensor's lower
triangular.
References
----------
.. [1] Koay, C.G., Carew, J.D., Alexander, A.L., Basser, P.J.,
Meyerand, M.E., 2006. Investigation of anomalous estimates of
tensor-derived quantities in diffusion tensor imaging. Magnetic
Resonance in Medicine, 55(4), 930-936. doi:10.1002/mrm.20832
"""
Dxx = R[0]**2
Dxy = R[0]*R[3]
Dyy = R[1]**2 + R[3]**2
Dxz = R[0]*R[5]
Dyz = R[1]*R[4] + R[3]*R[5]
Dzz = R[2]**2 + R[4]**2 + R[5]**2
return np.array([Dxx, Dxy, Dyy, Dxz, Dyz, Dzz])
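# Round-trip sanity sketch (not part of the original module): converting a
# positive-definite tensor to its Cholesky elements and back should recover
# the original lower-triangular elements. The diagonal tensor below is
# hypothetical (units of $mm^{2}.s^{-1}$).
def _example_cholesky_round_trip():
    dt = np.array([1.7e-3, 0.0, 0.3e-3, 0.0, 0.0, 0.3e-3])
    R = lower_triangular_to_cholesky(dt)
    dt_back = cholesky_to_lower_triangular(R)
    return np.allclose(dt, dt_back)  # expected to be True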
# -------------------------------------------------------------------------
# Supplementary function
# -------------------------------------------------------------------------
def nls_iter_bounds(design_matrix, sig, S0, Diso=3e-3,
min_signal=1.0e-6, bounds=None, jac=True):
""" Applies non-linear least-squares fit with constraints of the water free
elimination model to single voxel signals.
Parameters
----------
design_matrix : array (g, 7)
Design matrix holding the covariants used to solve for the regression
coefficients.
sig : array (g, )
Diffusion-weighted signal for a single voxel data.
S0 : float
Non diffusion weighted signal (i.e. signal for b-value=0).
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
min_signal : float
The minimum signal value. Needs to be a strictly positive
number.
    bounds : 2-tuple of arrays with 8 elements, optional
        Lower and upper bounds on fwdti model variables and the non-diffusion
        signal S0. Use np.inf with an appropriate sign to disable bounds on
        all or some variables. When bounds is set to None the following
        default variable bounds are used:
            ([0., -Diso, 0., -Diso, -Diso, 0., 0., np.exp(-10.)],
            [Diso, Diso, Diso, Diso, Diso, Diso, 1., np.exp(10.)])
    jac : bool
        Use the Jacobian? Default: True
Returns
-------
All parameters estimated from the free water tensor model.
Parameters are ordered as follows:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) The volume fraction of the free water compartment.
References
----------
.. [1] Henriques, R.N., Rokem, A., Garyfallidis, E., St-Jean, S., Peterson,
E.T., Correia, M.M., 2017. Re: Optimization of a free water
elimination two-compartmental model for diffusion tensor imaging.
ReScience
"""
# Initial guess
params = wls_iter(design_matrix, sig, S0,
min_signal=min_signal, Diso=Diso)
# Set bounds
if bounds is None:
bounds = ([0., -Diso, 0., -Diso, -Diso, 0., -10., 0],
[Diso, Diso, Diso, Diso, Diso, Diso, 10., 1])
    else:
        # In the helper subfunctions it was easier to have log(S0) first than
        # the water volume. Therefore, we have to reorder the boundaries if
        # specified by the user. Work on a copy so the caller's bounds are
        # not mutated (and not re-swapped on every voxel).
        bounds = (list(bounds[0]), list(bounds[1]))
        S0low = np.log(bounds[0][7])
        S0hig = np.log(bounds[1][7])
        bounds[0][7] = bounds[0][6]
        bounds[1][7] = bounds[1][6]
        bounds[0][6] = S0low
        bounds[1][6] = S0hig
# Process voxel if it has significant signal from tissue
if np.mean(sig) > min_signal and S0 > min_signal:
# converting evals and evecs to diffusion tensor elements
evals = params[:3]
evecs = params[3:12].reshape((3, 3))
dt = lower_triangular(vec_val_vect(evecs, evals))
f = params[12]
# Use the Levenberg-Marquardt algorithm wrapped in opt.leastsq
start_params = np.concatenate((dt, [-np.log(S0), f]), axis=0)
lb = np.array(bounds[0])
ub = np.array(bounds[1])
start_params[start_params < lb] = lb[start_params < lb]
start_params[start_params > ub] = ub[start_params > ub]
if jac:
out = opt.least_squares(_nls_err_func, start_params[:8],
args=(design_matrix, sig,
Diso, False, False),
jac=_nls_jacobian_func,
bounds=bounds)
else:
out = opt.least_squares(_nls_err_func, start_params[:8],
args=(design_matrix, sig,
Diso, False, False),
bounds=bounds)
this_tensor = out.x
# The parameters are the evals and the evecs:
evals, evecs = decompose_tensor(from_lower_triangular(this_tensor[:6]))
params = np.concatenate((evals, evecs[0], evecs[1], evecs[2],
np.array([this_tensor[7]])), axis=0)
return params
def nls_fit_tensor_bounds(gtab, data, mask=None, Diso=3e-3,
min_signal=1.0e-6, bounds=None, jac=True):
"""
    Fit the water elimination tensor model using the non-linear least-squares
    with constraints.
    Parameters
    ----------
    gtab : a GradientTable class instance
The gradient table containing diffusion acquisition parameters.
data : ndarray ([X, Y, Z, ...], g)
Data or response variables holding the data. Note that the last
dimension should contain the data. It makes no copies of data.
mask : array, optional
A boolean array used to mark the coordinates in the data that should
be analyzed that has the shape data.shape[:-1]
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
min_signal : float
The minimum signal value. Needs to be a strictly positive
number. Default: 1.0e-6.
    bounds : 2-tuple of arrays with 8 elements, optional
        Lower and upper bounds on fwdti model variables and the non-diffusion
        signal S0. Use np.inf with an appropriate sign to disable bounds on
        all or some variables. When bounds is set to None the following
        default variable bounds are used:
            ([0., -Diso, 0., -Diso, -Diso, 0., 0., np.exp(-10.)],
            [Diso, Diso, Diso, Diso, Diso, Diso, 1., np.exp(10.)])
    jac : bool
        Use the Jacobian? Default: True
Returns
-------
fw_params : ndarray (x, y, z, 13)
Matrix containing in the last dimension the free water model parameters
in the following order:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) The volume fraction of the free water compartment
References
----------
.. [1] Henriques, R.N., Rokem, A., Garyfallidis, E., St-Jean, S., Peterson,
E.T., Correia, M.M., 2017. Re: Optimization of a free water
elimination two-compartmental model for diffusion tensor imaging.
ReScience
"""
fw_params = np.zeros(data.shape[:-1] + (13,))
W = design_matrix(gtab)
# Prepare mask
if mask is None:
mask = np.ones(data.shape[:-1], dtype=bool)
else:
if mask.shape != data.shape[:-1]:
raise ValueError("Mask is not the same shape as data.")
mask = np.array(mask, dtype=bool, copy=False)
# Prepare S0
    S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)
index = ndindex(mask.shape)
for v in index:
if mask[v]:
params = nls_iter_bounds(W, data[v], S0[v], Diso=Diso,
min_signal=min_signal, bounds=bounds,
jac=jac)
fw_params[v] = params
return fw_params
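# Illustrative usage sketch (not part of the original module): calls the
# constrained NLS fit with user-specified bounds. The bounds follow the order
# documented above (six lower-triangular tensor elements, the free water
# fraction f, then the non-diffusion signal S0); the numbers shown are just
# the documented defaults, and gtab/data are assumed to be prepared as for
# wls_fit_tensor.
def _example_bounded_fit(gtab, data):
    Diso = 3e-3
    bounds = ([0., -Diso, 0., -Diso, -Diso, 0., 0., np.exp(-10.)],
              [Diso, Diso, Diso, Diso, Diso, Diso, 1., np.exp(10.)])
    return nls_fit_tensor_bounds(gtab, data, bounds=bounds)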
| {
"content_hash": "d7fd9bf8b36e9e461130085bf0e559ae",
"timestamp": "",
"source": "github",
"line_count": 709,
"max_line_length": 79,
"avg_line_length": 40.44428772919605,
"alnum_prop": 0.5873757628596338,
"repo_name": "RafaelNH/Reproduce-Hoy-et-al-2014",
"id": "f9f395aa27445a09952db3646778c14cd1254c01",
"size": "29304",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code/functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "87864"
},
{
"name": "Python",
"bytes": "18447"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv4-internal-reachability/prefixes/prefixes/delay-metric/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of delay-metric.
"""
__slots__ = ("_path_helper", "_extmethods", "__metric", "__flags")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..63"]},
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:narrow-metric",
is_config=False,
)
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"INTERNAL": {}, "UNSUPPORTED": {}},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="isis-metric-flags",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"ipv4-internal-reachability",
"prefixes",
"prefixes",
"delay-metric",
"state",
]
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/metric (oc-isis-types:narrow-metric)
YANG Description: ISIS delay metric value. This metric measures the transit delay of
the associated circuit. It is an optional metric, which if assigned
to a circuit shall have a positive integral value. Higher values
indicate a longer transit delay.
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/metric (oc-isis-types:narrow-metric)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: ISIS delay metric value. This metric measures the transit delay of
the associated circuit. It is an optional metric, which if assigned
to a circuit shall have a positive integral value. Higher values
indicate a longer transit delay.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..63"]},
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:narrow-metric",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """metric must be of a type compatible with oc-isis-types:narrow-metric""",
"defined-type": "oc-isis-types:narrow-metric",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..63']}), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:narrow-metric', is_config=False)""",
}
)
self.__metric = t
if hasattr(self, "_set"):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..63"]},
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:narrow-metric",
is_config=False,
)
def _get_flags(self):
"""
Getter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/flags (isis-metric-flags)
YANG Description: ISIS Delay Metric Flags.
"""
return self.__flags
def _set_flags(self, v, load=False):
"""
Setter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/flags (isis-metric-flags)
If this variable is read-only (config: false) in the
source YANG file, then _set_flags is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flags() directly.
YANG Description: ISIS Delay Metric Flags.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"INTERNAL": {}, "UNSUPPORTED": {}},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="isis-metric-flags",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """flags must be of a type compatible with isis-metric-flags""",
"defined-type": "openconfig-network-instance:isis-metric-flags",
"generated-type": """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'INTERNAL': {}, 'UNSUPPORTED': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='isis-metric-flags', is_config=False)""",
}
)
self.__flags = t
if hasattr(self, "_set"):
self._set()
def _unset_flags(self):
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"INTERNAL": {}, "UNSUPPORTED": {}},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="isis-metric-flags",
is_config=False,
)
metric = __builtin__.property(_get_metric)
flags = __builtin__.property(_get_flags)
_pyangbind_elements = OrderedDict([("metric", metric), ("flags", flags)])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv4-internal-reachability/prefixes/prefixes/delay-metric/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of delay-metric.
"""
__slots__ = ("_path_helper", "_extmethods", "__metric", "__flags")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..63"]},
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:narrow-metric",
is_config=False,
)
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"INTERNAL": {}, "UNSUPPORTED": {}},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="isis-metric-flags",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"ipv4-internal-reachability",
"prefixes",
"prefixes",
"delay-metric",
"state",
]
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/metric (oc-isis-types:narrow-metric)
YANG Description: ISIS delay metric value. This metric measures the transit delay of
the associated circuit. It is an optional metric, which if assigned
to a circuit shall have a positive integral value. Higher values
indicate a longer transit delay.
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/metric (oc-isis-types:narrow-metric)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: ISIS delay metric value. This metric measures the transit delay of
the associated circuit. It is an optional metric, which if assigned
to a circuit shall have a positive integral value. Higher values
indicate a longer transit delay.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..63"]},
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:narrow-metric",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """metric must be of a type compatible with oc-isis-types:narrow-metric""",
"defined-type": "oc-isis-types:narrow-metric",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..63']}), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:narrow-metric', is_config=False)""",
}
)
self.__metric = t
if hasattr(self, "_set"):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..63"]},
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:narrow-metric",
is_config=False,
)
def _get_flags(self):
"""
Getter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/flags (isis-metric-flags)
YANG Description: ISIS Delay Metric Flags.
"""
return self.__flags
def _set_flags(self, v, load=False):
"""
Setter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/flags (isis-metric-flags)
If this variable is read-only (config: false) in the
source YANG file, then _set_flags is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flags() directly.
YANG Description: ISIS Delay Metric Flags.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"INTERNAL": {}, "UNSUPPORTED": {}},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="isis-metric-flags",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """flags must be of a type compatible with isis-metric-flags""",
"defined-type": "openconfig-network-instance:isis-metric-flags",
"generated-type": """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'INTERNAL': {}, 'UNSUPPORTED': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='isis-metric-flags', is_config=False)""",
}
)
self.__flags = t
if hasattr(self, "_set"):
self._set()
def _unset_flags(self):
self.__flags = YANGDynClass(
base=TypedListType(
allowed_type=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"INTERNAL": {}, "UNSUPPORTED": {}},
)
),
is_leaf=False,
yang_name="flags",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="isis-metric-flags",
is_config=False,
)
metric = __builtin__.property(_get_metric)
flags = __builtin__.property(_get_flags)
_pyangbind_elements = OrderedDict([("metric", metric), ("flags", flags)])
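# Illustrative usage sketch (not part of the generated bindings): backends
# normally instantiate the container and populate its read-only leaves through
# the private setters described in the docstrings above. The values below are
# hypothetical.
def _example_populate_state():
    s = state()
    s._set_metric(10)           # narrow metric, restricted to the range 1..63
    s._set_flags(["INTERNAL"])  # only INTERNAL and UNSUPPORTED are allowed
    return s.metric, s.flags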
| {
"content_hash": "2b57cf15c04978f125bdee9f27c3e383",
"timestamp": "",
"source": "github",
"line_count": 549,
"max_line_length": 571,
"avg_line_length": 42.92714025500911,
"alnum_prop": 0.5671489795052403,
"repo_name": "napalm-automation/napalm-yang",
"id": "9fe7cf3e42e99b8c3d8d9c7d3f48467d68dad8e1",
"size": "23591",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes_/delay_metric/state/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
import Command
import recalboxFiles
from generators.Generator import Generator
import advMameControllers
import shutil
import os.path
class AdvMameGenerator(Generator):
# Main entry of the module
def generate(self, system, rom, playersControllers):
romName = os.path.basename(os.path.splitext(rom)[0])
commandArray = [recalboxFiles.recalboxBins[system.config['emulator']]]
if not system.config['configfile']:
# Using recalbox config file
system.config['configfile'] = recalboxFiles.advancemameConfig
advMameControllers.writeControllersConfig(system, playersControllers)
if 'args' in system.config and system.config['args'] is not None:
commandArray.extend(system.config['args'])
commandArray.extend( ['-cfg', system.config['configfile']] )
commandArray.append(romName)
#~ return Command.Command(videomode=system.config['videomode'], array=commandArray, env={"TERM":"linux"})
return Command.Command(videomode='default', array=commandArray, env={"TERM":"linux"})
| {
"content_hash": "630b910b498d79708ea4de4ad81a67a9",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 113,
"avg_line_length": 41.592592592592595,
"alnum_prop": 0.6821015138023152,
"repo_name": "digitalLumberjack/recalbox-configgen",
"id": "7e149daa574f9f023a9de8fbd2491bf5dcddda1f",
"size": "1145",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "configgen/generators/advancemame/advMameGenerator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131403"
}
],
"symlink_target": ""
} |
import dork_compose.plugin
import os
from compose.config.config import VolumeSpec
from docker.api.client import APIClient
import logging
log = logging.getLogger(__name__)
import time
from docker.errors import APIError
class Plugin(dork_compose.plugin.Plugin):
def get_hotcode_volumes(self, service):
root = None
source = '.'
hotcode = ''
try:
image = service.client.inspect_image(service.image_name)
if image.get('Config', {}).get('Labels'):
root = image.get('Config', {}).get('Labels', {}).get('dork.root')
source = image.get('Config', {}).get('Labels', {}).get('dork.source', '.')
hotcode = image.get('Config', {}).get('Labels', {}).get('dork.hotcode', '')
except APIError:
pass
if isinstance(service.options.get('labels'), dict):
root = service.options.get('labels', {}).get('dork.root', root)
source = service.options.get('labels', {}).get('dork.source', source)
hotcode = service.options.get('labels').get('dork.hotcode', hotcode)
if hotcode and not root:
log.warn('Service %s has no dork.root information, can\'t apply hot code directories.' % service.name)
return []
paths = filter(lambda x: x, hotcode.split(';'))
return [VolumeSpec.parse(':'.join([
'%s/%s/%s' % (self.env['DORK_SOURCE'], source, path),
'%s/%s' % (root.rstrip('/'), path),
'cached'
])) for path in paths]
def creating_container(self, service):
"""
Inject volumes for all hot code paths.
"""
self.sync_code(service=service)
externals = [v.external for v in service.options['volumes']]
for v in self.get_hotcode_volumes(service):
if v.external not in externals:
service.options['volumes'].append(v)
def sync_code(self, service):
client = APIClient()
root = None
source = '.'
hotcode = ''
try:
image = service.client.inspect_image(service.image_name)
if image.get('Config', {}).get('Labels'):
root = image.get('Config', {}).get('Labels', {}).get('dork.root')
source = image.get('Config', {}).get('Labels', {}).get('dork.source', '.')
hotcode = image.get('Config', {}).get('Labels', {}).get('dork.hotcode', '')
except APIError:
pass
if isinstance(service.options.get('labels'), dict):
root = service.options.get('labels', {}).get('dork.root', root)
source = service.options.get('labels', {}).get('dork.source', source)
hotcode = service.options.get('labels').get('dork.hotcode', hotcode)
        skip = list(filter(lambda x: x, hotcode.split(';'))) if hotcode else []
# Don't sync if there are no hotcode directories.
if not skip or '.' in skip:
return
skip.append('.git')
skip.append('.auth')
skip.append('.no_auth')
skip.append('.no_auth.*')
skip.append('.env')
skip.append('.dork.env')
skip.append('.dockerignore')
skip.append('Dockerfile')
skip.append('.dork.dockerignore')
skip.append('.dork.Dockerfile')
lib = '/'.join(filter(lambda x: len(x), [
os.path.expanduser(self.env.get('DORK_LIBRARY_PATH', '')),
os.path.expanduser(self.env.get('DORK_LIBRARY', '')),
]))
# Ignore all files, that docker and dork ignore.
ignore_files = [
'%s/.dockerignore' % lib,
'.dockerignore',
'%s/.dork.dockerignore' % lib,
'.dork.dockerignore',
]
for ignore_file in ignore_files:
if os.path.isfile(ignore_file):
with open(ignore_file) as f:
skip += f.read().splitlines()
if not (source and root):
return
try:
client.inspect_image('iamdork/rsync')
except APIError:
client.pull('iamdork/rsync')
container = client.create_container(
image=service.image_name,
volumes=[root],
)['Id']
try:
dork_source = self.env.get('DORK_SOURCE')
src = '/'.join([root, source])
dst = '/'.join([dork_source, source])
log.info("Synching %s to %s." % (src, dst))
sync = client.create_container(
image='iamdork/rsync',
volumes=['/destination'],
cpu_shares=256,
host_config=client.create_host_config(
binds=['%s:/destination' % dst],
volumes_from=container
),
environment={
'SOURCE': src,
'EXCLUDE': ' '.join(skip)
}
)['Id']
try:
client.start(sync)
while client.inspect_container(sync)['State']['Running']:
time.sleep(0.5)
finally:
client.remove_container(sync)
finally:
client.remove_container(container)
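# Illustrative sketch (not part of the original plugin): how the dork.* labels
# are interpreted. Given an image or service labelled with
#   dork.root    = "/var/www"
#   dork.source  = "myapp"
#   dork.hotcode = "web/modules/custom;web/themes/custom"
# get_hotcode_volumes() builds one cached bind mount per hot code path, of the
# form "$DORK_SOURCE/myapp/<path>:/var/www/<path>:cached". The helper below
# only mimics that string construction; the label values are hypothetical.
def _example_hotcode_volume_strings(dork_source='/home/user/projects'):
    root, source = '/var/www', 'myapp'
    hotcode = 'web/modules/custom;web/themes/custom'
    return ['%s/%s/%s:%s/%s:cached' % (dork_source, source, path,
                                       root.rstrip('/'), path)
            for path in hotcode.split(';') if path]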
| {
"content_hash": "c7d3d93164b381fde6f3bbd2d517f944",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 114,
"avg_line_length": 34.71523178807947,
"alnum_prop": 0.520030522701259,
"repo_name": "iamdork/compose",
"id": "843b9020ec1a1758f7856f08c723a14a76809a20",
"size": "5242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dork_compose/plugins/hotcode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "228"
},
{
"name": "Nginx",
"bytes": "710"
},
{
"name": "PHP",
"bytes": "51"
},
{
"name": "Python",
"bytes": "67396"
},
{
"name": "Shell",
"bytes": "14433"
}
],
"symlink_target": ""
} |
from distutils.core import setup
DESCRIPTION = "General tools for Astronomical Time Series in Python"
LONG_DESCRIPTION = """
gatspy: General tools for Astronomical Time Series in Python
============================================================
Gatspy (pronounced as F. Scott Fitzgerald might pronounce it) is a collection of tools for analyzing astronomical time series in Python.
For more information, visit http://github.com/astroml/gatspy/
"""
NAME = "gatspy"
AUTHOR = "Jake VanderPlas"
AUTHOR_EMAIL = "[email protected]"
MAINTAINER = "Jake VanderPlas"
MAINTAINER_EMAIL = "[email protected]"
URL = 'http://github.com/astroml/gatspy'
DOWNLOAD_URL = 'http://github.com/astroml/gatspy'
LICENSE = 'BSD 3-clause'
import gatspy
VERSION = gatspy.__version__
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
packages=['gatspy',
'gatspy.tests',
'gatspy.periodic',
'gatspy.periodic.tests',
'gatspy.datasets',
'gatspy.datasets.tests',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'],
)
| {
"content_hash": "b6917824a3abc82c503984ead07233e4",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 136,
"avg_line_length": 33.19230769230769,
"alnum_prop": 0.6176129779837776,
"repo_name": "bsipocz/gatspy",
"id": "4667c11240f4356dba86ca8790231558b420cf7e",
"size": "1726",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "217"
},
{
"name": "Python",
"bytes": "112915"
}
],
"symlink_target": ""
} |
import mock
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ovsdb_monitor
from neutron.tests import base
class TestOvsdbMonitor(base.BaseTestCase):
def test___init__(self):
ovsdb_monitor.OvsdbMonitor('Interface')
def test___init___with_columns(self):
columns = ['col1', 'col2']
with mock.patch(
'neutron.agent.linux.async_process.AsyncProcess.__init__') as init:
ovsdb_monitor.OvsdbMonitor('Interface', columns=columns)
cmd = init.call_args_list[0][0][0]
self.assertEqual('col1,col2', cmd[-1])
def test___init___with_format(self):
with mock.patch(
'neutron.agent.linux.async_process.AsyncProcess.__init__') as init:
ovsdb_monitor.OvsdbMonitor('Interface', format='blob')
cmd = init.call_args_list[0][0][0]
self.assertEqual('--format=blob', cmd[-1])
class TestSimpleInterfaceMonitor(base.BaseTestCase):
def setUp(self):
super(TestSimpleInterfaceMonitor, self).setUp()
self.monitor = ovsdb_monitor.SimpleInterfaceMonitor()
def test_has_updates_is_false_if_active_with_no_output(self):
target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor'
'.is_active')
with mock.patch(target, return_value=True):
self.assertFalse(self.monitor.has_updates)
def test__kill_sets_data_received_to_false(self):
self.monitor.data_received = True
with mock.patch(
'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._kill'):
self.monitor._kill()
self.assertFalse(self.monitor.data_received)
def test__read_stdout_sets_data_received_and_returns_output(self):
output = 'foo'
with mock.patch(
'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._read_stdout',
return_value=output):
result = self.monitor._read_stdout()
self.assertTrue(self.monitor.data_received)
self.assertEqual(result, output)
    def test__read_stdout_does_not_set_data_received_for_empty_output(self):
output = None
with mock.patch(
'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._read_stdout',
return_value=output):
self.monitor._read_stdout()
self.assertFalse(self.monitor.data_received)
def test_has_updates_after_calling_get_events_is_false(self):
with mock.patch.object(
self.monitor, 'process_events') as process_events:
self.monitor.new_events = {'added': ['foo'], 'removed': ['foo1']}
self.assertTrue(self.monitor.has_updates)
self.monitor.get_events()
self.assertTrue(process_events.called)
self.assertFalse(self.monitor.has_updates)
    def test_process_event_unassigned_of_port(self):
output = '{"data":[["e040fbec-0579-4990-8324-d338da33ae88","insert",'
output += '"m50",["set",[]],["map",[]]]],"headings":["row","action",'
output += '"name","ofport","external_ids"]}'
with mock.patch.object(
self.monitor, 'iter_stdout', return_value=[output]):
self.monitor.process_events()
self.assertEqual(self.monitor.new_events['added'][0]['ofport'],
ovs_lib.UNASSIGNED_OFPORT)
| {
"content_hash": "b2cdecef9d2a6ca8e41b63288244329c",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 41.0609756097561,
"alnum_prop": 0.6186516186516187,
"repo_name": "JianyuWang/neutron",
"id": "dc41c96e009f5a93d102735be1bbba015ec38163",
"size": "3973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/agent/linux/test_ovsdb_monitor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7680418"
},
{
"name": "Shell",
"bytes": "14690"
}
],
"symlink_target": ""
} |
from selenium import webdriver
from pages.admin_panel_login_page import AdminPanelLoginPage
from pages.add_to_cart_page import AddToCartPage
from pages.main_page import MainPage
from pages.select_product_page import SelectProductPage
from pages.shopping_page import ShoppingPage
from pages.add_to_user_page import AddToUserPage
from pages.check_price_page import CheckPricePage
from pages.check_price_yd_page import CheckPriceYDPage
from pages.create_new_product_page import CreateNewProductPage
from pages.add_new_product_page import AddNewProductPage
from pages.add_new_products_in_basket_page import AddNewProductsInBasketPage
from pages.check_true_page import CheckTruePage
from pages.login_page import LoginPage
from pages.country_zone_page import CountryZone
class Application:
def __init__(self):
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
        self.driver = webdriver.Chrome(options=options)
self.driver.implicitly_wait(10)
self.admin_panel_login_page = AdminPanelLoginPage(self.driver)
self.add_to_cart_page = AddToCartPage(self.driver)
self.main_page = MainPage(self.driver)
self.select_product_page = SelectProductPage(self.driver)
self.shopping_page = ShoppingPage(self.driver)
self.add_to_user_page = AddToUserPage(self.driver)
self.check_price_page = CheckPricePage(self.driver)
self.check_price_yd_page = CheckPriceYDPage(self.driver)
self.create_new_product_page = CreateNewProductPage(self.driver)
self.add_new_product_page = AddNewProductPage(self.driver)
self.add_new_products_in_basket_page = AddNewProductsInBasketPage(self.driver)
self.check_true_page = CheckTruePage(self.driver)
self.login_page = LoginPage(self.driver)
self.country_zone_page = CountryZone(self.driver)
def quit(self):
self.driver.quit()
def login(self, username, password):
self.admin_panel_login_page.open()
self.admin_panel_login_page.enter_username(username)
self.admin_panel_login_page.enter_password(password)
self.admin_panel_login_page.submit_login()
| {
"content_hash": "88a78a3680b5f47b2f493f6b6453db6f",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 86,
"avg_line_length": 46.5531914893617,
"alnum_prop": 0.746800731261426,
"repo_name": "skostya64/Selenium_tasks",
"id": "4a6c56030fc4f3b84c79e7a58f8da0df6bd639f9",
"size": "2188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25896"
}
],
"symlink_target": ""
} |
import smtplib
import pystache
import os
import html
from email.mime.text import MIMEText
from werkzeug.utils import secure_filename
from flask import url_for
from truecraft.database import db
from truecraft.objects import User
from truecraft.config import _cfg, _cfgi
def send_confirmation(user):
if _cfg("smtp-host") == "":
return
smtp = smtplib.SMTP(_cfg("smtp-host"), _cfgi("smtp-port"))
smtp.login(_cfg("smtp-user"), _cfg("smtp-password"))
with open("emails/confirm-account") as f:
        message = MIMEText(html.unescape(
pystache.render(f.read(), { 'user': user, "domain": _cfg("domain"), 'confirmation': user.confirmation })))
message['X-MC-Important'] = "true"
message['X-MC-PreserveRecipients'] = "false"
message['Subject'] = "Confirm your TrueCraft account"
message['From'] = "[email protected]"
message['To'] = user.email
smtp.sendmail("[email protected]", [ user.email ], message.as_string())
smtp.quit()
def send_reset(user):
if _cfg("smtp-host") == "":
return
smtp = smtplib.SMTP(_cfg("smtp-host"), _cfgi("smtp-port"))
smtp.login(_cfg("smtp-user"), _cfg("smtp-password"))
with open("emails/reset") as f:
        message = MIMEText(html.unescape(
pystache.render(f.read(), {
'user': user,
"domain": _cfg("domain"),
"protocol": _cfg("protocol"),
'confirmation': user.passwordReset
})))
message['X-MC-Important'] = "true"
message['X-MC-PreserveRecipients'] = "false"
message['Subject'] = "Reset your TrueCraft password"
message['From'] = "[email protected]"
message['To'] = user.email
smtp.sendmail("[email protected]", [ user.email ], message.as_string())
smtp.quit()
| {
"content_hash": "a9d1de6633a8cae7018e6f26f14eaf9a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 118,
"avg_line_length": 38.458333333333336,
"alnum_prop": 0.628385698808234,
"repo_name": "MaxLeiter/truecraft.io",
"id": "04b7eabd87cd527cbba9b59e423887aaf62c4518",
"size": "1846",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "truecraft/email.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2676"
},
{
"name": "HTML",
"bytes": "25694"
},
{
"name": "Makefile",
"bytes": "1117"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "25831"
}
],
"symlink_target": ""
} |
import ujson as json
from peachbox.task import Task
"""Copyright 2015 D. Britzger, P. Pahl, S. Schubert
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class ImportProperties(Task):
"""
Import attributes to define an edge.
    An edge (also called a `relationship`) relates two nodes (also called entities) to each other.
    Abstract class. Edges must implement the functions `rh_side`, `lh_side` and `partition_key`.
"""
def __init__(self):
"""Model defines schema of edge and must inhert from `peachbox.model.MasterDataSet`."""
self.ms = None
def set_master_schema(self, master_schema):
self.ms = master_schema
def execute(self, rdd):
if not self.ms:
            raise AttributeError("Master schema not set; call set_master_schema() first.")
return self.ms.fill_properties(rdd)
# return rdd.map(lambda row: self.fill_properties(row))
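# Usage sketch: a minimal illustration of the set_master_schema()/execute()
# contract described in the class docstring above. `DummySchema` and the
# plain-list stand-in for an RDD are hypothetical, for demonstration only;
# a real schema would subclass `peachbox.model.MasterDataSet` and `rdd`
# would be a Spark RDD.
if __name__ == "__main__":
    class DummySchema(object):
        def fill_properties(self, rdd):
            # A real master schema would map raw rows onto its property columns.
            return [dict(row, imported=True) for row in rdd]

    task = ImportProperties()
    task.set_master_schema(DummySchema())
    print(task.execute([{'user': 'u1', 'true_as_of_seconds': 1420070400}]))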
| {
"content_hash": "1a1b1b6b5bdb4a70ed23c1ad3904f16a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 99,
"avg_line_length": 34.35897435897436,
"alnum_prop": 0.7082089552238806,
"repo_name": "PeachstoneIO/peachbox",
"id": "58353db8480274ea7d8a598be2063bdcce78cf4c",
"size": "1340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peachbox/task/import_properties.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "125688"
},
{
"name": "Shell",
"bytes": "655"
}
],
"symlink_target": ""
} |
from moto.core.responses import BaseResponse
from moto.core.utils import tags_from_query_string
from .models import eb_backends
from .exceptions import InvalidParameterValueError
class EBResponse(BaseResponse):
def __init__(self):
super().__init__(service_name="elasticbeanstalk")
@property
def backend(self):
"""
:rtype: EBBackend
"""
return eb_backends[self.current_account][self.region]
def create_application(self):
app = self.backend.create_application(
application_name=self._get_param("ApplicationName")
)
template = self.response_template(EB_CREATE_APPLICATION)
return template.render(region_name=self.backend.region_name, application=app)
def describe_applications(self):
template = self.response_template(EB_DESCRIBE_APPLICATIONS)
return template.render(applications=self.backend.applications.values())
def create_environment(self):
application_name = self._get_param("ApplicationName")
try:
app = self.backend.applications[application_name]
except KeyError:
raise InvalidParameterValueError(
f"No Application named '{application_name}' found."
)
tags = tags_from_query_string(self.querystring, prefix="Tags.member")
env = self.backend.create_environment(
app,
environment_name=self._get_param("EnvironmentName"),
stack_name=self._get_param("SolutionStackName"),
tags=tags,
)
template = self.response_template(EB_CREATE_ENVIRONMENT)
return template.render(environment=env, region=self.backend.region_name)
def describe_environments(self):
envs = self.backend.describe_environments()
template = self.response_template(EB_DESCRIBE_ENVIRONMENTS)
return template.render(environments=envs)
def list_available_solution_stacks(self):
return EB_LIST_AVAILABLE_SOLUTION_STACKS
def update_tags_for_resource(self):
resource_arn = self._get_param("ResourceArn")
tags_to_add = tags_from_query_string(
self.querystring, prefix="TagsToAdd.member"
)
tags_to_remove = self._get_multi_param("TagsToRemove.member")
self.backend.update_tags_for_resource(resource_arn, tags_to_add, tags_to_remove)
return EB_UPDATE_TAGS_FOR_RESOURCE
def list_tags_for_resource(self):
resource_arn = self._get_param("ResourceArn")
tags = self.backend.list_tags_for_resource(resource_arn)
template = self.response_template(EB_LIST_TAGS_FOR_RESOURCE)
return template.render(tags=tags, arn=resource_arn)
EB_CREATE_APPLICATION = """
<CreateApplicationResponse xmlns="http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/">
<CreateApplicationResult>
<Application>
<ConfigurationTemplates/>
<DateCreated>2019-09-03T13:08:29.049Z</DateCreated>
<ResourceLifecycleConfig>
<VersionLifecycleConfig>
<MaxAgeRule>
<DeleteSourceFromS3>false</DeleteSourceFromS3>
<MaxAgeInDays>180</MaxAgeInDays>
<Enabled>false</Enabled>
</MaxAgeRule>
<MaxCountRule>
<DeleteSourceFromS3>false</DeleteSourceFromS3>
<MaxCount>200</MaxCount>
<Enabled>false</Enabled>
</MaxCountRule>
</VersionLifecycleConfig>
</ResourceLifecycleConfig>
<ApplicationArn>{{ application.arn }}</ApplicationArn>
<ApplicationName>{{ application.application_name }}</ApplicationName>
<DateUpdated>2019-09-03T13:08:29.049Z</DateUpdated>
</Application>
</CreateApplicationResult>
<ResponseMetadata>
<RequestId>1b6173c8-13aa-4b0a-99e9-eb36a1fb2778</RequestId>
</ResponseMetadata>
</CreateApplicationResponse>
"""
EB_DESCRIBE_APPLICATIONS = """
<DescribeApplicationsResponse xmlns="http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/">
<DescribeApplicationsResult>
<Applications>
{% for application in applications %}
<member>
<ConfigurationTemplates/>
<DateCreated>2019-09-03T13:08:29.049Z</DateCreated>
<ResourceLifecycleConfig>
<VersionLifecycleConfig>
<MaxAgeRule>
<MaxAgeInDays>180</MaxAgeInDays>
<DeleteSourceFromS3>false</DeleteSourceFromS3>
<Enabled>false</Enabled>
</MaxAgeRule>
<MaxCountRule>
<DeleteSourceFromS3>false</DeleteSourceFromS3>
<MaxCount>200</MaxCount>
<Enabled>false</Enabled>
</MaxCountRule>
</VersionLifecycleConfig>
</ResourceLifecycleConfig>
<ApplicationArn>{{ application.arn }}</ApplicationArn>
<ApplicationName>{{ application.application_name }}</ApplicationName>
<DateUpdated>2019-09-03T13:08:29.049Z</DateUpdated>
</member>
{% endfor %}
</Applications>
</DescribeApplicationsResult>
<ResponseMetadata>
<RequestId>015a05eb-282e-4b76-bd18-663fdfaf42e4</RequestId>
</ResponseMetadata>
</DescribeApplicationsResponse>
"""
EB_CREATE_ENVIRONMENT = """
<CreateEnvironmentResponse xmlns="http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/">
<CreateEnvironmentResult>
<SolutionStackName>{{ environment.solution_stack_name }}</SolutionStackName>
<Health>Grey</Health>
<EnvironmentArn>{{ environment.environment_arn }}</EnvironmentArn>
<DateUpdated>2019-09-04T09:41:24.222Z</DateUpdated>
<DateCreated>2019-09-04T09:41:24.222Z</DateCreated>
    <EnvironmentId>{{ environment.environment_id }}</EnvironmentId>
<PlatformArn>{{ environment.platform_arn }}</PlatformArn>
<Tier>
<Name>WebServer</Name>
<Type>Standard</Type>
<Version>1.0</Version>
</Tier>
<EnvironmentName>{{ environment.environment_name }}</EnvironmentName>
<ApplicationName>{{ environment.application_name }}</ApplicationName>
<Status>Launching</Status>
</CreateEnvironmentResult>
<ResponseMetadata>
<RequestId>18dc8158-f5d7-4d5a-82ef-07fcaadf81c6</RequestId>
</ResponseMetadata>
</CreateEnvironmentResponse>
"""
EB_DESCRIBE_ENVIRONMENTS = """
<DescribeEnvironmentsResponse xmlns="http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/">
<DescribeEnvironmentsResult>
<Environments>
{% for env in environments %}
<member>
<SolutionStackName>{{ env.solution_stack_name }}</SolutionStackName>
<Health>Grey</Health>
<EnvironmentArn>{{ env.environment_arn }}</EnvironmentArn>
<MinCapacityEnabled>false</MinCapacityEnabled>
<DateUpdated>2019-08-30T09:35:10.913Z</DateUpdated>
<AbortableOperationInProgress>false</AbortableOperationInProgress>
<Alerts/>
<DateCreated>2019-08-22T07:02:47.332Z</DateCreated>
<EnvironmentId>{{ env.environment_id }}</EnvironmentId>
<VersionLabel>1</VersionLabel>
<PlatformArn>{{ env.platform_arn }}</PlatformArn>
<Tier>
<Name>WebServer</Name>
<Type>Standard</Type>
<Version>1.0</Version>
</Tier>
<HealthStatus>No Data</HealthStatus>
<EnvironmentName>{{ env.environment_name }}</EnvironmentName>
<EndpointURL></EndpointURL>
<CNAME></CNAME>
<EnvironmentLinks/>
<ApplicationName>{{ env.application_name }}</ApplicationName>
<Status>Ready</Status>
</member>
{% endfor %}
</Environments>
</DescribeEnvironmentsResult>
<ResponseMetadata>
<RequestId>dd56b215-01a0-40b2-bd1e-57589c39424f</RequestId>
</ResponseMetadata>
</DescribeEnvironmentsResponse>
"""
# Current list as of 2019-09-04
EB_LIST_AVAILABLE_SOLUTION_STACKS = """
<ListAvailableSolutionStacksResponse xmlns="http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/">
<ListAvailableSolutionStacksResult>
<SolutionStacks>
<member>64bit Amazon Linux 2018.03 v4.10.1 running Node.js</member>
<member>64bit Amazon Linux 2018.03 v4.9.2 running Node.js</member>
<member>64bit Amazon Linux 2018.03 v4.8.0 running Node.js</member>
<member>64bit Amazon Linux 2018.03 v4.6.0 running Node.js</member>
<member>64bit Amazon Linux 2018.03 v4.5.3 running Node.js</member>
<member>64bit Amazon Linux 2018.03 v4.5.1 running Node.js</member>
<member>64bit Amazon Linux 2018.03 v4.5.0 running Node.js</member>
<member>64bit Amazon Linux 2017.09 v4.4.6 running Node.js</member>
<member>64bit Amazon Linux 2017.09 v4.4.5 running Node.js</member>
<member>64bit Amazon Linux 2017.09 v4.4.4 running Node.js</member>
<member>64bit Amazon Linux 2017.09 v4.4.2 running Node.js</member>
<member>64bit Amazon Linux 2017.09 v4.4.0 running Node.js</member>
<member>64bit Amazon Linux 2017.03 v4.3.0 running Node.js</member>
<member>64bit Amazon Linux 2017.03 v4.2.2 running Node.js</member>
<member>64bit Amazon Linux 2017.03 v4.2.1 running Node.js</member>
<member>64bit Amazon Linux 2017.03 v4.2.0 running Node.js</member>
<member>64bit Amazon Linux 2017.03 v4.1.1 running Node.js</member>
<member>64bit Amazon Linux 2017.03 v4.1.0 running Node.js</member>
<member>64bit Amazon Linux 2016.09 v4.0.1 running Node.js</member>
<member>64bit Amazon Linux 2016.09 v4.0.0 running Node.js</member>
<member>64bit Amazon Linux 2016.09 v3.3.1 running Node.js</member>
<member>64bit Amazon Linux 2016.09 v3.1.0 running Node.js</member>
<member>64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.4</member>
<member>64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.5</member>
<member>64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.6</member>
<member>64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.0</member>
<member>64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.1</member>
<member>64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.2</member>
<member>64bit Amazon Linux 2018.03 v2.8.12 running PHP 7.2</member>
<member>64bit Amazon Linux 2018.03 v2.8.7 running PHP 7.1</member>
<member>64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.1</member>
<member>64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.2</member>
<member>64bit Amazon Linux 2018.03 v2.8.5 running PHP 7.2</member>
<member>64bit Amazon Linux 2018.03 v2.8.4 running PHP 7.2</member>
<member>64bit Amazon Linux 2018.03 v2.8.3 running PHP 7.2</member>
<member>64bit Amazon Linux 2018.03 v2.8.2 running PHP 7.2</member>
<member>64bit Amazon Linux 2018.03 v2.8.1 running PHP 7.2</member>
<member>64bit Amazon Linux 2018.03 v2.8.0 running PHP 7.1</member>
<member>64bit Amazon Linux 2018.03 v2.7.1 running PHP 5.6</member>
<member>64bit Amazon Linux 2018.03 v2.7.1 running PHP 7.0</member>
<member>64bit Amazon Linux 2018.03 v2.7.1 running PHP 7.1</member>
<member>64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.0</member>
<member>64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.1</member>
<member>64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.4</member>
<member>64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.6</member>
<member>64bit Amazon Linux 2017.09 v2.6.6 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.09 v2.6.5 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.4</member>
<member>64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.5</member>
<member>64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.6</member>
<member>64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.1</member>
<member>64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.4</member>
<member>64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.5</member>
<member>64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.6</member>
<member>64bit Amazon Linux 2017.09 v2.6.3 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.09 v2.6.3 running PHP 7.1</member>
<member>64bit Amazon Linux 2017.09 v2.6.2 running PHP 5.6</member>
<member>64bit Amazon Linux 2017.09 v2.6.2 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.09 v2.6.1 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.4</member>
<member>64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.5</member>
<member>64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.6</member>
<member>64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.1</member>
<member>64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.1</member>
<member>64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.5</member>
<member>64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.6</member>
<member>64bit Amazon Linux 2017.03 v2.4.4 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.03 v2.4.3 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.4</member>
<member>64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.5</member>
<member>64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.6</member>
<member>64bit Amazon Linux 2017.03 v2.4.2 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.03 v2.4.1 running PHP 7.0</member>
<member>64bit Amazon Linux 2017.03 v2.4.0 running PHP 7.0</member>
<member>64bit Amazon Linux 2016.09 v2.3.2 running PHP 7.0</member>
<member>64bit Amazon Linux 2016.09 v2.3.1 running PHP 7.0</member>
<member>64bit Amazon Linux 2018.03 v2.9.1 running Python 3.6</member>
<member>64bit Amazon Linux 2018.03 v2.9.1 running Python 3.4</member>
<member>64bit Amazon Linux 2018.03 v2.9.1 running Python</member>
<member>64bit Amazon Linux 2018.03 v2.9.1 running Python 2.7</member>
<member>64bit Amazon Linux 2018.03 v2.7.5 running Python 3.6</member>
<member>64bit Amazon Linux 2018.03 v2.7.1 running Python 3.6</member>
<member>64bit Amazon Linux 2018.03 v2.7.0 running Python 3.6</member>
<member>64bit Amazon Linux 2017.09 v2.6.4 running Python 3.6</member>
<member>64bit Amazon Linux 2017.09 v2.6.1 running Python 3.6</member>
<member>64bit Amazon Linux 2017.03 v2.4.0 running Python 3.4</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Puma)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Puma)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Puma)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.3 (Puma)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.2 (Puma)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Puma)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Puma)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Passenger Standalone)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Passenger Standalone)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Passenger Standalone)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.3 (Passenger Standalone)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.2 (Passenger Standalone)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Passenger Standalone)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Passenger Standalone)</member>
<member>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 1.9.3</member>
<member>64bit Amazon Linux 2018.03 v2.8.0 running Ruby 2.5 (Passenger Standalone)</member>
<member>64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Puma)</member>
<member>64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Passenger Standalone)</member>
<member>64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8.5 Java 8</member>
<member>64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8 Java 8</member>
<member>64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 7</member>
<member>64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 6</member>
<member>64bit Amazon Linux 2018.03 v3.1.1 running Tomcat 8.5 Java 8</member>
<member>64bit Amazon Linux 2017.03 v2.6.5 running Tomcat 8 Java 8</member>
<member>64bit Amazon Linux 2017.03 v2.6.2 running Tomcat 8 Java 8</member>
<member>64bit Amazon Linux 2017.03 v2.6.1 running Tomcat 8 Java 8</member>
<member>64bit Amazon Linux 2017.03 v2.6.0 running Tomcat 8 Java 8</member>
<member>64bit Amazon Linux 2016.09 v2.5.4 running Tomcat 8 Java 8</member>
<member>64bit Amazon Linux 2016.03 v2.1.0 running Tomcat 8 Java 8</member>
<member>64bit Windows Server Core 2016 v2.2.1 running IIS 10.0</member>
<member>64bit Windows Server 2016 v2.2.1 running IIS 10.0</member>
<member>64bit Windows Server Core 2012 R2 v2.2.1 running IIS 8.5</member>
<member>64bit Windows Server 2012 R2 v2.2.1 running IIS 8.5</member>
<member>64bit Windows Server Core 2016 v1.2.0 running IIS 10.0</member>
<member>64bit Windows Server 2016 v1.2.0 running IIS 10.0</member>
<member>64bit Windows Server Core 2012 R2 v1.2.0 running IIS 8.5</member>
<member>64bit Windows Server 2012 R2 v1.2.0 running IIS 8.5</member>
<member>64bit Windows Server 2012 v1.2.0 running IIS 8</member>
<member>64bit Windows Server 2008 R2 v1.2.0 running IIS 7.5</member>
<member>64bit Windows Server Core 2012 R2 running IIS 8.5</member>
<member>64bit Windows Server 2012 R2 running IIS 8.5</member>
<member>64bit Windows Server 2012 running IIS 8</member>
<member>64bit Windows Server 2008 R2 running IIS 7.5</member>
<member>64bit Amazon Linux 2018.03 v2.12.16 running Docker 18.06.1-ce</member>
<member>64bit Amazon Linux 2016.09 v2.5.2 running Docker 1.12.6</member>
<member>64bit Amazon Linux 2018.03 v2.15.2 running Multi-container Docker 18.06.1-ce (Generic)</member>
<member>64bit Debian jessie v2.12.16 running Go 1.4 (Preconfigured - Docker)</member>
<member>64bit Debian jessie v2.12.16 running Go 1.3 (Preconfigured - Docker)</member>
<member>64bit Debian jessie v2.12.16 running Python 3.4 (Preconfigured - Docker)</member>
<member>64bit Debian jessie v2.10.0 running Python 3.4 (Preconfigured - Docker)</member>
<member>64bit Amazon Linux 2018.03 v2.9.1 running Java 8</member>
<member>64bit Amazon Linux 2018.03 v2.9.1 running Java 7</member>
<member>64bit Amazon Linux 2018.03 v2.8.0 running Java 8</member>
<member>64bit Amazon Linux 2018.03 v2.7.6 running Java 8</member>
<member>64bit Amazon Linux 2018.03 v2.7.5 running Java 8</member>
<member>64bit Amazon Linux 2018.03 v2.7.4 running Java 8</member>
<member>64bit Amazon Linux 2018.03 v2.7.2 running Java 8</member>
<member>64bit Amazon Linux 2018.03 v2.7.1 running Java 8</member>
<member>64bit Amazon Linux 2017.09 v2.6.8 running Java 8</member>
<member>64bit Amazon Linux 2017.09 v2.6.5 running Java 8</member>
<member>64bit Amazon Linux 2017.09 v2.6.4 running Java 8</member>
<member>64bit Amazon Linux 2017.09 v2.6.3 running Java 8</member>
<member>64bit Amazon Linux 2017.09 v2.6.0 running Java 8</member>
<member>64bit Amazon Linux 2017.03 v2.5.4 running Java 8</member>
<member>64bit Amazon Linux 2017.03 v2.5.3 running Java 8</member>
<member>64bit Amazon Linux 2017.03 v2.5.2 running Java 8</member>
<member>64bit Amazon Linux 2016.09 v2.4.4 running Java 8</member>
<member>64bit Amazon Linux 2018.03 v2.12.1 running Go 1.12.7</member>
<member>64bit Amazon Linux 2018.03 v2.6.14 running Packer 1.0.3</member>
<member>64bit Amazon Linux 2018.03 v2.12.16 running GlassFish 5.0 Java 8 (Preconfigured - Docker)</member>
</SolutionStacks>
<SolutionStackDetails>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v4.10.1 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v4.9.2 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v4.8.0 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v4.6.0 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v4.5.3 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v4.5.1 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v4.5.0 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v4.4.6 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v4.4.5 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v4.4.4 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v4.4.2 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v4.4.0 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v4.3.0 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v4.2.2 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v4.2.1 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v4.2.0 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v4.1.1 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v4.1.0 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2016.09 v4.0.1 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2016.09 v4.0.0 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2016.09 v3.3.1 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2016.09 v3.1.0 running Node.js</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.4</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.1</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.2</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.12 running PHP 7.2</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.7 running PHP 7.1</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.1</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.2</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.5 running PHP 7.2</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.4 running PHP 7.2</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.3 running PHP 7.2</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.2 running PHP 7.2</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.1 running PHP 7.2</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.0 running PHP 7.1</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.1 running PHP 5.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.1 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.1 running PHP 7.1</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.1</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.4</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.6 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.5 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.4</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.1</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.4</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.3 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.3 running PHP 7.1</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.2 running PHP 5.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.2 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.1 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.4</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.1</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.1</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.4 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.3 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.4</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.2 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.1 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.0 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2016.09 v2.3.2 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2016.09 v2.3.1 running PHP 7.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.9.1 running Python 3.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.9.1 running Python 3.4</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.9.1 running Python</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.9.1 running Python 2.7</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.5 running Python 3.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.1 running Python 3.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.0 running Python 3.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.4 running Python 3.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.1 running Python 3.6</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.0 running Python 3.4</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Puma)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Puma)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Puma)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.3 (Puma)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.2 (Puma)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Puma)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Puma)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Passenger Standalone)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Passenger Standalone)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Passenger Standalone)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.3 (Passenger Standalone)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.2 (Passenger Standalone)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Passenger Standalone)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Passenger Standalone)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.10.1 running Ruby 1.9.3</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.0 running Ruby 2.5 (Passenger Standalone)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Puma)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Passenger Standalone)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8.5 Java 8</SolutionStackName>
<PermittedFileTypes>
<member>war</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8 Java 8</SolutionStackName>
<PermittedFileTypes>
<member>war</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 7</SolutionStackName>
<PermittedFileTypes>
<member>war</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 6</SolutionStackName>
<PermittedFileTypes>
<member>war</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v3.1.1 running Tomcat 8.5 Java 8</SolutionStackName>
<PermittedFileTypes>
<member>war</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.6.5 running Tomcat 8 Java 8</SolutionStackName>
<PermittedFileTypes>
<member>war</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.6.2 running Tomcat 8 Java 8</SolutionStackName>
<PermittedFileTypes>
<member>war</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.6.1 running Tomcat 8 Java 8</SolutionStackName>
<PermittedFileTypes>
<member>war</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.6.0 running Tomcat 8 Java 8</SolutionStackName>
<PermittedFileTypes>
<member>war</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2016.09 v2.5.4 running Tomcat 8 Java 8</SolutionStackName>
<PermittedFileTypes>
<member>war</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2016.03 v2.1.0 running Tomcat 8 Java 8</SolutionStackName>
<PermittedFileTypes>
<member>war</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server Core 2016 v2.2.1 running IIS 10.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server 2016 v2.2.1 running IIS 10.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server Core 2012 R2 v2.2.1 running IIS 8.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server 2012 R2 v2.2.1 running IIS 8.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server Core 2016 v1.2.0 running IIS 10.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server 2016 v1.2.0 running IIS 10.0</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server Core 2012 R2 v1.2.0 running IIS 8.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server 2012 R2 v1.2.0 running IIS 8.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server 2012 v1.2.0 running IIS 8</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server 2008 R2 v1.2.0 running IIS 7.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server Core 2012 R2 running IIS 8.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server 2012 R2 running IIS 8.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server 2012 running IIS 8</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Windows Server 2008 R2 running IIS 7.5</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.12.16 running Docker 18.06.1-ce</SolutionStackName>
<PermittedFileTypes/>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2016.09 v2.5.2 running Docker 1.12.6</SolutionStackName>
<PermittedFileTypes/>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.15.2 running Multi-container Docker 18.06.1-ce (Generic)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
<member>json</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Debian jessie v2.12.16 running Go 1.4 (Preconfigured - Docker)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Debian jessie v2.12.16 running Go 1.3 (Preconfigured - Docker)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Debian jessie v2.12.16 running Python 3.4 (Preconfigured - Docker)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Debian jessie v2.10.0 running Python 3.4 (Preconfigured - Docker)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.9.1 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.9.1 running Java 7</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.8.0 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.6 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.5 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.4 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.2 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.7.1 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.8 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.5 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.4 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.3 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.09 v2.6.0 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.5.4 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.5.3 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2017.03 v2.5.2 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2016.09 v2.4.4 running Java 8</SolutionStackName>
<PermittedFileTypes>
<member>jar</member>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.12.1 running Go 1.12.7</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.6.14 running Packer 1.0.3</SolutionStackName>
<PermittedFileTypes/>
</member>
<member>
<SolutionStackName>64bit Amazon Linux 2018.03 v2.12.16 running GlassFish 5.0 Java 8 (Preconfigured - Docker)</SolutionStackName>
<PermittedFileTypes>
<member>zip</member>
</PermittedFileTypes>
</member>
</SolutionStackDetails>
</ListAvailableSolutionStacksResult>
<ResponseMetadata>
<RequestId>bd6bd2b2-9983-4845-b53b-fe53e8a5e1e7</RequestId>
</ResponseMetadata>
</ListAvailableSolutionStacksResponse>
"""
EB_UPDATE_TAGS_FOR_RESOURCE = """
<UpdateTagsForResourceResponse xmlns="http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/">
<ResponseMetadata>
<RequestId>f355d788-e67e-440f-b915-99e35254ffee</RequestId>
</ResponseMetadata>
</UpdateTagsForResourceResponse>
"""
EB_LIST_TAGS_FOR_RESOURCE = """
<ListTagsForResourceResponse xmlns="http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/">
<ListTagsForResourceResult>
<ResourceTags>
{% for key, value in tags.items() %}
<member>
<Key>{{ key }}</Key>
<Value>{{ value }}</Value>
</member>
{% endfor %}
</ResourceTags>
<ResourceArn>{{ arn }}</ResourceArn>
</ListTagsForResourceResult>
<ResponseMetadata>
<RequestId>178e410f-3b57-456f-a64c-a3b6a16da9ab</RequestId>
</ResponseMetadata>
</ListTagsForResourceResponse>
"""
| {
"content_hash": "f9819596c6a9ba30da989ee60452f391",
"timestamp": "",
"source": "github",
"line_count": 1389,
"max_line_length": 136,
"avg_line_length": 41.176385889128866,
"alnum_prop": 0.645854460258069,
"repo_name": "spulec/moto",
"id": "74d345a6c6bd187b622bb6b068b05ead13ad6036",
"size": "57194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/elasticbeanstalk/responses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "5983"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "1424"
},
{
"name": "Jinja",
"bytes": "2502"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Python",
"bytes": "14737868"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "5515"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django_odesk.conf import settings
# DESIGN NOTE:
# We could do something like this in the default settings:
# ODESK_USER_MODEL = 'auth.User'
# But we need to be sure that we are using `django.contrib.auth.models.User`
# by default, which might not be the case if there is another 'auth' app
# installed. That is why we use ODESK_CUSTOM_USER_MODEL, which is set to None
# by default
def get_user_model():
custom_model = settings.ODESK_CUSTOM_USER_MODEL
if not custom_model:
return User
app_label, model_name = custom_model.split('.')
model = models.get_model(app_label, model_name)
if model is None:
raise ImproperlyConfigured(
            'Unable to load the user model; '
            'check ODESK_CUSTOM_USER_MODEL in your project settings')
return model
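# Usage sketch: how the setting described in the design note above would be
# consumed. The 'profiles.OdeskUser' value is hypothetical; note also that
# `django.db.models.get_model` (used above) was removed in Django 1.9, where
# `django.apps.apps.get_model` would be needed instead.
#
#   # settings.py
#   ODESK_CUSTOM_USER_MODEL = 'profiles.OdeskUser'
#
#   # anywhere else in the project
#   from django_odesk.auth.models import get_user_model
#   OdeskUser = get_user_model()  # resolves to profiles.OdeskUser
#   user = OdeskUser.objects.create(username='freelancer')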
| {
"content_hash": "3195845dbc7d7dba0805fe53278ff216",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 37.28,
"alnum_prop": 0.7188841201716738,
"repo_name": "reputation/django-odesk",
"id": "2725935238d706fa5bd7c1dc6ad2d1af144e6bd0",
"size": "932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_odesk/auth/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18946"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.urls import reverse
from django.contrib import auth
class ExtTestCase(TestCase):
def create_and_log_in_user(self):
user = auth.models.User.objects.create(username='test_user', email='[email protected]')
user.set_password('password')
user.save()
self.client.post(reverse('account_login'), {'login': user.username, 'password': 'password'})
# html = response.content.decode('utf8')
# print(html)
return user
| {
"content_hash": "8c9a4e3885198dd25231151accf5b941",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 100,
"avg_line_length": 36.214285714285715,
"alnum_prop": 0.6666666666666666,
"repo_name": "jarnoln/cvdb",
"id": "8d902d8af1d4451baefcaa172cebfa6b985f4648",
"size": "507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/tests/ext_test_case.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "1482"
},
{
"name": "HTML",
"bytes": "35209"
},
{
"name": "Jinja",
"bytes": "2472"
},
{
"name": "Python",
"bytes": "134639"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/item/shared_item_tool_demagnetizer.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "4f147116962eec802e40d77d06ee369a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 23.23076923076923,
"alnum_prop": 0.6920529801324503,
"repo_name": "anhstudios/swganh",
"id": "f4577dac4d1ef17ade96ea876f65cd6bdd7473f6",
"size": "447",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/static/item/shared_item_tool_demagnetizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from linear_plot import linear_plot
from scatter_plot import scatter_plot
from dr_v_r import dr_v_r_plot
from dr_r_v_r import dr_r_v_r_plot
from semilog_plot import semilog_plot
from semilogy_plot import semilogy_plot, semilogy_scatter | {
"content_hash": "fd1cf1876e97afa085c7deb02ddcc06c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 57,
"avg_line_length": 39.166666666666664,
"alnum_prop": 0.8042553191489362,
"repo_name": "chrisjdavie/ws_cross_project",
"id": "1b693987f4f7dd917ae79efbe726ebbb482369f5",
"size": "235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generic_plotting/linplots_cjd/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88997"
}
],
"symlink_target": ""
} |
"""Tests for the step limit wrapper."""
from acme import wrappers
from acme.testing import fakes
import numpy as np
from absl.testing import absltest
ACTION = np.array(0, dtype=np.int32)
class StepLimitWrapperTest(absltest.TestCase):
def test_step(self):
fake_env = fakes.DiscreteEnvironment(episode_length=5)
env = wrappers.StepLimitWrapper(fake_env, step_limit=2)
env.reset()
env.step(ACTION)
self.assertTrue(env.step(ACTION).last())
def test_step_on_new_env(self):
fake_env = fakes.DiscreteEnvironment(episode_length=5)
env = wrappers.StepLimitWrapper(fake_env, step_limit=2)
self.assertTrue(env.step(ACTION).first())
self.assertFalse(env.step(ACTION).last())
self.assertTrue(env.step(ACTION).last())
def test_step_after_truncation(self):
fake_env = fakes.DiscreteEnvironment(episode_length=5)
env = wrappers.StepLimitWrapper(fake_env, step_limit=2)
env.reset()
env.step(ACTION)
self.assertTrue(env.step(ACTION).last())
self.assertTrue(env.step(ACTION).first())
self.assertFalse(env.step(ACTION).last())
self.assertTrue(env.step(ACTION).last())
def test_step_after_termination(self):
fake_env = fakes.DiscreteEnvironment(episode_length=5)
fake_env.reset()
fake_env.step(ACTION)
fake_env.step(ACTION)
fake_env.step(ACTION)
fake_env.step(ACTION)
self.assertTrue(fake_env.step(ACTION).last())
env = wrappers.StepLimitWrapper(fake_env, step_limit=2)
self.assertTrue(env.step(ACTION).first())
self.assertFalse(env.step(ACTION).last())
self.assertTrue(env.step(ACTION).last())
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "4cbd7b7763e6f26ffe94a7f015af469c",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 59,
"avg_line_length": 27.683333333333334,
"alnum_prop": 0.7025888019265503,
"repo_name": "deepmind/acme",
"id": "78fbc47856677e1ab3818220bdb6a0e2b444fd84",
"size": "2277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acme/wrappers/step_limit_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2182865"
},
{
"name": "Shell",
"bytes": "2668"
}
],
"symlink_target": ""
} |
"""Downsample strategies.
Contains all of the downsample strategies. Use downsample(), and
secondary_downsample() for downsampling records stored in files.
"""
from math import ceil
FLOAT_PRECISION = 4
SECOND_TO_MICROSECOND = 1E6
STRATEGIES = ['max', 'min', 'avg']
def _max_min_downsample(records, is_max, downsample_factor):
"""Downsamples records by maximum or minimum value.
Args:
records: A list of records ([time, power, channel]) in 1 second.
        is_max: A boolean indicating whether to take the maximum (True) or the minimum (False).
downsample_factor: Take one record per "downsample_factor" records.
Returns:
A list of records with lower sampling rate.
Example:
[
[time,power,channel1],
[time,power,channel1],
[time,power,channel1]
]
"""
if downsample_factor <= 1:
return records
number_records = ceil(len(records) / downsample_factor)
result = list()
for index in range(number_records):
records_in_timespan = records[index *
downsample_factor: (index+1)*downsample_factor]
if is_max:
result.append(
max(records_in_timespan, key=lambda record: record[1]))
else:
result.append(
min(records_in_timespan, key=lambda record: record[1]))
return result
def _average_downsample(records, downsample_factor):
"""Downsamples records by average value.
Args:
records: A list of records ([time, power, channel]) in 1 second.
downsample_factor: Take one record per "downsample_factor" records.
Returns:
A list of downsampled records.
Example:
[
[time,power,channel1],
[time,power,channel1],
[time,power,channel1]
]
"""
if downsample_factor <= 1:
return records
number_records = ceil(len(records) / downsample_factor)
result = list()
for index in range(number_records):
records_in_timespan = records[index *
downsample_factor: (index+1)*downsample_factor]
average = [0, 0, records_in_timespan[0][2]]
for record in records_in_timespan:
average[0] += record[0]
average[1] += record[1]
average[0] /= len(records_in_timespan)
average[1] /= len(records_in_timespan)
average[0] = int(average[0])
average[1] = round(average[1], FLOAT_PRECISION)
result.append(average)
return result
def strategy_reducer(records, strategy, downsample_factor):
"""Applies relative downsample function to the records, based on strategy string.
Args:
records: A list of records ([time, power, channel]).
strategy: A string representing downsampling strategy.
downsample_factor: Take one record per "downsample_factor" records.
Returns:
A list of downsampled records with number under max_records.
Example:
[
[time,power,channel1],
[time,power,channel1],
[time,power,channel1]
]
Raises:
        TypeError: if strategy is not one of 'max', 'min' or 'avg'.
"""
if strategy == 'max':
res = _max_min_downsample(
records, is_max=True, downsample_factor=downsample_factor)
elif strategy == 'min':
res = _max_min_downsample(
records, is_max=False, downsample_factor=downsample_factor)
elif strategy == 'avg':
res = _average_downsample(
records, downsample_factor=downsample_factor)
else:
raise TypeError
return res
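# Hypothetical usage sketch (not part of the original module, guarded so that
# importing the module is unaffected): nine fake records ([time_us, power,
# channel]) reduced by a factor of three with each strategy defined above.
if __name__ == '__main__':
    sample_records = [[index * 100, float(index % 4), 'channel1']
                      for index in range(9)]
    for strategy_name in STRATEGIES:
        print(strategy_name,
              strategy_reducer(sample_records, strategy_name, downsample_factor=3))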
| {
"content_hash": "ae5ae6ab414977cd8538a010e725231f",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 85,
"avg_line_length": 31.33050847457627,
"alnum_prop": 0.5929131728428455,
"repo_name": "googleinterns/power-data-graphing-intern-2020",
"id": "ce05b79b57ae39cb833d4fe157bd497642f505d6",
"size": "4351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/downsample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "9694"
},
{
"name": "JavaScript",
"bytes": "2499"
},
{
"name": "Python",
"bytes": "86091"
},
{
"name": "SCSS",
"bytes": "4921"
},
{
"name": "Shell",
"bytes": "2122"
},
{
"name": "TypeScript",
"bytes": "73299"
}
],
"symlink_target": ""
} |
def mapper(book):
def checker(page, element):
width=int(element.get('WIDTH'))
height=int(element.get('HEIGHT'))
x=int(element.get('HPOS'))
y=int(element.get('VPOS'))
area=width*height
fracarea=area/(page.width*page.height)
return page, [width, height, x, y, fracarea]
reducer=merge_under(add)
def shuffler(year, count):
return year%count
| {
"content_hash": "5cca3b794c5f69c386c675bbe43d368f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 52,
"avg_line_length": 27,
"alnum_prop": 0.6172839506172839,
"repo_name": "UCL-dataspring/cluster-code",
"id": "576e8a572a418da8df6de95ed4b8bcdf32220a04",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "queries/find_figures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49104"
},
{
"name": "Shell",
"bytes": "1186"
}
],
"symlink_target": ""
} |
"""
TCP support for IOCP reactor
"""
from twisted.internet import interfaces, error, address, main, defer
from twisted.internet.abstract import isIPAddress
from twisted.internet.tcp import _SocketCloser, Connector as TCPConnector
from twisted.persisted import styles
from twisted.python import log, failure, reflect, util
from zope.interface import implements
import socket, operator, errno, struct
from twisted.internet.iocpreactor import iocpsupport as _iocp, abstract
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
from twisted.internet.iocpreactor.const import ERROR_IO_PENDING
from twisted.internet.iocpreactor.const import SO_UPDATE_CONNECT_CONTEXT
from twisted.internet.iocpreactor.const import SO_UPDATE_ACCEPT_CONTEXT
from twisted.internet.iocpreactor.const import ERROR_CONNECTION_REFUSED
from twisted.internet.iocpreactor.const import ERROR_NETWORK_UNREACHABLE
# ConnectEx returns these. XXX: find out what it does for timeout
connectExErrors = {
ERROR_CONNECTION_REFUSED: errno.WSAECONNREFUSED,
ERROR_NETWORK_UNREACHABLE: errno.WSAENETUNREACH,
}
class Connection(abstract.FileHandle, _SocketCloser):
implements(IReadWriteHandle, interfaces.ITCPTransport,
interfaces.ISystemHandle)
def __init__(self, sock, proto, reactor=None):
abstract.FileHandle.__init__(self, reactor)
self.socket = sock
self.getFileHandle = sock.fileno
self.protocol = proto
def getHandle(self):
return self.socket
def dataReceived(self, rbuffer):
# XXX: some day, we'll have protocols that can handle raw buffers
self.protocol.dataReceived(str(rbuffer))
def readFromHandle(self, bufflist, evt):
return _iocp.recv(self.getFileHandle(), bufflist, evt)
def writeToHandle(self, buff, evt):
return _iocp.send(self.getFileHandle(), buff, evt)
def _closeWriteConnection(self):
try:
getattr(self.socket, self._socketShutdownMethod)(1)
except socket.error:
pass
p = interfaces.IHalfCloseableProtocol(self.protocol, None)
if p:
try:
p.writeConnectionLost()
except:
f = failure.Failure()
log.err()
self.connectionLost(f)
def readConnectionLost(self, reason):
p = interfaces.IHalfCloseableProtocol(self.protocol, None)
if p:
try:
p.readConnectionLost()
except:
log.err()
self.connectionLost(failure.Failure())
else:
self.connectionLost(reason)
def connectionLost(self, reason):
abstract.FileHandle.connectionLost(self, reason)
self._closeSocket()
protocol = self.protocol
del self.protocol
del self.socket
del self.getFileHandle
protocol.connectionLost(reason)
def logPrefix(self):
"""
Return the prefix to log with when I own the logging thread.
"""
return self.logstr
def getTcpNoDelay(self):
return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY))
def setTcpNoDelay(self, enabled):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)
def getTcpKeepAlive(self):
return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE))
def setTcpKeepAlive(self, enabled):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)
class Client(Connection):
addressFamily = socket.AF_INET
socketType = socket.SOCK_STREAM
def __init__(self, host, port, bindAddress, connector, reactor):
self.connector = connector
self.addr = (host, port)
self.reactor = reactor
# ConnectEx documentation says socket _has_ to be bound
if bindAddress is None:
bindAddress = ('', 0)
try:
try:
skt = reactor.createSocket(self.addressFamily, self.socketType)
except socket.error, se:
raise error.ConnectBindError(se[0], se[1])
else:
try:
skt.bind(bindAddress)
except socket.error, se:
raise error.ConnectBindError(se[0], se[1])
self.socket = skt
Connection.__init__(self, skt, None, reactor)
reactor.callLater(0, self.resolveAddress)
except error.ConnectBindError, err:
reactor.callLater(0, self.failIfNotConnected, err)
def resolveAddress(self):
if isIPAddress(self.addr[0]):
self._setRealAddress(self.addr[0])
else:
d = self.reactor.resolve(self.addr[0])
d.addCallbacks(self._setRealAddress, self.failIfNotConnected)
def _setRealAddress(self, address):
self.realAddress = (address, self.addr[1])
self.doConnect()
def failIfNotConnected(self, err):
if (self.connected or self.disconnected or
not hasattr(self, "connector")):
return
try:
self._closeSocket()
except AttributeError:
pass
else:
del self.socket, self.getFileHandle
self.reactor.removeActiveHandle(self)
self.connector.connectionFailed(failure.Failure(err))
del self.connector
def stopConnecting(self):
"""
Stop attempt to connect.
"""
self.failIfNotConnected(error.UserError())
def cbConnect(self, rc, bytes, evt):
if rc:
rc = connectExErrors.get(rc, rc)
self.failIfNotConnected(error.getConnectError((rc,
errno.errorcode.get(rc, 'Unknown error'))))
else:
self.socket.setsockopt(socket.SOL_SOCKET,
SO_UPDATE_CONNECT_CONTEXT,
struct.pack('I', self.socket.fileno()))
self.protocol = self.connector.buildProtocol(self.getPeer())
self.connected = True
self.logstr = self.protocol.__class__.__name__+",client"
self.protocol.makeConnection(self)
self.startReading()
def doConnect(self):
if not hasattr(self, "connector"):
# this happens if we connector.stopConnecting in
# factory.startedConnecting
return
assert _iocp.have_connectex
self.reactor.addActiveHandle(self)
evt = _iocp.Event(self.cbConnect, self)
rc = _iocp.connect(self.socket.fileno(), self.realAddress, evt)
if rc == ERROR_IO_PENDING:
return
else:
evt.ignore = True
            self.cbConnect(rc, 0, evt)
def getHost(self):
"""
Returns an IPv4Address.
This indicates the address from which I am connecting.
"""
return address.IPv4Address('TCP', *(self.socket.getsockname() +
('INET',)))
def getPeer(self):
"""
Returns an IPv4Address.
This indicates the address that I am connected to.
"""
return address.IPv4Address('TCP', *(self.realAddress + ('INET',)))
def __repr__(self):
s = ('<%s to %s at %x>' %
(self.__class__, self.addr, util.unsignedID(self)))
return s
def connectionLost(self, reason):
if not self.connected:
self.failIfNotConnected(error.ConnectError(string=reason))
else:
Connection.connectionLost(self, reason)
self.connector.connectionLost(reason)
class Server(Connection):
"""
Serverside socket-stream connection class.
I am a serverside network connection transport; a socket which came from an
accept() on a server.
"""
def __init__(self, sock, protocol, clientAddr, serverAddr, sessionno, reactor):
"""
Server(sock, protocol, client, server, sessionno)
Initialize me with a socket, a protocol, a descriptor for my peer (a
tuple of host, port describing the other end of the connection), an
instance of Port, and a session number.
"""
Connection.__init__(self, sock, protocol, reactor)
self.serverAddr = serverAddr
self.clientAddr = clientAddr
self.sessionno = sessionno
self.logstr = "%s,%s,%s" % (self.protocol.__class__.__name__,
sessionno, self.clientAddr.host)
self.repstr = "<%s #%s on %s>" % (self.protocol.__class__.__name__,
self.sessionno, self.serverAddr.port)
self.connected = True
self.startReading()
def __repr__(self):
"""
A string representation of this connection.
"""
return self.repstr
def getHost(self):
"""
Returns an IPv4Address.
This indicates the server's address.
"""
return self.serverAddr
def getPeer(self):
"""
Returns an IPv4Address.
This indicates the client's address.
"""
return self.clientAddr
class Connector(TCPConnector):
def _makeTransport(self):
return Client(self.host, self.port, self.bindAddress, self,
self.reactor)
class Port(styles.Ephemeral, _SocketCloser):
implements(interfaces.IListeningPort)
connected = False
disconnected = False
disconnecting = False
addressFamily = socket.AF_INET
socketType = socket.SOCK_STREAM
sessionno = 0
maxAccepts = 100
# Actual port number being listened on, only set to a non-None
# value when we are actually listening.
_realPortNumber = None
def __init__(self, port, factory, backlog=50, interface='', reactor=None):
self.port = port
self.factory = factory
self.backlog = backlog
self.interface = interface
self.reactor = reactor
def __repr__(self):
if self._realPortNumber is not None:
return "<%s of %s on %s>" % (self.__class__,
self.factory.__class__,
self._realPortNumber)
else:
return "<%s of %s (not listening)>" % (self.__class__,
self.factory.__class__)
def startListening(self):
try:
skt = self.reactor.createSocket(self.addressFamily,
self.socketType)
# TODO: resolve self.interface if necessary
skt.bind((self.interface, self.port))
except socket.error, le:
raise error.CannotListenError, (self.interface, self.port, le)
self.addrLen = _iocp.maxAddrLen(skt.fileno())
# Make sure that if we listened on port 0, we update that to
# reflect what the OS actually assigned us.
self._realPortNumber = skt.getsockname()[1]
log.msg("%s starting on %s" % (self.factory.__class__,
self._realPortNumber))
self.factory.doStart()
skt.listen(self.backlog)
self.connected = True
self.disconnected = False
self.reactor.addActiveHandle(self)
self.socket = skt
self.getFileHandle = self.socket.fileno
self.doAccept()
def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
"""
Stop accepting connections on this port.
This will shut down my socket and call self.connectionLost().
It returns a deferred which will fire successfully when the
port is actually closed.
"""
self.disconnecting = True
if self.connected:
self.deferred = defer.Deferred()
self.reactor.callLater(0, self.connectionLost, connDone)
return self.deferred
stopListening = loseConnection
def connectionLost(self, reason):
"""
Cleans up the socket.
"""
log.msg('(Port %s Closed)' % self._realPortNumber)
self._realPortNumber = None
d = None
if hasattr(self, "deferred"):
d = self.deferred
del self.deferred
self.disconnected = True
self.reactor.removeActiveHandle(self)
self.connected = False
self._closeSocket()
del self.socket
del self.getFileHandle
try:
self.factory.doStop()
except:
self.disconnecting = False
if d is not None:
d.errback(failure.Failure())
else:
raise
else:
self.disconnecting = False
if d is not None:
d.callback(None)
def logPrefix(self):
"""
Returns the name of my class, to prefix log entries with.
"""
return reflect.qual(self.factory.__class__)
def getHost(self):
"""
Returns an IPv4Address.
This indicates the server's address.
"""
return address.IPv4Address('TCP', *(self.socket.getsockname() +
('INET',)))
def cbAccept(self, rc, bytes, evt):
self.handleAccept(rc, evt)
if not (self.disconnecting or self.disconnected):
self.doAccept()
def handleAccept(self, rc, evt):
if self.disconnecting or self.disconnected:
return False
# possible errors:
# (WSAEMFILE, WSAENOBUFS, WSAENFILE, WSAENOMEM, WSAECONNABORTED)
if rc:
log.msg("Could not accept new connection -- %s (%s)" %
(errno.errorcode.get(rc, 'unknown error'), rc))
return False
else:
evt.newskt.setsockopt(socket.SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
struct.pack('I', self.socket.fileno()))
family, lAddr, rAddr = _iocp.get_accept_addrs(evt.newskt.fileno(),
evt.buff)
assert family == self.addressFamily
protocol = self.factory.buildProtocol(
address._ServerFactoryIPv4Address('TCP', rAddr[0], rAddr[1]))
if protocol is None:
evt.newskt.close()
else:
s = self.sessionno
self.sessionno = s+1
transport = Server(evt.newskt, protocol,
address.IPv4Address('TCP', rAddr[0], rAddr[1], 'INET'),
address.IPv4Address('TCP', lAddr[0], lAddr[1], 'INET'),
s, self.reactor)
protocol.makeConnection(transport)
return True
def doAccept(self):
numAccepts = 0
while 1:
evt = _iocp.Event(self.cbAccept, self)
# see AcceptEx documentation
evt.buff = buff = _iocp.AllocateReadBuffer(2 * (self.addrLen + 16))
evt.newskt = newskt = self.reactor.createSocket(self.addressFamily,
self.socketType)
rc = _iocp.accept(self.socket.fileno(), newskt.fileno(), buff, evt)
if (rc == ERROR_IO_PENDING
or (not rc and numAccepts >= self.maxAccepts)):
break
else:
evt.ignore = True
if not self.handleAccept(rc, evt):
break
numAccepts += 1
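# Hypothetical usage sketch (not part of the original module, guarded so that
# importing it is unaffected): the classes above are normally reached through
# the reactor API rather than instantiated directly. Installing the IOCP
# reactor (Windows only) before twisted.internet.reactor is imported makes
# listenTCP() build the Port/Server/Connection objects defined in this file.
if __name__ == '__main__':
    from twisted.internet import iocpreactor
    iocpreactor.install()
    from twisted.internet import reactor, protocol

    class Echo(protocol.Protocol):
        def dataReceived(self, data):
            self.transport.write(data)

    factory = protocol.Factory()
    factory.protocol = Echo
    reactor.listenTCP(8000, factory)
    reactor.run()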
| {
"content_hash": "a72fcce473f8738aa81b672e14176702",
"timestamp": "",
"source": "github",
"line_count": 507,
"max_line_length": 83,
"avg_line_length": 31.007889546351084,
"alnum_prop": 0.5729915399783729,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "d40a0919d8df4de36b568963111756ea9e615437",
"size": "15800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Twisted/twisted/internet/iocpreactor/tcp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
} |
from abc import ABCMeta, abstractmethod
class CloudAdapter(metaclass=ABCMeta):
"""
defines an interface which should be used to implement functionality for a cloud provider
"""
class CloudConnectionException(Exception):
"""
        raised if something goes wrong while communicating with the cloud
"""
class InvalidCloudSettingsException(Exception):
"""
raised if the given cloud settings are not valid
"""
pass
def __init__(self, settings):
self._settings = settings
@abstractmethod
def create_target(self, name, bootstrapping_network_interface, network_interfaces, volumes, ram, cores):
"""
creates a target machine in the cloud
:param name: the name of the target machine
:type name: str
:param bootstrapping_network_interface: the network interface which is used during the migration
:type bootstrapping_network_interface: {'ip: str, 'network_id': str}
:param network_interfaces: the network interfaces which should be created
:type network_interfaces: [{'ip: str, 'network_id': str}]
:param volumes: a list of volume sizes in gb, which should be created
:type volumes: list[int]
:param ram: the ram size in mb as a multiple of 256
:type ram: int
:param cores: the number of cores the target machine should get
:type cores: int
:return: the created target
:rtype: dict
"""
pass
@abstractmethod
def delete_target(self, server_id):
"""
deletes the target with the given id
:param server_id: the cloud id of the target machine
:type server_id: str
"""
pass
@abstractmethod
def start_target(self, server_id):
"""
starts the target with the given id and waits for it to be started
:param server_id: the cloud id of the target machine
:type server_id: str
"""
pass
@abstractmethod
def stop_target(self, server_id):
"""
stops the target with the given id and waits for it to be stopped
:param server_id: the cloud id of the target machine
:type server_id: str
"""
pass
@abstractmethod
def delete_volume(self, volume_id):
"""
deletes the volume with the given id
:param volume_id: the volume id of the volume which should be deleted
:type volume_id: str
"""
pass
@abstractmethod
def make_volume_boot(self, server_id, volume_id):
"""
changes which device a target machine should boot from and waits for the change to be finished
:param server_id: the cloud id of the target machine
:type server_id: str
:param volume_id: the volume id of the volume which should become the new boot device
:type volume_id: str
:return:
"""
pass
@abstractmethod
def delete_nic(self, server_id, nic_id):
"""
deletes the network interface with the given id
:param server_id: the cloud id of the target machine
:type server_id: str
        :param nic_id: the id of the network interface which should be deleted
:type nic_id: str
"""
pass
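# Hypothetical sketch (not part of the original module): the smallest concrete
# adapter. Because every operation above is abstract, a subclass must override
# all of them before it can be instantiated; the bodies below are no-ops that
# invent no real cloud API and exist only to show the required surface.
class NoOpCloudAdapter(CloudAdapter):
    def create_target(self, name, bootstrapping_network_interface,
                      network_interfaces, volumes, ram, cores):
        return {'id': 'noop-{0}'.format(name)}

    def delete_target(self, server_id):
        pass

    def start_target(self, server_id):
        pass

    def stop_target(self, server_id):
        pass

    def delete_volume(self, volume_id):
        pass

    def make_volume_boot(self, server_id, volume_id):
        pass

    def delete_nic(self, server_id, nic_id):
        pass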
| {
"content_hash": "ad69af09261b59cc12b5a18b80e8099a",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 108,
"avg_line_length": 31.583333333333332,
"alnum_prop": 0.6115508648490179,
"repo_name": "jdepoix/goto_cloud",
"id": "2fbd5acb2767654e481f66b0900fb1189cba7f71",
"size": "3411",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "goto_cloud/cloud_management/cloud_adapter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "354421"
},
{
"name": "Shell",
"bytes": "619"
}
],
"symlink_target": ""
} |
import os
import datetime
import re
import codecs
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.conf import settings
from sqp import models as sqp_models
class Migration(DataMigration):
def forwards(self, orm):
try:
sql = 'ALTER TABLE sqp_item DROP INDEX unique_name;'
db.execute_many(sql)
print "unique_name index dropped"
except:
print "unique_name index not dropped (most likely already deleted)"
log_text = ''
Q_BASE_DIR = settings.PROJECT_DIR + '/data/questions_omarti_20161212_2/'
files = []
r,d,files = os.walk(Q_BASE_DIR).next()
        #looking for russian A, B and C chars
item_regex = re.compile(ur'^(P\.)?[\u0041-\u005A\u0410\u0412\u0421]{1,2}[0-9]{1,3}([A-Za-z\u0410\u0412\u0421\u0430\u0432\u0441]{1,3})?(\.)?$')
text_area_regex = re.compile(ur'\{[A-Z]+\}')
q_regex = re.compile(ur'Q{1}[0-9]{1,4}')
for file_name in sorted(files):
file_log_text = []
CREATED_ITEMS = 0
CREATED_QUESTIONS = 0
EDITED_QUESTIONS = 0
NOT_EDITED = 0
SKIPPED_AREAS = 0
IMPORTED_LINES = 0
SKIPPED_LINES = []
#utf-8-sig to get rid of the utf-8 BOM /ufeff
#http://stackoverflow.com/questions/9228202/tokenizing-unicode-using-nltk
file = codecs.open(Q_BASE_DIR + file_name, "r", "utf-8-sig")
if not '.txt' in file_name:
continue
print "NOW CHECKING file %s" % file.name
params = file_name.replace('.txt', '').split('_')
if len(params) > 3:
round_name, country_iso, language_iso, supplemental = file_name.replace('.txt', '').split('_')
else:
round_name, country_iso, language_iso = file_name.replace('.txt', '').split('_')
language = sqp_models.Language.objects.get(iso=language_iso)
country = sqp_models.Country.objects.get(iso=country_iso)
round_name = round_name.replace('ESS', 'ESS Round ')
study = sqp_models.Study.objects.get(name=round_name)
key = None
questions = {}
text_areas = ['INTRO',
'QUESTION',
'ANSWERS',
'TRASH']
line_number = 0
for line in file:
line_number += 1
#Get rid of any Q13 Q12 crap
if q_regex.match(line):
line = re.sub(q_regex, '', line).strip()
key = None
if item_regex.match(line.strip()):
key = item_regex.match(line.strip()).group(0)
#russian chars
key = key.replace(u'\u0410', 'A')
key = key.replace(u'\u0412', 'B')
key = key.replace(u'\u0421', 'C')
key = key.replace(u'\u0430', 'a')
key = key.replace(u'\u0432', 'b')
key = key.replace(u'\u0441', 'c')
#P.
key = key.replace('P.', '')
key = key.replace(' ', '')
#Trailing .
key = key.replace('.', '')
questions[key] = {'INTRO' : '',
'QUESTION' : '',
'ANSWERS' : '',
'found_text_areas' : []
}
current_text_area = 'QUESTION'
continue
elif key and text_area_regex.match(line):
match = text_area_regex.match(line).group(0)
current_text_area = match.replace('{', '').replace('}', '')
if current_text_area == 'ANSWERS 1':
current_text_area ='ANSWERS'
elif current_text_area == 'ANSWERS 2':
SKIPPED_AREAS += 1
continue
if current_text_area in questions[key]['found_text_areas']:
current_text_area = 'TRASH'
else:
questions[key]['found_text_areas'].append(current_text_area)
if current_text_area not in text_areas:
raise Exception('Unrecognized text area "%s"' % current_text_area)
continue
#Only take the first occurence of QUESTION / INTRO / ANSWERS
if key and current_text_area != 'TRASH':
questions[key][current_text_area] += line
IMPORTED_LINES += 1
elif line.strip() != '':
SKIPPED_LINES.append({'line_number' : line_number,
'content': line})
n = 0
for key in questions:
n +=1
#if n > 10:break
#print "NOW SAVING question %s" % key
try:
item, i_was_created = sqp_models.Item.objects.get_or_create(admin=key, study=study)
if i_was_created:
CREATED_ITEMS += 1
except Exception as ex:
print '!!!!!!!!!!BAD KEY!!!!!!!!!!!!!!!%s' % key
file_log_text.append('!!!!!!!!!!BAD KEY!!!!!!!!!!!!!!!%s' % key)
#raise Exception()
question, q_was_created = sqp_models.Question.objects.get_or_create(item=item, country=country, language=language)
if q_was_created:
CREATED_QUESTIONS += 1
if question.rfa_text or question.introduction_text or question.answer_text:
NOT_EDITED += 1
else:
question.introduction_text = questions[key]['INTRO'].strip()
question.rfa_text = questions[key]['QUESTION'].strip()
question.answer_text = questions[key]['ANSWERS'].strip()
if q_was_created:
question.imported_from = 'jorge-created'
else:
question.imported_from = 'jorge-existing'
question.save(create_suggestions = False)
EDITED_QUESTIONS += 1
file_log_text.append('%s %s %s new items:%s, total qs:%s, created qs:%s, edited qs:%s, not edited qs:%s, skipped keys:%s' %\
(country_iso, language_iso, round_name,
CREATED_ITEMS, len(questions), CREATED_QUESTIONS, EDITED_QUESTIONS, NOT_EDITED, SKIPPED_AREAS))
file_log_text.append('LINES SKIPPED %s / IMPORTED %s' % (len(SKIPPED_LINES), IMPORTED_LINES))
if SKIPPED_LINES:
file_log_text.append('SKIPPED_LINES')
for l in SKIPPED_LINES:
file_log_text.append(' %s: %s' % (l['line_number'], l['content'].replace('\n', '')))
file_log_text.append('IMPORTED ITEMS: %s' % ','.join(questions.keys()))
file_log_text.append('------------------------------------------------------------------------')
print '\n'.join(file_log_text)
print
log_text += '\n'.join(file_log_text) + '\n\n\n'
log_file = codecs.open('/tmp/omarti_import_20161212_2.log', 'w', "utf-8-sig")
log_file.write(log_text)
log_file.close()
print "LOG STORED AT '/tmp/omarti_import_20161212_2.log'"
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.branch': {
'Meta': {'ordering': "('label__characteristic__name', 'label__id')", 'object_name': 'Branch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Label']"}),
'to_characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"})
},
'sqp.characteristic': {
'Meta': {'ordering': "['name']", 'object_name': 'Characteristic'},
'auto_fill_suggestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'desc': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'suggestion': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'validation_rules': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sqp.ValidationRule']", 'null': 'True', 'blank': 'True'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Widget']"})
},
'sqp.characteristicset': {
'Meta': {'ordering': "['id']", 'object_name': 'CharacteristicSet'},
'branches': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Branch']", 'symmetrical': 'False'}),
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.coding': {
'Meta': {'ordering': "['user', 'characteristic']", 'object_name': 'Coding'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'choice': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'seconds_taken': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'sqp.codingchange': {
'Meta': {'object_name': 'CodingChange'},
'change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'change_by_user_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'change_type': ('django.db.models.fields.IntegerField', [], {}),
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'coding_change_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CodingChangeGroup']"}),
'coding_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'coding_user_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'error_occured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_value': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'new_value_by_related_country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']", 'null': 'True', 'blank': 'True'}),
'new_value_by_related_lang': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']", 'null': 'True', 'blank': 'True'}),
'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processing_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'question_id': ('django.db.models.fields.IntegerField', [], {})
},
'sqp.codingchangegroup': {
'Meta': {'ordering': "['id']", 'object_name': 'CodingChangeGroup'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sqp.codingsuggestion': {
'Meta': {'object_name': 'CodingSuggestion'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'explanation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.completion': {
'Meta': {'object_name': 'Completion'},
'authorized': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'characteristic_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CharacteristicSet']"}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'out_of_date': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'potential_improvements': ('sqp.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'predictions': ('sqp.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'sqp.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_three': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.faq': {
'Meta': {'object_name': 'FAQ'},
'answer': ('django.db.models.fields.TextField', [], {}),
'asker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.TextField', [], {})
},
'sqp.history': {
'Meta': {'object_name': 'History'},
'action_description': ('django.db.models.fields.TextField', [], {}),
'action_type': ('django.db.models.fields.IntegerField', [], {}),
'actor': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {}),
'object_model': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '170'}),
'previous_values': ('django.db.models.fields.TextField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'sqp.item': {
'Meta': {'ordering': "('study', 'admin_letter', 'admin_number', 'id')", 'object_name': 'Item'},
'admin': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'admin_letter': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'admin_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_item_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Study']"})
},
'sqp.itemgroup': {
'Meta': {'object_name': 'ItemGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Item']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.label': {
'Meta': {'ordering': "('characteristic__name', 'id')", 'object_name': 'Label'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'compute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'True'", 'max_length': '150'})
},
'sqp.language': {
'Meta': {'ordering': "('name',)", 'object_name': 'Language'},
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'iso2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.parameter': {
'Meta': {'ordering': "['order']", 'object_name': 'Parameter'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.View']", 'through': "orm['sqp.Prediction']", 'symmetrical': 'False'})
},
'sqp.prediction': {
'Meta': {'object_name': 'Prediction'},
'function_name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'paramater': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Parameter']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.View']"})
},
'sqp.question': {
'Meta': {'ordering': "('item__study', 'country', 'language', 'item__admin_letter', 'item__admin_number', 'item__id')", 'object_name': 'Question'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_question_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported_from': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'introduction_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Item']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']"}),
'rel': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rel_hi': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rel_lo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'relz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'relz_se': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rfa_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_hi': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_lo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'valz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'valz_se': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.questionbulkassignments': {
'Meta': {'object_name': 'QuestionBulkAssignments'},
'assignments': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.UserQuestion']", 'symmetrical': 'False', 'blank': 'True'}),
'can_edit_details': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_edit_text': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']", 'null': 'True'}),
'has_been_run': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.ItemGroup']", 'null': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']", 'null': 'True'}),
'last_run_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
},
'sqp.questionbulkcreation': {
'Meta': {'object_name': 'QuestionBulkCreation'},
'copy_text_from_study': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Study']", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']"}),
'created_questions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Question']", 'symmetrical': 'False', 'blank': 'True'}),
'has_been_run': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.ItemGroup']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']"}),
'last_run_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.study': {
'Meta': {'ordering': "('name',)", 'object_name': 'Study'},
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_study_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'})
},
'sqp.usedcountry': {
'Meta': {'ordering': "['name']", 'object_name': 'UsedCountry', 'db_table': "'vw_country_question'"},
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'default_characteristic_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CharacteristicSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trusted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'sqp.userquestion': {
'Meta': {'object_name': 'UserQuestion'},
'can_edit_details': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_edit_text': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'sqp.validationrule': {
'Meta': {'object_name': 'ValidationRule'},
'failure_message': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'rule': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '7'})
},
'sqp.view': {
'Meta': {'ordering': "['order']", 'object_name': 'View'},
'expects': ('django.db.models.fields.CharField', [], {'default': "'tuple'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '140'})
},
'sqp.widget': {
'Meta': {'object_name': 'Widget'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['sqp']
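# Hypothetical illustration (not part of the migration, guarded so running the
# migration is unaffected): how the admin-code regex used in forwards()
# classifies question keys once "Q13"-style prefixes have been stripped.
if __name__ == '__main__':
    demo_item_regex = re.compile(ur'^(P\.)?[\u0041-\u005A\u0410\u0412\u0421]{1,2}[0-9]{1,3}([A-Za-z\u0410\u0412\u0421\u0430\u0432\u0441]{1,3})?(\.)?$')
    for raw_key in [u'B13', u'P.A1a.', u'\u041213', u'Q13 B13']:
        print repr(raw_key), bool(demo_item_regex.match(raw_key))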
| {
"content_hash": "e31ee7a9ac24c1dbb9e1d6b0ac06d4fe",
"timestamp": "",
"source": "github",
"line_count": 480,
"max_line_length": 182,
"avg_line_length": 64.72916666666667,
"alnum_prop": 0.5200193112327004,
"repo_name": "recsm/SQP",
"id": "13eb17bda92b9103a5650139181fcce2080844f3",
"size": "31088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqp/migrations/0072_import_questions_omarti_20161212_2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "284413"
},
{
"name": "HTML",
"bytes": "581366"
},
{
"name": "JavaScript",
"bytes": "583584"
},
{
"name": "Makefile",
"bytes": "1104"
},
{
"name": "Python",
"bytes": "2144259"
},
{
"name": "Shell",
"bytes": "221"
}
],
"symlink_target": ""
} |
from django.db import models
from .base import Pessoa, UF_SIGLA
class Transportadora(Pessoa):
class Meta:
verbose_name = "Transportadora"
class Veiculo(models.Model):
transportadora_veiculo = models.ForeignKey(
'cadastro.Transportadora', related_name="veiculo", on_delete=models.CASCADE)
descricao = models.CharField(max_length=255)
placa = models.CharField(max_length=8, blank=True, null=True)
uf = models.CharField(max_length=3, null=True,
blank=True, choices=UF_SIGLA)
def __unicode__(self):
return u'%s / %s / %s' % (self.descricao, self.placa, self.uf)
def __str__(self):
return u'%s / %s / %s' % (self.descricao, self.placa, self.uf)
| {
"content_hash": "a36803ad2523375225f06c4e6e618b91",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 84,
"avg_line_length": 31.869565217391305,
"alnum_prop": 0.6439290586630286,
"repo_name": "thiagopena/djangoSIGE",
"id": "835ebfd2ae304d5eee0cc2133c36f700c81e442e",
"size": "758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangosige/apps/cadastro/models/transportadora.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "62842"
},
{
"name": "Dockerfile",
"bytes": "400"
},
{
"name": "HTML",
"bytes": "387328"
},
{
"name": "JavaScript",
"bytes": "188596"
},
{
"name": "Makefile",
"bytes": "313"
},
{
"name": "Python",
"bytes": "805518"
}
],
"symlink_target": ""
} |
try:
import textile
except ImportError:
pass
try:
import markdown2
except ImportError:
pass
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
from ccgallery import settings as c_settings
from ccgallery.models import get_model
register = template.Library()
class ItemNode(template.Node):
def __init__(self, varname, categories=None):
self.varname = varname
self.categories = categories
def render(self, context):
items = get_model().objects\
.visible()
if self.categories is not None:
try:
categories = self.categories.split(',')
except AttributeError:
categories = [self.categories]
items = items.filter(
categories__pk__in=categories)
context[self.varname] = items
return ''
@register.tag
def get_items(parser, token):
bits = token.contents.split()
try:
categories = bits[4]
except IndexError:
categories = None
return ItemNode(bits[2], categories)
class RandomItemNode(template.Node):
def __init__(self, display, varname):
self.display = display
self.varname = varname
def render(self, context):
items = get_model().objects\
.visible()\
.order_by('?')[:self.display]
context[self.varname] = items
return ''
@register.tag
def get_gallery_items(parser, token):
bits = token.contents.split()
return RandomItemNode(bits[1], bits[3])
@register.inclusion_tag('ccgallery/_js.html')
def gallery_js():
return {
'STATIC_URL': settings.STATIC_URL,
}
@register.inclusion_tag('ccgallery/_css.html')
def gallery_css():
return {
'STATIC_URL': settings.STATIC_URL,
}
@register.filter
def markup(text):
"""output the description according to whatever markup
language is set in the settings"""
html = ''
if c_settings.MARKUP_LANGUAGE == 'textile':
html = textile.textile(text)
if c_settings.MARKUP_LANGUAGE == 'markdown':
html = markdown2.markdown(text)
return mark_safe(html)
| {
"content_hash": "14cb9a6923b79f528cf219f073df7847",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 58,
"avg_line_length": 24.752808988764045,
"alnum_prop": 0.6259645937358148,
"repo_name": "designcc/django-ccgallery",
"id": "7d65f897537770f0f8f93baf92e2bf133fe577a7",
"size": "2203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccgallery/templatetags/ccgallery_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "75754"
},
{
"name": "Python",
"bytes": "32960"
}
],
"symlink_target": ""
} |
from lib.action import St2BaseAction
import s3
__all__ = [
'UploadToS3'
]
class UploadToS3(St2BaseAction):
def run(self, file_name, remote_file, bucket):
remote_name = s3.S3Name(remote_file, bucket=bucket)
connection = s3.S3Connection(**self.config['s3'])
storage = s3.Storage(connection)
storage.write(file_name, remote_name)
payload = {
"status": "ok",
"uploaded_file": remote_file,
}
return payload
| {
"content_hash": "cad2a7e1a75d2ffd65ec424f859ed4cd",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 59,
"avg_line_length": 23.571428571428573,
"alnum_prop": 0.6,
"repo_name": "lmEshoo/st2contrib",
"id": "66dc1d4bd57d041147794451e0d306f1934fa436",
"size": "495",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packs/st2/actions/upload_to_s3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8530"
},
{
"name": "Makefile",
"bytes": "2262"
},
{
"name": "Python",
"bytes": "444890"
},
{
"name": "Shell",
"bytes": "3635"
}
],
"symlink_target": ""
} |
import time
import datetime
import traceback
import multiprocessing
import urllib2
import xml.sax
import redis
import random
import pymongo
import re
import requests
import dateutil.parser
import isodate
import urlparse
from django.conf import settings
from django.db import IntegrityError
from django.core.cache import cache
from apps.reader.models import UserSubscription
from apps.rss_feeds.models import Feed, MStory
from apps.rss_feeds.page_importer import PageImporter
from apps.rss_feeds.icon_importer import IconImporter
from apps.push.models import PushSubscription
from apps.statistics.models import MAnalyticsFetcher
# from utils import feedparser
from utils import feedparser
from utils.story_functions import pre_process_story, strip_tags, linkify
from utils import log as logging
from utils.feed_functions import timelimit, TimeoutError, utf8encode, cache_bust_url
from BeautifulSoup import BeautifulSoup
from django.utils import feedgenerator
from django.utils.html import linebreaks
from utils import json_functions as json
# from utils.feed_functions import mail_feed_error_to_admin
# Refresh feed code adapted from Feedjack.
# http://feedjack.googlecode.com
FEED_OK, FEED_SAME, FEED_ERRPARSE, FEED_ERRHTTP, FEED_ERREXC = range(5)
def mtime(ttime):
""" datetime auxiliar function.
"""
return datetime.datetime.fromtimestamp(time.mktime(ttime))
class FetchFeed:
def __init__(self, feed_id, options):
self.feed = Feed.get_by_id(feed_id)
self.options = options
self.fpf = None
@timelimit(30)
def fetch(self):
"""
Uses feedparser to download the feed. Will be parsed later.
"""
start = time.time()
identity = self.get_identity()
log_msg = u'%2s ---> [%-30s] ~FYFetching feed (~FB%d~FY), last update: %s' % (identity,
self.feed.title[:30],
self.feed.id,
datetime.datetime.now() - self.feed.last_update)
logging.debug(log_msg)
etag = self.feed.etag
modified = self.feed.last_modified.utctimetuple()[:7] if self.feed.last_modified else None
address = self.feed.feed_address
if (self.options.get('force') or random.random() <= .01):
modified = None
etag = None
address = cache_bust_url(address)
logging.debug(u' ---> [%-30s] ~FBForcing fetch: %s' % (
self.feed.title[:30], address))
elif (not self.feed.fetched_once or not self.feed.known_good):
modified = None
etag = None
USER_AGENT = ('NewsBlur Feed Fetcher - %s subscriber%s - %s '
'(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) '
'AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 '
'Safari/534.48.3)' % (
self.feed.num_subscribers,
's' if self.feed.num_subscribers != 1 else '',
self.feed.permalink,
))
if self.options.get('feed_xml'):
logging.debug(u' ---> [%-30s] ~FM~BKFeed has been fat pinged. Ignoring fat: %s' % (
self.feed.title[:30], len(self.options.get('feed_xml'))))
if self.options.get('fpf'):
self.fpf = self.options.get('fpf')
logging.debug(u' ---> [%-30s] ~FM~BKFeed fetched in real-time with fat ping.' % (
self.feed.title[:30]))
return FEED_OK, self.fpf
if 'youtube.com' in address:
try:
youtube_feed = self.fetch_youtube(address)
except (requests.adapters.ConnectionError):
youtube_feed = None
if not youtube_feed:
logging.debug(u' ***> [%-30s] ~FRYouTube fetch failed: %s.' %
(self.feed.title[:30], address))
return FEED_ERRHTTP, None
self.fpf = feedparser.parse(youtube_feed)
if not self.fpf:
try:
self.fpf = feedparser.parse(address,
agent=USER_AGENT,
etag=etag,
modified=modified)
except (TypeError, ValueError, KeyError, EOFError), e:
logging.debug(u' ***> [%-30s] ~FR%s, turning off headers.' %
(self.feed.title[:30], e))
self.fpf = feedparser.parse(address, agent=USER_AGENT)
except (TypeError, ValueError, KeyError, EOFError), e:
logging.debug(u' ***> [%-30s] ~FR%s fetch failed: %s.' %
(self.feed.title[:30], e))
return FEED_ERRHTTP, None
logging.debug(u' ---> [%-30s] ~FYFeed fetch in ~FM%.4ss' % (
self.feed.title[:30], time.time() - start))
return FEED_OK, self.fpf
def get_identity(self):
identity = "X"
current_process = multiprocessing.current_process()
if current_process._identity:
identity = current_process._identity[0]
return identity
def fetch_youtube(self, address):
username = None
channel_id = None
list_id = None
if 'gdata.youtube.com' in address:
try:
username_groups = re.search('gdata.youtube.com/feeds/\w+/users/(\w+)/', address)
if not username_groups:
return
username = username_groups.group(1)
except IndexError:
return
elif 'youtube.com/feeds/videos.xml?user=' in address:
try:
username = urlparse.parse_qs(urlparse.urlparse(address).query)['user'][0]
except IndexError:
return
elif 'youtube.com/feeds/videos.xml?channel_id=' in address:
try:
channel_id = urlparse.parse_qs(urlparse.urlparse(address).query)['channel_id'][0]
except IndexError:
return
elif 'youtube.com/playlist' in address:
try:
list_id = urlparse.parse_qs(urlparse.urlparse(address).query)['list'][0]
except IndexError:
return
if channel_id:
video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?channel_id=%s" % channel_id)
channel_json = requests.get("https://www.googleapis.com/youtube/v3/channels?part=snippet&id=%s&key=%s" %
(channel_id, settings.YOUTUBE_API_KEY))
channel = json.decode(channel_json.content)
try:
username = channel['items'][0]['snippet']['title']
description = channel['items'][0]['snippet']['description']
except IndexError:
return
elif list_id:
playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlists?part=snippet&id=%s&key=%s" %
(list_id, settings.YOUTUBE_API_KEY))
playlist = json.decode(playlist_json.content)
try:
username = playlist['items'][0]['snippet']['title']
description = playlist['items'][0]['snippet']['description']
except IndexError:
return
channel_url = "https://www.youtube.com/playlist?list=%s" % list_id
elif username:
video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?user=%s" % username)
description = "YouTube videos uploaded by %s" % username
else:
return
if list_id:
playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=%s&key=%s" %
(list_id, settings.YOUTUBE_API_KEY))
playlist = json.decode(playlist_json.content)
try:
video_ids = [video['snippet']['resourceId']['videoId'] for video in playlist['items']]
except IndexError:
return
else:
if video_ids_xml.status_code != 200:
return
video_ids_soup = BeautifulSoup(video_ids_xml.content)
channel_url = video_ids_soup.find('author').find('uri').getText()
video_ids = []
for video_id in video_ids_soup.findAll('yt:videoid'):
video_ids.append(video_id.getText())
videos_json = requests.get("https://www.googleapis.com/youtube/v3/videos?part=contentDetails%%2Csnippet&id=%s&key=%s" %
(','.join(video_ids), settings.YOUTUBE_API_KEY))
videos = json.decode(videos_json.content)
data = {}
data['title'] = ("%s's YouTube Videos" % username if 'Uploads' not in username else username)
data['link'] = channel_url
data['description'] = description
data['lastBuildDate'] = datetime.datetime.utcnow()
data['generator'] = 'NewsBlur YouTube API v3 Decrapifier - %s' % settings.NEWSBLUR_URL
data['docs'] = None
data['feed_url'] = address
rss = feedgenerator.Atom1Feed(**data)
for video in videos['items']:
thumbnail = video['snippet']['thumbnails'].get('maxres')
if not thumbnail:
thumbnail = video['snippet']['thumbnails'].get('high')
if not thumbnail:
thumbnail = video['snippet']['thumbnails'].get('medium')
duration_sec = isodate.parse_duration(video['contentDetails']['duration']).seconds
if duration_sec >= 3600:
hours = (duration_sec / 3600)
minutes = (duration_sec - (hours*3600)) / 60
seconds = duration_sec - (hours*3600) - (minutes*60)
duration = "%s:%s:%s" % (hours, '{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
else:
minutes = duration_sec / 60
seconds = duration_sec - (minutes*60)
duration = "%s:%s" % ('{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
content = """<div class="NB-youtube-player"><iframe allowfullscreen="true" src="%s"></iframe></div>
<div class="NB-youtube-stats"><small>
<b>From:</b> <a href="%s">%s</a><br />
<b>Duration:</b> %s<br />
</small></div><hr>
<div class="NB-youtube-description">%s</div>
<img src="%s" style="display:none" />""" % (
("https://www.youtube.com/embed/" + video['id']),
channel_url, username,
duration,
linkify(linebreaks(video['snippet']['description'])),
thumbnail['url'] if thumbnail else "",
)
link = "http://www.youtube.com/watch?v=%s" % video['id']
story_data = {
'title': video['snippet']['title'],
'link': link,
'description': content,
'author_name': username,
'categories': [],
'unique_id': "tag:youtube.com,2008:video:%s" % video['id'],
'pubdate': dateutil.parser.parse(video['snippet']['publishedAt']),
}
rss.add_item(**story_data)
return rss.writeString('utf-8')
class ProcessFeed:
def __init__(self, feed_id, fpf, options):
self.feed_id = feed_id
self.options = options
self.fpf = fpf
def refresh_feed(self):
self.feed = Feed.get_by_id(self.feed_id)
if self.feed_id != self.feed.pk:
logging.debug(" ***> Feed has changed: from %s to %s" % (self.feed_id, self.feed.pk))
self.feed_id = self.feed.pk
def process(self):
""" Downloads and parses a feed.
"""
start = time.time()
self.refresh_feed()
ret_values = dict(new=0, updated=0, same=0, error=0)
if hasattr(self.fpf, 'status'):
if self.options['verbose']:
if self.fpf.bozo and self.fpf.status != 304:
logging.debug(u' ---> [%-30s] ~FRBOZO exception: %s ~SB(%s entries)' % (
self.feed.title[:30],
self.fpf.bozo_exception,
len(self.fpf.entries)))
if self.fpf.status == 304:
self.feed = self.feed.save()
self.feed.save_feed_history(304, "Not modified")
return FEED_SAME, ret_values
# 302: Temporary redirect: ignore
# 301: Permanent redirect: save it (after 20 tries)
if self.fpf.status == 301:
if self.fpf.href.endswith('feedburner.com/atom.xml'):
return FEED_ERRHTTP, ret_values
redirects, non_redirects = self.feed.count_redirects_in_history('feed')
self.feed.save_feed_history(self.fpf.status, "HTTP Redirect (%d to go)" % (20-len(redirects)))
if len(redirects) >= 20 or len(non_redirects) == 0:
self.feed.feed_address = self.fpf.href
if not self.feed.known_good:
self.feed.fetched_once = True
logging.debug(" ---> [%-30s] ~SB~SK~FRFeed is %s'ing. Refetching..." % (self.feed.title[:30], self.fpf.status))
self.feed = self.feed.schedule_feed_fetch_immediately()
if not self.fpf.entries:
self.feed = self.feed.save()
self.feed.save_feed_history(self.fpf.status, "HTTP Redirect")
return FEED_ERRHTTP, ret_values
if self.fpf.status >= 400:
logging.debug(" ---> [%-30s] ~SB~FRHTTP Status code: %s. Checking address..." % (self.feed.title[:30], self.fpf.status))
fixed_feed = None
if not self.feed.known_good:
fixed_feed, feed = self.feed.check_feed_link_for_feed_address()
if not fixed_feed:
self.feed.save_feed_history(self.fpf.status, "HTTP Error")
else:
self.feed = feed
self.feed = self.feed.save()
return FEED_ERRHTTP, ret_values
if not self.fpf.entries:
if self.fpf.bozo and isinstance(self.fpf.bozo_exception, feedparser.NonXMLContentType):
logging.debug(" ---> [%-30s] ~SB~FRFeed is Non-XML. %s entries. Checking address..." % (self.feed.title[:30], len(self.fpf.entries)))
fixed_feed = None
if not self.feed.known_good:
fixed_feed, feed = self.feed.check_feed_link_for_feed_address()
if not fixed_feed:
self.feed.save_feed_history(552, 'Non-xml feed', self.fpf.bozo_exception)
else:
self.feed = feed
self.feed = self.feed.save()
return FEED_ERRPARSE, ret_values
elif self.fpf.bozo and isinstance(self.fpf.bozo_exception, xml.sax._exceptions.SAXException):
logging.debug(" ---> [%-30s] ~SB~FRFeed has SAX/XML parsing issues. %s entries. Checking address..." % (self.feed.title[:30], len(self.fpf.entries)))
fixed_feed = None
if not self.feed.known_good:
fixed_feed, feed = self.feed.check_feed_link_for_feed_address()
if not fixed_feed:
self.feed.save_feed_history(553, 'SAX Exception', self.fpf.bozo_exception)
else:
self.feed = feed
self.feed = self.feed.save()
return FEED_ERRPARSE, ret_values
# the feed has changed (or it is the first time we parse it)
# saving the etag and last_modified fields
original_etag = self.feed.etag
self.feed.etag = self.fpf.get('etag')
if self.feed.etag:
self.feed.etag = self.feed.etag[:255]
# some times this is None (it never should) *sigh*
if self.feed.etag is None:
self.feed.etag = ''
if self.feed.etag != original_etag:
self.feed.save(update_fields=['etag'])
original_last_modified = self.feed.last_modified
try:
self.feed.last_modified = mtime(self.fpf.modified)
except:
self.feed.last_modified = None
pass
if self.feed.last_modified != original_last_modified:
self.feed.save(update_fields=['last_modified'])
self.fpf.entries = self.fpf.entries[:100]
original_title = self.feed.feed_title
if self.fpf.feed.get('title'):
self.feed.feed_title = strip_tags(self.fpf.feed.get('title'))
if self.feed.feed_title != original_title:
self.feed.save(update_fields=['feed_title'])
tagline = self.fpf.feed.get('tagline', self.feed.data.feed_tagline)
if tagline:
original_tagline = self.feed.data.feed_tagline
self.feed.data.feed_tagline = utf8encode(tagline)
if self.feed.data.feed_tagline != original_tagline:
self.feed.data.save(update_fields=['feed_tagline'])
if not self.feed.feed_link_locked:
new_feed_link = self.fpf.feed.get('link') or self.fpf.feed.get('id') or self.feed.feed_link
if new_feed_link != self.feed.feed_link:
logging.debug(" ---> [%-30s] ~SB~FRFeed's page is different: %s to %s" % (self.feed.title[:30], self.feed.feed_link, new_feed_link))
redirects, non_redirects = self.feed.count_redirects_in_history('page')
self.feed.save_page_history(301, "HTTP Redirect (%s to go)" % (20-len(redirects)))
if len(redirects) >= 20 or len(non_redirects) == 0:
self.feed.feed_link = new_feed_link
self.feed.save(update_fields=['feed_link'])
# Determine if stories aren't valid and replace broken guids
guids_seen = set()
permalinks_seen = set()
for entry in self.fpf.entries:
guids_seen.add(entry.get('guid'))
permalinks_seen.add(Feed.get_permalink(entry))
guid_difference = len(guids_seen) != len(self.fpf.entries)
single_guid = len(guids_seen) == 1
replace_guids = single_guid and guid_difference
permalink_difference = len(permalinks_seen) != len(self.fpf.entries)
single_permalink = len(permalinks_seen) == 1
replace_permalinks = single_permalink and permalink_difference
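# Example of the flags above: if every entry shares a single guid but permalinks
# differ, replace_guids is True and replace_permalinks is False, so each story's
# permalink becomes its guid below; if the permalinks are all identical as well,
# the published timestamp is used as the guid instead.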
# Compare new stories to existing stories, adding and updating
start_date = datetime.datetime.utcnow()
story_hashes = []
stories = []
for entry in self.fpf.entries:
story = pre_process_story(entry)
if story.get('published') < start_date:
start_date = story.get('published')
if replace_guids:
if replace_permalinks:
new_story_guid = unicode(story.get('published'))
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBReplacing guid (%s) with timestamp: %s' % (
self.feed.title[:30],
story.get('guid'), new_story_guid))
story['guid'] = new_story_guid
else:
new_story_guid = Feed.get_permalink(story)
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBReplacing guid (%s) with permalink: %s' % (
self.feed.title[:30],
story.get('guid'), new_story_guid))
story['guid'] = new_story_guid
story['story_hash'] = MStory.feed_guid_hash_unsaved(self.feed.pk, story.get('guid'))
stories.append(story)
story_hashes.append(story.get('story_hash'))
existing_stories = dict((s.story_hash, s) for s in MStory.objects(
story_hash__in=story_hashes,
# story_date__gte=start_date,
# story_feed_id=self.feed.pk
))
ret_values = self.feed.add_update_stories(stories, existing_stories,
verbose=self.options['verbose'],
updates_off=self.options['updates_off'])
if (hasattr(self.fpf, 'feed') and
hasattr(self.fpf.feed, 'links') and self.fpf.feed.links):
hub_url = None
self_url = self.feed.feed_address
for link in self.fpf.feed.links:
if link['rel'] == 'hub' and not hub_url:
hub_url = link['href']
elif link['rel'] == 'self':
self_url = link['href']
push_expired = False
if self.feed.is_push:
try:
push_expired = self.feed.push.lease_expires < datetime.datetime.now()
except PushSubscription.DoesNotExist:
self.feed.is_push = False
if (hub_url and self_url and not settings.DEBUG and
self.feed.active_subscribers > 0 and
(push_expired or not self.feed.is_push or self.options.get('force'))):
logging.debug(u' ---> [%-30s] ~BB~FW%sSubscribing to PuSH hub: %s' % (
self.feed.title[:30],
"~SKRe-~SN" if push_expired else "", hub_url))
try:
PushSubscription.objects.subscribe(self_url, feed=self.feed, hub=hub_url)
except TimeoutError:
logging.debug(u' ---> [%-30s] ~BB~FW~FRTimed out~FW subscribing to PuSH hub: %s' % (
self.feed.title[:30], hub_url))
elif (self.feed.is_push and
(self.feed.active_subscribers <= 0 or not hub_url)):
logging.debug(u' ---> [%-30s] ~BB~FWTurning off PuSH, no hub found' % (
self.feed.title[:30]))
self.feed.is_push = False
self.feed = self.feed.save()
logging.debug(u' ---> [%-30s] ~FYParsed Feed: %snew=%s~SN~FY %sup=%s~SN same=%s%s~SN %serr=%s~SN~FY total=~SB%s' % (
self.feed.title[:30],
'~FG~SB' if ret_values['new'] else '', ret_values['new'],
'~FY~SB' if ret_values['updated'] else '', ret_values['updated'],
'~SB' if ret_values['same'] else '', ret_values['same'],
'~FR~SB' if ret_values['error'] else '', ret_values['error'],
len(self.fpf.entries)))
self.feed.update_all_statistics(full=bool(ret_values['new']), force=self.options['force'])
if ret_values['new']:
self.feed.trim_feed()
self.feed.expire_redis()
self.feed.save_feed_history(200, "OK")
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBTIME: feed parse in ~FM%.4ss' % (
self.feed.title[:30], time.time() - start))
return FEED_OK, ret_values
class Dispatcher:
def __init__(self, options, num_threads):
self.options = options
self.feed_stats = {
FEED_OK:0,
FEED_SAME:0,
FEED_ERRPARSE:0,
FEED_ERRHTTP:0,
FEED_ERREXC:0}
self.feed_trans = {
FEED_OK:'ok',
FEED_SAME:'unchanged',
FEED_ERRPARSE:'cant_parse',
FEED_ERRHTTP:'http_error',
FEED_ERREXC:'exception'}
self.feed_keys = sorted(self.feed_trans.keys())
self.num_threads = num_threads
self.time_start = datetime.datetime.utcnow()
self.workers = []
def refresh_feed(self, feed_id):
"""Update feed, since it may have changed"""
return Feed.objects.using('default').get(pk=feed_id)
def process_feed_wrapper(self, feed_queue):
delta = None
current_process = multiprocessing.current_process()
identity = "X"
feed = None
if current_process._identity:
identity = current_process._identity[0]
for feed_id in feed_queue:
start_duration = time.time()
feed_fetch_duration = None
feed_process_duration = None
page_duration = None
icon_duration = None
feed_code = None
ret_entries = None
start_time = time.time()
ret_feed = FEED_ERREXC
try:
feed = self.refresh_feed(feed_id)
skip = False
if self.options.get('fake'):
skip = True
weight = "-"
quick = "-"
rand = "-"
elif (self.options.get('quick') and not self.options['force'] and
feed.known_good and feed.fetched_once and not feed.is_push):
weight = feed.stories_last_month * feed.num_subscribers
random_weight = random.randint(1, max(weight, 1))
quick = float(self.options.get('quick', 0))
rand = random.random()
if random_weight < 100 and rand < quick:
skip = True
elif False and feed.feed_address.startswith("http://news.google.com/news"):
skip = True
weight = "-"
quick = "-"
rand = "-"
if skip:
logging.debug(' ---> [%-30s] ~BGFaking fetch, skipping (%s/month, %s subs, %s < %s)...' % (
feed.title[:30],
weight,
feed.num_subscribers,
rand, quick))
continue
ffeed = FetchFeed(feed_id, self.options)
ret_feed, fetched_feed = ffeed.fetch()
feed_fetch_duration = time.time() - start_duration
if ((fetched_feed and ret_feed == FEED_OK) or self.options['force']):
pfeed = ProcessFeed(feed_id, fetched_feed, self.options)
ret_feed, ret_entries = pfeed.process()
feed = pfeed.feed
feed_process_duration = time.time() - start_duration
if (ret_entries and ret_entries['new']) or self.options['force']:
start = time.time()
if not feed.known_good or not feed.fetched_once:
feed.known_good = True
feed.fetched_once = True
feed = feed.save()
if self.options['force'] or random.random() <= 0.02:
logging.debug(' ---> [%-30s] ~FBPerforming feed cleanup...' % (feed.title[:30],))
start_cleanup = time.time()
feed.sync_redis()
logging.debug(' ---> [%-30s] ~FBDone with feed cleanup. Took ~SB%.4s~SN sec.' % (feed.title[:30], time.time() - start_cleanup))
try:
self.count_unreads_for_subscribers(feed)
except TimeoutError:
logging.debug(' ---> [%-30s] Unread count took too long...' % (feed.title[:30],))
if self.options['verbose']:
logging.debug(u' ---> [%-30s] ~FBTIME: unread count in ~FM%.4ss' % (
feed.title[:30], time.time() - start))
except urllib2.HTTPError, e:
logging.debug(' ---> [%-30s] ~FRFeed throws HTTP error: ~SB%s' % (unicode(feed_id)[:30], e.fp.read()))
feed.save_feed_history(e.code, e.msg, e.fp.read())
fetched_feed = None
except Feed.DoesNotExist, e:
logging.debug(' ---> [%-30s] ~FRFeed is now gone...' % (unicode(feed_id)[:30]))
continue
except TimeoutError, e:
logging.debug(' ---> [%-30s] ~FRFeed fetch timed out...' % (feed.title[:30]))
feed.save_feed_history(505, 'Timeout', e)
feed_code = 505
fetched_feed = None
except Exception, e:
logging.debug('[%d] ! -------------------------' % (feed_id,))
tb = traceback.format_exc()
logging.error(tb)
logging.debug('[%d] ! -------------------------' % (feed_id,))
ret_feed = FEED_ERREXC
feed = Feed.get_by_id(getattr(feed, 'pk', feed_id))
if not feed: continue
feed.save_feed_history(500, "Error", tb)
feed_code = 500
fetched_feed = None
# mail_feed_error_to_admin(feed, e, local_vars=locals())
if (not settings.DEBUG and hasattr(settings, 'RAVEN_CLIENT') and
settings.RAVEN_CLIENT):
settings.RAVEN_CLIENT.captureException()
if not feed_code:
if ret_feed == FEED_OK:
feed_code = 200
elif ret_feed == FEED_SAME:
feed_code = 304
elif ret_feed == FEED_ERRHTTP:
feed_code = 400
if ret_feed == FEED_ERREXC:
feed_code = 500
elif ret_feed == FEED_ERRPARSE:
feed_code = 550
if not feed: continue
feed = self.refresh_feed(feed.pk)
if ((self.options['force']) or
(random.random() > .9) or
(fetched_feed and
feed.feed_link and
feed.has_page and
(ret_feed == FEED_OK or
(ret_feed == FEED_SAME and feed.stories_last_month > 10)))):
logging.debug(u' ---> [%-30s] ~FYFetching page: %s' % (feed.title[:30], feed.feed_link))
page_importer = PageImporter(feed)
try:
page_data = page_importer.fetch_page()
page_duration = time.time() - start_duration
except TimeoutError, e:
logging.debug(' ---> [%-30s] ~FRPage fetch timed out...' % (feed.title[:30]))
page_data = None
feed.save_page_history(555, 'Timeout', '')
except Exception, e:
logging.debug('[%d] ! -------------------------' % (feed_id,))
tb = traceback.format_exc()
logging.error(tb)
logging.debug('[%d] ! -------------------------' % (feed_id,))
feed.save_page_history(550, "Page Error", tb)
fetched_feed = None
page_data = None
# mail_feed_error_to_admin(feed, e, local_vars=locals())
if (not settings.DEBUG and hasattr(settings, 'RAVEN_CLIENT') and
settings.RAVEN_CLIENT):
settings.RAVEN_CLIENT.captureException()
feed = self.refresh_feed(feed.pk)
logging.debug(u' ---> [%-30s] ~FYFetching icon: %s' % (feed.title[:30], feed.feed_link))
force = self.options['force']
if random.random() > .99:
force = True
icon_importer = IconImporter(feed, page_data=page_data, force=force)
try:
icon_importer.save()
icon_duration = time.time() - start_duration
except TimeoutError, e:
logging.debug(' ---> [%-30s] ~FRIcon fetch timed out...' % (feed.title[:30]))
feed.save_page_history(556, 'Timeout', '')
except Exception, e:
logging.debug('[%d] ! -------------------------' % (feed_id,))
tb = traceback.format_exc()
logging.error(tb)
logging.debug('[%d] ! -------------------------' % (feed_id,))
# feed.save_feed_history(560, "Icon Error", tb)
# mail_feed_error_to_admin(feed, e, local_vars=locals())
if (not settings.DEBUG and hasattr(settings, 'RAVEN_CLIENT') and
settings.RAVEN_CLIENT):
settings.RAVEN_CLIENT.captureException()
else:
logging.debug(u' ---> [%-30s] ~FBSkipping page fetch: (%s on %s stories) %s' % (feed.title[:30], self.feed_trans[ret_feed], feed.stories_last_month, '' if feed.has_page else ' [HAS NO PAGE]'))
feed = self.refresh_feed(feed.pk)
delta = time.time() - start_time
feed.last_load_time = round(delta)
feed.fetched_once = True
try:
feed = feed.save(update_fields=['last_load_time', 'fetched_once'])
except IntegrityError:
logging.debug(" ---> [%-30s] ~FRIntegrityError on feed: %s" % (feed.title[:30], feed.feed_address,))
if ret_entries and ret_entries['new']:
self.publish_to_subscribers(feed)
done_msg = (u'%2s ---> [%-30s] ~FYProcessed in ~FM~SB%.4ss~FY~SN (~FB%s~FY) [%s]' % (
identity, feed.title[:30], delta,
feed.pk, self.feed_trans[ret_feed],))
logging.debug(done_msg)
total_duration = time.time() - start_duration
MAnalyticsFetcher.add(feed_id=feed.pk, feed_fetch=feed_fetch_duration,
feed_process=feed_process_duration,
page=page_duration, icon=icon_duration,
total=total_duration, feed_code=feed_code)
self.feed_stats[ret_feed] += 1
if len(feed_queue) == 1:
return feed
# time_taken = datetime.datetime.utcnow() - self.time_start
def publish_to_subscribers(self, feed):
try:
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
listeners_count = r.publish(str(feed.pk), 'story:new')
if listeners_count:
logging.debug(" ---> [%-30s] ~FMPublished to %s subscribers" % (feed.title[:30], listeners_count))
except redis.ConnectionError:
logging.debug(" ***> [%-30s] ~BMRedis is unavailable for real-time." % (feed.title[:30],))
def count_unreads_for_subscribers(self, feed):
user_subs = UserSubscription.objects.filter(feed=feed,
active=True,
user__profile__last_seen_on__gte=feed.unread_cutoff)\
.order_by('-last_read_date')
if not user_subs.count():
return
for sub in user_subs:
if not sub.needs_unread_recalc:
sub.needs_unread_recalc = True
sub.save()
if self.options['compute_scores']:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
stories = MStory.objects(story_feed_id=feed.pk,
story_date__gte=feed.unread_cutoff)
stories = Feed.format_stories(stories, feed.pk)
story_hashes = r.zrangebyscore('zF:%s' % feed.pk, int(feed.unread_cutoff.strftime('%s')),
int(time.time() + 60*60*24))
missing_story_hashes = set(story_hashes) - set([s['story_hash'] for s in stories])
if missing_story_hashes:
missing_stories = MStory.objects(story_feed_id=feed.pk,
story_hash__in=missing_story_hashes)\
.read_preference(pymongo.ReadPreference.PRIMARY)
missing_stories = Feed.format_stories(missing_stories, feed.pk)
stories = missing_stories + stories
logging.debug(u' ---> [%-30s] ~FYFound ~SB~FC%s(of %s)/%s~FY~SN un-secondaried stories while computing scores' % (feed.title[:30], len(missing_stories), len(missing_story_hashes), len(stories)))
cache.set("S:%s" % feed.pk, stories, 60)
logging.debug(u' ---> [%-30s] ~FYComputing scores: ~SB%s stories~SN with ~SB%s subscribers ~SN(%s/%s/%s)' % (
feed.title[:30], len(stories), user_subs.count(),
feed.num_subscribers, feed.active_subscribers, feed.premium_subscribers))
self.calculate_feed_scores_with_stories(user_subs, stories)
elif self.options.get('mongodb_replication_lag'):
logging.debug(u' ---> [%-30s] ~BR~FYSkipping computing scores: ~SB%s seconds~SN of mongodb lag' % (
feed.title[:30], self.options.get('mongodb_replication_lag')))
@timelimit(10)
def calculate_feed_scores_with_stories(self, user_subs, stories):
for sub in user_subs:
silent = False if self.options['verbose'] >= 2 else True
sub.calculate_feed_scores(silent=silent, stories=stories)
def add_jobs(self, feeds_queue, feeds_count=1):
""" adds a feed processing job to the pool
"""
self.feeds_queue = feeds_queue
self.feeds_count = feeds_count
def run_jobs(self):
if self.options['single_threaded']:
return self.process_feed_wrapper(self.feeds_queue[0])
else:
for i in range(self.num_threads):
feed_queue = self.feeds_queue[i]
self.workers.append(multiprocessing.Process(target=self.process_feed_wrapper,
args=(feed_queue,)))
for i in range(self.num_threads):
self.workers[i].start()
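# Minimal driving sketch, assuming feed ids are already partitioned into one queue per
# worker and the options dict carries the keys read above (force, verbose,
# single_threaded, updates_off, compute_scores, quick, fake):
#
#   options = dict(force=False, verbose=0, single_threaded=False,
#                  updates_off=False, compute_scores=True, quick=0, fake=False)
#   dispatcher = Dispatcher(options, num_threads=4)
#   dispatcher.add_jobs([[1, 2], [3, 4], [5, 6], [7, 8]], feeds_count=8)
#   dispatcher.run_jobs()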
| {
"content_hash": "0ecb49d9b7a44bfdf4f879a7f8b32e79",
"timestamp": "",
"source": "github",
"line_count": 802,
"max_line_length": 212,
"avg_line_length": 48.84538653366584,
"alnum_prop": 0.50298667483535,
"repo_name": "Suninus/NewsBlur",
"id": "1be57bf69e65d885822f1afb2377b4b9f6c8769b",
"size": "39174",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/feed_fetcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4431"
},
{
"name": "C++",
"bytes": "2926"
},
{
"name": "CSS",
"bytes": "674585"
},
{
"name": "CoffeeScript",
"bytes": "6451"
},
{
"name": "HTML",
"bytes": "266332"
},
{
"name": "Java",
"bytes": "700898"
},
{
"name": "JavaScript",
"bytes": "1561448"
},
{
"name": "M",
"bytes": "47696"
},
{
"name": "Nginx",
"bytes": "897"
},
{
"name": "Objective-C",
"bytes": "3716549"
},
{
"name": "Perl",
"bytes": "55598"
},
{
"name": "Python",
"bytes": "2385592"
},
{
"name": "R",
"bytes": "527"
},
{
"name": "Ruby",
"bytes": "870"
},
{
"name": "Shell",
"bytes": "40018"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
"""
Show Botan module dependencies as a list or graph.
Requires graphviz from pip when graphical output is selected:
https://pypi.python.org/pypi/graphviz
(C) 2015 Simon Warta (Kullo GmbH)
Botan is released under the Simplified BSD License (see license.txt)
"""
# global
import argparse
import copy
import sys
import subprocess
from collections import OrderedDict
import glob
import os
# Assume this script is in botan/src/scripts
botan_root = os.path.join(os.path.dirname(sys.argv[0]), "..", "..")
# locale
sys.path.append(botan_root)
from configure import ModuleInfo
parser = argparse.ArgumentParser(description=
'Show Botan module dependencies. '
'Indirect dependencies are removed from the output, '
'i.e. you must follow the result recursively to obtain all dependencies.')
parser.add_argument('mode',
choices=["list", "draw"],
help='The output mode')
parser.add_argument('--format',
nargs='?',
choices=["pdf", "png"],
default="pdf",
help='The file format (drawing mode only)')
parser.add_argument('--engine',
nargs='?',
choices=["fdp", "dot"],
default="fdp",
help='The graph engine (drawing mode only)')
parser.add_argument('--all', dest='all', action='store_const',
const=True, default=False,
help='Show all dependencies. Default: direct dependencies only. (list mode only)')
parser.add_argument('--verbose', dest='verbose', action='store_const',
const=True, default=False,
help='Verbose output (default: false)')
args = parser.parse_args()
files = []
files += glob.glob(botan_root + '/src/lib/*/*/*/*/*/*/info.txt')
files += glob.glob(botan_root + '/src/lib/*/*/*/*/*/info.txt')
files += glob.glob(botan_root + '/src/lib/*/*/*/*/info.txt')
files += glob.glob(botan_root + '/src/lib/*/*/*/info.txt')
files += glob.glob(botan_root + '/src/lib/*/*/info.txt')
files += glob.glob(botan_root + '/src/lib/*/info.txt')
files += glob.glob(botan_root + '/src/lib/info.txt')
files.sort()
if len(files) == 0:
print("No info.txt files found.")
sys.exit(1)
modules = []
def dicts(t): return {k: dicts(t[k]) for k in t}
def paths(t, path = [], level=0):
ret = []
for key in t:
ret.append(path + [key])
ret += paths(t[key], path + [key], level+1)
return ret
if args.verbose:
print("Getting dependencies from into.txt files ...")
for filename in files:
(rest, info_txt) = os.path.split(filename)
(rest, modname) = os.path.split(rest)
module = ModuleInfo(filename)
modules.append(module)
if args.verbose:
print(module.basename)
print("\t" + str(set(module.dependencies())))
if args.verbose:
print(str(len(modules)) + " modules:")
names=[m.basename for m in modules]
names.sort()
print(names)
print("")
if args.verbose:
print("resolving dependencies ...")
def cartinality(depdict):
return sum([len(depdict[k]) for k in depdict])
registered_dependencies = dict()
all_dependencies = dict()
direct_dependencies = dict()
for module in modules:
lst = module.dependencies()
registered_dependencies[module.basename] = set(lst) - set([module.basename])
# Get all_dependencies from registered_dependencies
def add_dependency():
for key in all_dependencies:
potentially_new_modules_for_key = None
new_modules_for_key = None
for currently_in in all_dependencies[key]:
if currently_in in all_dependencies:
potentially_new_modules_for_key = all_dependencies[currently_in] - set([key])
if not potentially_new_modules_for_key <= all_dependencies[key]:
new_modules_for_key = potentially_new_modules_for_key.copy()
break
if new_modules_for_key:
all_dependencies[key] |= new_modules_for_key
return
all_dependencies = copy.deepcopy(registered_dependencies)
direct_dependencies = copy.deepcopy(registered_dependencies)
# Sort
all_dependencies = OrderedDict(sorted(all_dependencies.items()))
direct_dependencies = OrderedDict(sorted(direct_dependencies.items()))
#print(direct_dependencies)
last_card = -1
while True:
card = cartinality(all_dependencies)
# print(card)
if card == last_card:
break
last_card = card
add_dependency()
# Return true iff a depends on b,
# i.e. b is in the dependencies of a
def depends_on(a, b):
if not a in direct_dependencies:
return False
else:
return b in direct_dependencies[a]
def remove_indirect_dependencies():
for mod in direct_dependencies:
for one in direct_dependencies[mod]:
others = direct_dependencies[mod] - set([one])
for other in others:
if depends_on(other, one):
direct_dependencies[mod].remove(one)
return
# Go to next mod
last_card = -1
while True:
card = cartinality(direct_dependencies)
# print(card)
if card == last_card:
break
last_card = card
remove_indirect_dependencies()
def openfile(f):
if sys.platform.startswith('linux'):
subprocess.call(["xdg-open", f])
else:
os.startfile(f)
if args.verbose:
print("Done resolving dependencies.")
if args.mode == "list":
if args.all:
for key in all_dependencies:
print(key.ljust(17) + " : " + ", ".join(sorted(all_dependencies[key])))
else:
for key in direct_dependencies:
print(key.ljust(17) + " : " + ", ".join(sorted(direct_dependencies[key])))
if args.mode == "draw":
import graphviz as gv
import tempfile
tmpdir = tempfile.mkdtemp(prefix="botan-")
g2 = gv.Digraph(format=args.format, engine=args.engine)
for key in direct_dependencies:
g2.node(key)
for dep in direct_dependencies[key]:
g2.edge(key, dep)
if args.verbose:
print("Rendering graph ...")
filename = g2.render(filename='graph', directory=tmpdir)
if args.verbose:
print("Opening " + filename + " ...")
openfile(filename)
| {
"content_hash": "895922c15154fc788779065a2a86c553",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 102,
"avg_line_length": 29.881516587677726,
"alnum_prop": 0.6177636796193497,
"repo_name": "Rohde-Schwarz-Cybersecurity/botan",
"id": "937626a8640628aa56ff619d80a6c1684032cb21",
"size": "6305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/scripts/show_dependencies.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "543"
},
{
"name": "C",
"bytes": "122862"
},
{
"name": "C++",
"bytes": "5400646"
},
{
"name": "Emacs Lisp",
"bytes": "1445"
},
{
"name": "HTML",
"bytes": "184"
},
{
"name": "Perl",
"bytes": "22545"
},
{
"name": "Python",
"bytes": "292335"
},
{
"name": "Shell",
"bytes": "5564"
},
{
"name": "XS",
"bytes": "16219"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup ( name = 'HoverSpace',
version = '1.0',
description = '',
long_description = '',
author = 'HoverSpace Developers',
author_email = '',
maintainer = 'Shubham Gupta, Vidhan Jain, Prashant Sharma, Shashwat Shalvi, Mohit Kumar',
maintainer_email = '[email protected], [email protected]',
url = '',
download_url = '',
packages = [],
classifiers = [],
platforms = [],
license = '',
zip_safe = False,
)
| {
"content_hash": "27581d6756f1ecc048abedd69e2ef848",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 97,
"avg_line_length": 31.055555555555557,
"alnum_prop": 0.5366726296958855,
"repo_name": "LNM-HoverSpace/HoverSpace",
"id": "c2e3266d6f78fe86a6c74fc52961c80d4e01b5bd",
"size": "559",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20801"
},
{
"name": "HTML",
"bytes": "22234"
},
{
"name": "JavaScript",
"bytes": "4606"
},
{
"name": "Python",
"bytes": "42898"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from config import DEFAULT_CONFIG as CONFIG
from metric_mock import MetricMock
# metrics libraries
import mixpanel
import librato
import Queue
import logging
import os
import platform
import pprint
import threading
import uuid
import copy as _copy
import requests as _requests
import sys
import urllib as _urllib
__ALL__ = [ 'MetricTracker' ]
try:
from .. import minipsutil
TOTAL_PHYMEM = minipsutil.total_memory()
NUM_CPUS = minipsutil.cpu_count()
except ImportError:
TOTAL_PHYMEM = 0
NUM_CPUS = 0
# global objects for producer/consumer for background metrics publishing
METRICS_QUEUE = Queue.Queue(maxsize=100)
METRICS_THREAD = None
SHUTDOWN_MESSAGE = 'SHUTTING_DOWN'
class _MetricsWorkerThread(threading.Thread):
"""Worker Thread for publishing metrics in the background."""
def __init__(self, mode, source):
threading.Thread.__init__(self, name='metrics-worker')
if CONFIG.version.endswith('.gpu'):
self._version = CONFIG.version.split('.gpu')[0]
self._isgpu = True
else:
self._version = CONFIG.version
self._isgpu = False
self._mode = mode
self._source = source
try:
# product key
from .. import product_key
self._product_key = product_key.get_product_key()
except Exception, e:
self._product_key = None
self.queue = METRICS_QUEUE
root_package_name = __import__(__name__.split('.')[0]).__name__
self.logger = logging.getLogger(root_package_name + '.metrics')
self._tracker = None # librato metrics tracker
self._mixpanel = None # Mixpanel metrics tracker
buffer_size = 5
offline_buffer_size = 25
self._sys_info_set = False
self._usable = False
try:
self._metrics_url = CONFIG.metrics_url
self._requests = _requests # support mocking out requests library in unit-tests
if self._mode != 'PROD':
self.logger.info("Using MetricMock instead of real metrics, mode is: %s" % self._mode)
self._tracker = MetricMock()
self._mixpanel = MetricMock()
else:
self._tracker = librato.connect(CONFIG.librato_user, CONFIG.librato_token)
self._mixpanel = mixpanel.Mixpanel(CONFIG.mixpanel_user)
except Exception, e:
self.logger.warning("Unexpected exception connecting to Metrics service, disabling metrics, exception %s" % e)
else:
self._usable = True
self._distinct_id = 'unknown'
self._distinct_id = self._get_distinct_id()
def run(self):
while True:
try:
metric = self.queue.get() # block until something received
if (metric['event_name'] == SHUTDOWN_MESSAGE):
# shutting down
self.queue.task_done()
break
self._track(metric['event_name'], metric['value'], metric['type'], metric['properties'], metric['meta'], metric['send_sys_info'])
self.queue.task_done()
except Exception as e:
pass
def _get_distinct_id(self):
if self._distinct_id == 'unknown':
poss_id = 'unknown'
gldir = os.path.join(os.path.expanduser('~'),'.graphlab')
try:
if not os.path.isdir(gldir):
os.makedirs(gldir)
except:
pass
id_file_path = os.path.join(gldir, "id")
if os.path.isfile(id_file_path):
try:
with open(id_file_path, 'r') as f:
poss_id = f.readline()
except:
return "session-" + str(uuid.uuid4())
else:
# no distinct id found from installation,
# try to create one and write it to the appropriate location
# if not able to write to appropriate location, then create temp one
new_id = str(uuid.uuid4())
try:
with open(id_file_path, "w") as id_file:
id_file.write(new_id)
except:
return "session-" + str(uuid.uuid4())
return new_id
return poss_id.strip()
else:
return self._distinct_id
@staticmethod
def _get_bucket_name_suffix(buckets, value):
"""
Given a list of buckets and a value, generate a suffix for the bucket
name, corresponding to either one of the buckets given, or the largest
bucket with "+" appended.
"""
suffix = None
for bucket in buckets:
if value <= bucket:
suffix = str(bucket)
break
# if we get here and suffix is None, value must be > the largest bucket
if suffix is None:
suffix = '%d+' % buckets[-1]
return suffix
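# Worked examples for the bucketing helper above, using the duration.secs buckets:
#   _get_bucket_name_suffix([300, 1800, 3600, 7200], 250)  -> '300'
#   _get_bucket_name_suffix([300, 1800, 3600, 7200], 5000) -> '7200'
#   _get_bucket_name_suffix([300, 1800, 3600, 7200], 9999) -> '7200+'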
@staticmethod
def _bucketize_mixpanel(event_name, value):
"""
Take events that we would like to bucketize and bucketize them before sending to mixpanel
@param event_name current event name, used to assess if bucketization is required
@param value value used to decide which bucket for event
@return event_name if updated then will have bucket appended as suffix, otherwise original returned
"""
if value == 1:
return event_name
bucket_events = {
'col.size': [ 5, 10, 20 ],
'row.size': [ 100000, 1000000, 10000000, 100000000 ],
'duration.secs': [ 300, 1800, 3600, 7200 ],
'duration.ms': [ 10, 100, 1000, 10000, 100000 ]
}
for (event_suffix, buckets) in bucket_events.iteritems():
if event_name.endswith(event_suffix):
# if the suffix matches one we expect, bucketize using the buckets defined above
return '%s.%s' % (event_name, _MetricsWorkerThread._get_bucket_name_suffix(buckets, value))
# if there was no suffix match, just use the original event name
return event_name
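# Worked examples for the bucketizer above:
#   _bucketize_mixpanel('feed.duration.secs', 400) -> 'feed.duration.secs.1800'
#   _bucketize_mixpanel('col.size', 7)             -> 'col.size.10'
#   _bucketize_mixpanel('engine-started', 1)       -> 'engine-started'  (value == 1 returned unchanged)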
def _set_sys_info(self):
# Don't do this if system info has been set
if self._sys_info_set:
return
self._sys_info = {}
# Get OS-specific info
self._sys_info['system'] = platform.system()
if self._sys_info['system'] == 'Linux':
self._sys_info['os_version'] = self._tup_to_flat_str(platform.linux_distribution())
self._sys_info['libc_version'] = self._tup_to_flat_str(platform.libc_ver())
elif self._sys_info['system'] == 'Darwin':
self._sys_info['os_version'] = self._tup_to_flat_str(platform.mac_ver())
elif self._sys_info['system'] == 'Windows':
self._sys_info['os_version'] = self._tup_to_flat_str(platform.win32_ver())
elif self._sys_info['system'] == 'Java':
self._sys_info['os_version'] = self._tup_to_flat_str(platform.java_ver())
# Python specific stuff
self._sys_info['python_implementation'] = platform.python_implementation()
self._sys_info['python_version'] = platform.python_version()
self._sys_info['python_build'] = self._tup_to_flat_str(platform.python_build())
self._sys_info['python_executable'] = sys.executable
# Dato specific stuff
self._sys_info['dato_launcher'] = 'DATO_LAUNCHER' in os.environ
# Get architecture info
self._sys_info['architecture'] = self._tup_to_flat_str(platform.architecture())
self._sys_info['platform'] = platform.platform()
self._sys_info['num_cpus'] = NUM_CPUS
# Get RAM size
self._sys_info['total_mem'] = TOTAL_PHYMEM
self._sys_info_set = True
def _print_sys_info(self):
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(self._sys_info)
def _tup_to_flat_str(self, tup):
tmp_list = []
for t in tup:
if isinstance(t, tuple):
tmp_str = self._tup_to_flat_str(t)
tmp_list.append(tmp_str)
elif isinstance(t, str):
tmp_list.append(t)
else:
# UNEXPECTED! Just don't crash
try:
tmp_list.append(str(t))
except:
pass
return " ".join(tmp_list)
def _send_mixpanel(self, event_name, value, properties, meta):
# Only send 'engine-started' events to Mixpanel. All other events are not sent to Mixpanel.
if 'engine-started' in event_name:
try:
# since mixpanel cannot send sizes or numbers, just tracks events, bucketize these here
if value != 1:
event_name = self._bucketize_mixpanel(event_name, value)
properties['value'] = value
properties['source'] = self._source
self._mixpanel.track(self._distinct_id, event_name, properties=properties, meta=meta)
except Exception as e:
pass
def _track(self, event_name, value=1, type="gauge", properties={}, meta={}, send_sys_info=False):
"""
Internal method to actually send metrics, expected to be called from background thread only.
"""
if not self._usable:
return
the_properties = {}
if send_sys_info:
if not self._sys_info_set:
self._set_sys_info()
the_properties.update(self._sys_info)
the_properties.update(properties)
try:
# librato
self._tracker.submit(name=event_name, value=value, type="gauge", source=self._source, attributes=the_properties)
except Exception as e:
pass
self._send_mixpanel(event_name=event_name, value=value, properties=the_properties, meta=meta)
try:
# homebrew metrics - cloudfront
if self._metrics_url != '':
cloudfront_props = {}
props = _copy.deepcopy(the_properties)
props.update(meta)
cloudfront_props['event_name'] = event_name
cloudfront_props['value'] = value
cloudfront_props['distinct_id'] = self._distinct_id
cloudfront_props['version'] = self._version
cloudfront_props['isgpu'] = self._isgpu
cloudfront_props['properties'] = _urllib.quote_plus(str(props))
# if product key is not set, then try to get it now when submitting
if not self._product_key:
try:
# product key
from .. import product_key
self._product_key = product_key.get_product_key()
except Exception, e:
self._product_key = 'Unknown'
pass
cloudfront_props['product_key'] = self._product_key
# self.logger.debug("SENDING '%s' to %s" % (cloudfront_props, self._metrics_url))
logging.getLogger('requests').setLevel(logging.CRITICAL)
self._requests.get(self._metrics_url, params=cloudfront_props)
except Exception as e:
pass
class MetricTracker:
def __init__(self, mode='UNIT', background_thread=True):
# setup logging
root_package_name = __import__(__name__.split('.')[0]).__name__
self.logger = logging.getLogger(root_package_name + '.metrics')
self._mode = mode
self._queue = METRICS_QUEUE
self._source = ("%s-%s" % (self._mode, CONFIG.version))
self.logger.debug("Running with metric source: %s" % self._source)
# background thread for metrics
self._thread = None
if background_thread:
self._start_queue_thread()
def __del__(self):
try:
self._stop_queue_thread()
except:
# Lot of strange exceptions can happen when destructing, not really anything we can do...
pass
def _stop_queue_thread(self):
# send the shutting down message, wait for thread to exit
if self._thread is not None:
self.track(SHUTDOWN_MESSAGE)
self._thread.join(2.0)
def track(self, event_name, value=1, type="gauge", properties={}, meta={}, send_sys_info=False):
"""
Publishes event / metric to metrics providers.
This method is a facade / proxy, queuing up this metric for a background thread to process.
"""
if self._mode != 'PROD' and (not (isinstance(value, int) or isinstance(value, float))):
raise Exception("Metrics attempted with value being not a number, unsupported.")
try:
item = dict(event_name=event_name, value=value, type=type, properties=properties, meta=meta, send_sys_info=send_sys_info)
self._queue.put_nowait(item) # don't wait if Queue is full, just silently ignore
except Queue.Full:
if not self._thread or not self._thread.is_alive():
self.logger.debug("Queue is full and background thread is no longer alive, trying to restart")
self._restart_queue_thread()
else:
self.logger.debug("Queue is full, doing nothing.")
except Exception as e:
self.logger.debug("Unexpected exception in queueing metrics, %s" % e)
def _start_queue_thread(self):
global METRICS_THREAD
if (self._thread is None):
self.logger.debug("Starting background thread")
self._thread = _MetricsWorkerThread(self._mode, self._source)
METRICS_THREAD = self._thread
self._thread.daemon = True
self._thread.start()
def _restart_queue_thread(self):
global METRICS_THREAD
if (self._thread is not None and self._thread.is_alive()):
self._stop_queue_thread()
METRICS_THREAD = None
del self._thread
self._thread = None
self._start_queue_thread()
| {
"content_hash": "19392f5d976eff1448bbcb23c807bdca",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 137,
"avg_line_length": 32.464646464646464,
"alnum_prop": 0.6369788425637835,
"repo_name": "nkhuyu/SFrame",
"id": "f66341e6757fa515f909c99fab01d7571c8f7f92",
"size": "12856",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "oss_src/unity/python/sframe/util/metric_tracker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "144591"
},
{
"name": "C++",
"bytes": "11412408"
},
{
"name": "CMake",
"bytes": "102271"
},
{
"name": "CSS",
"bytes": "127000"
},
{
"name": "HTML",
"bytes": "24575"
},
{
"name": "Hack",
"bytes": "277"
},
{
"name": "JavaScript",
"bytes": "20909"
},
{
"name": "Makefile",
"bytes": "9614"
},
{
"name": "Perl",
"bytes": "9663"
},
{
"name": "Python",
"bytes": "1947961"
},
{
"name": "R",
"bytes": "86286"
},
{
"name": "Scala",
"bytes": "5232"
},
{
"name": "Shell",
"bytes": "48586"
},
{
"name": "Smarty",
"bytes": "966"
},
{
"name": "XSLT",
"bytes": "74068"
}
],
"symlink_target": ""
} |
"""
Call NRL MSISE-00 using f2py from Python
Michael Hirsch, Ph.D.
Original fortran code from
http://nssdcftp.gsfc.nasa.gov/models/atmospheric/msis/nrlmsise00/
"""
from __future__ import annotations
import typing as T
import os
import logging
import importlib.resources
from datetime import datetime, date
import xarray
import numpy as np
import subprocess
import shutil
from .timeutils import todatetime
import geomagindices as gi
species = ["He", "O", "N2", "O2", "Ar", "Total", "H", "N", "AnomalousO"]
ttypes = ["Texo", "Tn"]
first = True
def build():
"""
attempt to build using CMake
"""
cmake = shutil.which("cmake")
if not cmake:
raise FileNotFoundError("CMake not available")
with importlib.resources.path(__package__, "CMakeLists.txt") as f:
s = f.parent
b = s / "build"
g = []
if os.name == "nt" and not os.environ.get("CMAKE_GENERATOR"):
g = ["-G", "MinGW Makefiles"]
subprocess.check_call([cmake, f"-S{s}", f"-B{b}"] + g)
subprocess.check_call([cmake, "--build", str(b), "--parallel"])
def run(
time: datetime,
altkm: float,
glat: float,
glon: float,
indices: dict[str, T.Any] = None,
) -> xarray.Dataset:
"""
loops the rungtd1d function below. Figure it's easier to troubleshoot in Python than Fortran.
"""
glat = np.atleast_2d(glat)
glon = np.atleast_2d(glon) # has to be here
# %% altitude 1-D
if glat.size == 1 and glon.size == 1 and isinstance(time, (str, date, datetime, np.datetime64)):
atmos = rungtd1d(time, altkm, glat.squeeze()[()], glon.squeeze()[()], indices)
# %% lat/lon grid at 1 altitude
else:
atmos = loopalt_gtd(time, glat, glon, altkm, indices)
return atmos
def loopalt_gtd(
time: datetime,
glat: float | np.ndarray,
glon: float | np.ndarray,
altkm: float,
indices: dict[str, T.Any] = None,
) -> xarray.Dataset:
"""
loop over location and time
time: datetime or numpy.datetime64 or list of datetime or ndarray of datetime
glat: float or 2-D ndarray
glon: float or 2-D ndarray
altkm: float or list or 1-D ndarray
"""
glat = np.atleast_2d(glat)
glon = np.atleast_2d(glon)
assert glat.ndim == glon.ndim == 2
times = np.atleast_1d(time) # type: ignore
assert times.ndim == 1
atmos = xarray.Dataset()
for t in times:
print("computing", t)
for i in range(glat.shape[0]):
for j in range(glat.shape[1]):
# atmos = xarray.concat((atmos, rungtd1d(t, altkm, glat[i,j], glon[i,j])),
# data_vars='minimal',coords='minimal',dim='lon')
atm = rungtd1d(t, altkm, glat[i, j], glon[i, j], indices)
atmos = xarray.merge((atmos, atm))
atmos.attrs = atm.attrs
return atmos
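# Minimal usage sketch (an illustration, not part of the original module). Assumes
# geomagindices can fetch indices for the requested date when the indices argument
# is omitted.
#
#   from datetime import datetime
#   from msise00.base import run
#   atmos = run(time=datetime(2017, 3, 1, 12), altkm=150.0, glat=65.0, glon=-148.0)
#   print(atmos["Tn"].squeeze())   # neutral temperature at that point
#
#   grid = run(time=datetime(2017, 3, 1, 12), altkm=[100.0, 150.0, 200.0],
#              glat=[[60.0, 65.0]], glon=[[-150.0, -148.0]])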
def rungtd1d(
time: datetime, altkm: float, glat: float, glon: float, indices: dict[str, T.Any] = None
) -> xarray.Dataset:
"""
This is the "atomic" function looped by other functions
"""
time = todatetime(time)
# %% get solar parameters for date
if not indices:
indices = gi.get_indices(time, smoothdays=81).squeeze().to_dict()
assert isinstance(indices, dict)
# %% dimensions
altkm = np.atleast_1d(altkm)
if altkm.ndim != 1:
raise ValueError("altitude read incorrectly")
if not isinstance(glon, (int, float, np.int32, np.int64)):
raise TypeError("single longitude only")
if not isinstance(glat, (int, float, np.int32, np.int64)):
raise TypeError("single latitude only")
# %%
doy = time.strftime("%j")
altkm = np.atleast_1d(altkm)
# %%
dens = np.empty((altkm.size, len(species)))
temp = np.empty((altkm.size, len(ttypes)))
# %% build on run
exe_name = "msise00_driver"
if os.name == "nt":
exe_name += ".exe"
# check inputs for error, especially unavailable indices
if not np.isfinite(glat).all():
raise ValueError("glat is not finite.")
if not np.isfinite(glon).all():
raise ValueError("glon is not finite.")
f107s = indices["f107s"]
if not np.isfinite(f107s):
raise ValueError("f107s is not finite.")
f107 = indices["f107"]
if not np.isfinite(f107):
raise ValueError("f107 is not finite.")
Ap = indices["Ap"]
if not np.isfinite(Ap):
raise ValueError("Ap is not finite.")
try:
with importlib.resources.path(__package__, exe_name) as exe:
pass
except FileNotFoundError:
build()
with importlib.resources.path(__package__, exe_name) as exe:
for i, a in enumerate(altkm):
cmd = [
str(exe),
doy,
str(time.hour),
str(time.minute),
str(time.second),
str(glat),
str(glon),
str(f107s),
str(f107),
str(Ap),
str(a),
]
logging.info(" ".join(cmd))
ret = subprocess.check_output(cmd, text=True)
# different compilers throw in extra \n
raw = list(map(float, ret.split()))
if not len(raw) == 9 + 2:
raise ValueError(ret)
dens[i, :] = raw[:9]
temp[i, :] = raw[9:]
dsf = {
k: (("time", "alt_km", "lat", "lon"), v[None, :, None, None])
for (k, v) in zip(species, dens.T)
}
dsf.update(
{
"Tn": (("time", "alt_km", "lat", "lon"), temp[:, 1][None, :, None, None]),
"Texo": (("time", "alt_km", "lat", "lon"), temp[:, 0][None, :, None, None]),
}
)
atmos = xarray.Dataset(
dsf, # type: ignore
coords={"time": [time], "alt_km": altkm, "lat": [glat], "lon": [glon]},
attrs={
"species": species,
"f107s": indices["f107s"],
"f107": indices["f107"],
"Ap": indices["Ap"],
},
)
return atmos
| {
"content_hash": "42a259802620c0f77370b8e1d1571727",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 100,
"avg_line_length": 28.19178082191781,
"alnum_prop": 0.5573372206025268,
"repo_name": "scienceopen/msise00",
"id": "97b1bad899e576253e11305cb2b5d74c8d8794e9",
"size": "6174",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/msise00/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "255159"
},
{
"name": "Makefile",
"bytes": "673"
},
{
"name": "Python",
"bytes": "12600"
}
],
"symlink_target": ""
} |
"""Tests for tensor2tensor.data_generators.algorithmic_math."""
# TODO(rsepassi): This test is flaky. Disable, remove, or update.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import sympy
from tensor2tensor.data_generators import algorithmic_math
import tensorflow.compat.v1 as tf
class AlgorithmicMathTest(tf.test.TestCase):
def testAlgebraInverse(self):
dataset_objects = algorithmic_math.math_dataset_init(26)
counter = 0
for d in algorithmic_math.algebra_inverse(26, 0, 3, 10):
counter += 1
decoded_input = dataset_objects.int_decoder(d["inputs"])
solve_var, expression = decoded_input.split(":")
lhs, rhs = expression.split("=")
# Solve for the solve-var.
result = sympy.solve("%s-(%s)" % (lhs, rhs), solve_var)
target_expression = dataset_objects.int_decoder(d["targets"])
# Check that the target and sympy's solutions are equivalent.
self.assertEqual(
0, sympy.simplify(str(result[0]) + "-(%s)" % target_expression))
self.assertEqual(counter, 10)
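# Illustration of the check above: for a decoded input such as 'x:y=x+b' the test asks
# sympy to solve y - (x + b) = 0 for x, giving y - b, and then verifies that
# sympy.simplify('(y - b)-(<decoded target>)') reduces to 0.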
def testAlgebraSimplify(self):
dataset_objects = algorithmic_math.math_dataset_init(8, digits=5)
counter = 0
for d in algorithmic_math.algebra_simplify(8, 0, 3, 10):
counter += 1
expression = dataset_objects.int_decoder(d["inputs"])
target = dataset_objects.int_decoder(d["targets"])
# Check that the input and output are equivalent expressions.
self.assertEqual(0, sympy.simplify("%s-(%s)" % (expression, target)))
self.assertEqual(counter, 10)
def testCalculusIntegrate(self):
dataset_objects = algorithmic_math.math_dataset_init(
8, digits=5, functions={"log": "L"})
counter = 0
for d in algorithmic_math.calculus_integrate(8, 0, 3, 10):
counter += 1
decoded_input = dataset_objects.int_decoder(d["inputs"])
var, expression = decoded_input.split(":")
target = dataset_objects.int_decoder(d["targets"])
for fn_name, fn_char in six.iteritems(dataset_objects.functions):
target = target.replace(fn_char, fn_name)
# Take the derivative of the target.
derivative = str(sympy.diff(target, var))
# Check that the derivative of the integral equals the input.
self.assertEqual(0, sympy.simplify("%s-(%s)" % (expression, derivative)))
self.assertEqual(counter, 10)
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "8b69ce11309c6df20b97fc6ef236ead6",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 36.23529411764706,
"alnum_prop": 0.6728896103896104,
"repo_name": "tensorflow/tensor2tensor",
"id": "b9d89a575a267574026767a1de6cc4612de3d39d",
"size": "3070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/data_generators/algorithmic_math_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "32015"
},
{
"name": "HTML",
"bytes": "34684"
},
{
"name": "JavaScript",
"bytes": "78408"
},
{
"name": "Jupyter Notebook",
"bytes": "2859453"
},
{
"name": "Python",
"bytes": "5109255"
},
{
"name": "Shell",
"bytes": "11941"
}
],
"symlink_target": ""
} |
from piston.handler import BaseHandler, rc
from systems.models import SystemRack
import re
try:
import json
except:
from django.utils import simplejson as json
from django.test.client import Client
from settings import API_ACCESS
class SystemRackHandler(BaseHandler):
#allowed_methods = API_ACCESS
allowed_methods = ('GET',)
#exclude = ('name','id', ('location', ('name', 'id') ) )
fields = (
('id'),
('name'),
('site', ('id', 'full_name')),
)
model = SystemRack
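# NOTE: create(), update() and delete() below reference NetworkAdapter and DHCP, which
# are not imported in this module (they appear to be carried over from a NetworkAdapter
# handler); with allowed_methods restricted to GET above, only read() is reachable.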
def create(self, request, network_adapter_id=None):
n = NetworkAdapter()
if 'system_id' in request.POST:
n.system_id = request.POST['system_id']
if 'mac_address' in request.POST:
n.mac_address = request.POST['mac_address']
if 'ip_address' in request.POST:
n.ip_address = request.POST['ip_address']
if 'adapter_name' in request.POST:
n.adapter_name = request.POST['adapter_name']
if 'option_file_name' in request.POST:
n.option_file_name = request.POST['option_file_name']
if 'option_domain_name' in request.POST:
n.option_domain_name = request.POST['option_domain_name']
if 'option_host_name' in request.POST:
            n.option_host_name = request.POST['option_host_name']
if 'dhcp_scope' in request.POST:
try:
n.dhcp_scope = DHCP.objects.get(scope_name=request.POST['dhcp_scope'])
except:
pass
try:
n.save()
resp = rc.ALL_OK
resp.write('json = {"id":%i}' % (n.id))
except:
resp = rc.NOT_FOUND
resp.write('Unable to Create Host')
return resp
def read(self, request, system_rack_id=None):
base = SystemRack.objects
if system_rack_id:
try:
return base.order_by('id').get(id=system_rack_id)
except:
resp = rc.NOT_FOUND
resp.write('Unable to find System Rack')
return resp
else:
return base.order_by('id').all()
def update(self, request, network_adapter_id=None):
if request.method == 'PUT':
try:
n = NetworkAdapter.objects.get(pk=network_adapter_id)
if 'system_id' in request.POST:
n.system_id = request.POST['system_id']
if 'mac_address' in request.POST:
n.mac_address = request.POST['mac_address']
if 'ip_address' in request.POST:
n.ip_address = request.POST['ip_address']
if 'adapter_name' in request.POST:
n.adapter_name = request.POST['adapter_name']
if 'option_file_name' in request.POST:
n.file_name = request.POST['option_file_name']
else:
n.file_name = ''
if 'option_domain_name' in request.POST:
n.option_domain_name = request.POST['option_domain_name']
else:
n.option_domain_name = ''
if 'option_host_name' in request.POST:
n.option_host_name = request.POST['option_host_name']
else:
n.option_host_name = ''
if 'dhcp_scope' in request.POST:
try:
n.dhcp_scope = DHCP.objects.get(scope_name=request.POST['dhcp_scope'])
except:
pass
n.save()
resp = rc.ALL_OK
resp.write('json = {"id":%i, "mac_address":"%s", "ip_address":"%s", "dhcp_scope":"%s", "system_id":"%s","option_file_name":"%s"}' % (n.id, n.mac_address, n.ip_address, n.dhcp_scope, n.system_id, n.file_name))
except:
resp = rc.NOT_FOUND
else:
resp = rc.NOT_FOUND
return resp
def delete(self, request, network_adapter_id=None):
try:
n = NetworkAdapter.objects.get(id=network_adapter_id)
n.delete()
network_adapter_id = str(network_adapter_id)
resp = rc.ALL_OK
resp.write('json = {"id":%s}' % (network_adapter_id))
except:
resp = rc.NOT_FOUND
return resp
| {
"content_hash": "cb4cb8cde56eb57c721154ed7944e427",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 224,
"avg_line_length": 38.417391304347824,
"alnum_prop": 0.5135808057944772,
"repo_name": "rtucker-mozilla/inventory",
"id": "93bde49a638cca44a39a29ec6d83b20bc0cfcd76",
"size": "4418",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api_v2/system_rack_handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5104"
},
{
"name": "CSS",
"bytes": "362837"
},
{
"name": "CoffeeScript",
"bytes": "9538"
},
{
"name": "HTML",
"bytes": "1195738"
},
{
"name": "JavaScript",
"bytes": "1530665"
},
{
"name": "Makefile",
"bytes": "14421"
},
{
"name": "PHP",
"bytes": "27273"
},
{
"name": "Python",
"bytes": "3642241"
},
{
"name": "Shell",
"bytes": "1783"
}
],
"symlink_target": ""
} |
"""Echo WebSocket handler for real time collaboration with Yjs"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import uuid
import time
from tornado.ioloop import IOLoop
from tornado.websocket import WebSocketHandler
from enum import IntEnum
## The y-protocol defines message types that just need to be propagated to all other peers.
## Here, we define some additional messageTypes that the server can interpret.
## Messages that the server can't interpret should be broadcast to all other clients.
class ServerMessageType(IntEnum):
# The client is asking for a lock. Should return a lock-identifier if one is available.
ACQUIRE_LOCK = 127
# The client is asking to release a lock to make it available to other users again.
RELEASE_LOCK = 126
# The client is asking to retrieve the initial state of the Yjs document. Return an empty buffer when nothing is available.
REQUEST_INITIALIZED_CONTENT = 125
# The client retrieved an empty "initial content" and generated the initial state of the document after acquiring a lock. Store this.
PUT_INITIALIZED_CONTENT = 124
# The client moved the document to a different location. After receiving this message, we make the current document available under a different url.
# The other clients are automatically notified of this change because the path is shared through the Yjs document as well.
RENAME_SESSION = 123
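# Wire format (as handled in on_message/write_message below): each frame is a
# binary message whose first byte is the type and whose remainder is the
# payload, e.g. the ACQUIRE_LOCK reply is
#   bytes([ServerMessageType.ACQUIRE_LOCK]) + lock.to_bytes(4, byteorder='little')
# and RENAME_SESSION carries the new document path as UTF-8 in message[1:].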
class YjsRoom:
def __init__(self):
self.lock = None
self.timeout = None
self.lock_holder = None
self.clients = {}
self.content = bytes([])
class YjsEchoWebSocket(WebSocketHandler):
rooms = {}
    # Override max_message_size to 1GB
@property
def max_message_size(self):
return 1024 * 1024 * 1024
def open(self, guid):
#print("[YJSEchoWS]: open", guid)
cls = self.__class__
self.id = str(uuid.uuid4())
self.room_id = guid
room = cls.rooms.get(self.room_id)
if room is None:
room = YjsRoom()
cls.rooms[self.room_id] = room
room.clients[self.id] = ( IOLoop.current(), self.hook_send_message, self )
# Send SyncStep1 message (based on y-protocols)
self.write_message(bytes([0, 0, 1, 0]), binary=True)
def on_message(self, message):
#print("[YJSEchoWS]: message, ", message)
cls = self.__class__
room_id = self.room_id
room = cls.rooms.get(room_id)
if message[0] == ServerMessageType.ACQUIRE_LOCK:
now = int(time.time())
if room.lock is None or now - room.timeout > (10 * len(room.clients)) : # no lock or timeout
room.lock = now
room.timeout = now
room.lock_holder = self.id
# print('Acquired new lock: ', room.lock)
# return acquired lock
self.write_message(bytes([ServerMessageType.ACQUIRE_LOCK]) + room.lock.to_bytes(4, byteorder = 'little'), binary=True)
elif room.lock_holder == self.id :
# print('Update lock: ', room.timeout)
room.timeout = now
elif message[0] == ServerMessageType.RELEASE_LOCK:
releasedLock = int.from_bytes(message[1:], byteorder = 'little')
# print("trying release lock: ", releasedLock)
if room.lock == releasedLock:
# print('released lock: ', room.lock)
room.lock = None
room.timeout = None
room.lock_holder = None
elif message[0] == ServerMessageType.REQUEST_INITIALIZED_CONTENT:
# print("client requested initial content")
self.write_message(bytes([ServerMessageType.REQUEST_INITIALIZED_CONTENT]) + room.content, binary=True)
elif message[0] == ServerMessageType.PUT_INITIALIZED_CONTENT:
# print("client put initialized content")
room.content = message[1:]
elif message[0] == ServerMessageType.RENAME_SESSION:
# We move the room to a different entry and also change the room_id property of each connected client
new_room_id = message[1:].decode("utf-8")
for client_id, (loop, hook_send_message, client) in room.clients.items() :
client.room_id = new_room_id
cls.rooms.pop(room_id)
cls.rooms[new_room_id] = room
# print("renamed room to " + new_room_id + ". Old room name was " + room_id)
elif room:
for client_id, (loop, hook_send_message, client) in room.clients.items() :
if self.id != client_id :
loop.add_callback(hook_send_message, message)
def on_close(self):
# print("[YJSEchoWS]: close")
cls = self.__class__
room = cls.rooms.get(self.room_id)
room.clients.pop(self.id)
if len(room.clients) == 0 :
cls.rooms.pop(self.room_id)
# print("[YJSEchoWS]: close room " + self.room_id)
return True
def check_origin(self, origin):
#print("[YJSEchoWS]: check origin")
return True
def hook_send_message(self, msg):
self.write_message(msg, binary=True)
| {
"content_hash": "644b71b8b8f5b037c98aa00ca397e000",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 152,
"avg_line_length": 43.52892561983471,
"alnum_prop": 0.6217960888551357,
"repo_name": "jupyter/jupyterlab",
"id": "a9bbd7b90a1c14873029dcdb7c38f470f61e9556",
"size": "5267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jupyterlab/handlers/yjs_echo_ws.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7475"
},
{
"name": "CSS",
"bytes": "94068"
},
{
"name": "HTML",
"bytes": "1493"
},
{
"name": "JavaScript",
"bytes": "9240"
},
{
"name": "Makefile",
"bytes": "7654"
},
{
"name": "Python",
"bytes": "74649"
},
{
"name": "Shell",
"bytes": "2344"
},
{
"name": "TypeScript",
"bytes": "1090669"
}
],
"symlink_target": ""
} |
from diasporapy.engine import handlers
import firenado.core
class PodComponent(firenado.core.TornadoComponent):
def get_handlers(self):
return [
(r'/', handlers.IndexHandler),
(r'/stream', handlers.StreamHandler),
]
def install(self):
import diasporapy.pod.models as models
from firenado.util.sqlalchemy_util import Base
import uuid
import datetime
print 'Installing Diasporapy Pod...'
print 'Creating Pod ...'
print self.application.get_data_source('pod').get_connection()
engine = self.application.get_data_source('pod').get_connection()['engine']
engine.echo = True
# Dropping all
# TODO Not to drop all if something is installed right?
Base.metadata.drop_all(engine)
# Creating database
Base.metadata.create_all(engine)
#session = self.__get_connection_handler().get_connection()['session']
#session.commit()
#print 'Colony %s created at %s' % (
#base_colony.name, base_colony.created_at)
if __name__ == '__main__':
import firenado.conf
from firenado.core import TornadoApplication
app = TornadoApplication()
    app.components['pod'].install()
| {
"content_hash": "9bd869121063cdda9cd9f7bb8e354534",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 83,
"avg_line_length": 27.04255319148936,
"alnum_prop": 0.6270653029110936,
"repo_name": "candango/socialspider",
"id": "5181f605d6ac42c018a3d9b1f40aa02373e8a72a",
"size": "1933",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "diasporapy/engine/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3151"
},
{
"name": "HTML",
"bytes": "13743"
},
{
"name": "JavaScript",
"bytes": "5895"
},
{
"name": "Python",
"bytes": "91435"
}
],
"symlink_target": ""
} |
"""Implementation of Inspector abstraction for Hyper-V"""
import collections
import functools
import sys
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
from oslo_utils import units
import six
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
def convert_exceptions(function, exception_map):
expected_exceptions = tuple(exception_map.keys())
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except expected_exceptions as ex:
# exception might be a subclass of an expected exception.
for expected in expected_exceptions:
if isinstance(ex, expected):
raised_exception = exception_map[expected]
break
exc_info = sys.exc_info()
# NOTE(claudiub): Python 3 raises the exception object given as
# the second argument in six.reraise.
# The original message will be maintained by passing the original
# exception.
exc = raised_exception(six.text_type(exc_info[1]))
six.reraise(raised_exception, exc, exc_info[2])
return wrapper
def decorate_all_methods(decorator, *args, **kwargs):
def decorate(cls):
for attr in cls.__dict__:
class_member = getattr(cls, attr)
if callable(class_member):
setattr(cls, attr, decorator(class_member, *args, **kwargs))
return cls
return decorate
exception_conversion_map = collections.OrderedDict([
# NOTE(claudiub): order should be from the most specialized exception type
# to the most generic exception type.
# (expected_exception, converted_exception)
(os_win_exc.NotFound, virt_inspector.InstanceNotFoundException),
(os_win_exc.OSWinException, virt_inspector.InspectorException),
])
# NOTE(claudiub): the purpose of the decorator below is to prevent any
# os_win exceptions (subclasses of OSWinException) to leak outside of the
# HyperVInspector.
@decorate_all_methods(convert_exceptions, exception_conversion_map)
class HyperVInspector(virt_inspector.Inspector):
def __init__(self):
super(HyperVInspector, self).__init__()
self._utils = utilsfactory.get_metricsutils()
self._host_max_cpu_clock = self._compute_host_max_cpu_clock()
def _compute_host_max_cpu_clock(self):
hostutils = utilsfactory.get_hostutils()
# host's number of CPUs and CPU clock speed will not change.
cpu_info = hostutils.get_cpus_info()
host_cpu_count = len(cpu_info)
host_cpu_clock = cpu_info[0]['MaxClockSpeed']
return float(host_cpu_clock * host_cpu_count)
def inspect_cpus(self, instance):
instance_name = util.instance_name(instance)
(cpu_clock_used,
cpu_count, uptime) = self._utils.get_cpu_metrics(instance_name)
cpu_percent_used = cpu_clock_used / self._host_max_cpu_clock
# Nanoseconds
cpu_time = (int(uptime * cpu_percent_used) * units.k)
return virt_inspector.CPUStats(number=cpu_count, time=cpu_time)
def inspect_memory_usage(self, instance, duration=None):
instance_name = util.instance_name(instance)
usage = self._utils.get_memory_metrics(instance_name)
return virt_inspector.MemoryUsageStats(usage=usage)
def inspect_vnics(self, instance):
instance_name = util.instance_name(instance)
for vnic_metrics in self._utils.get_vnic_metrics(instance_name):
interface = virt_inspector.Interface(
name=vnic_metrics["element_name"],
mac=vnic_metrics["address"],
fref=None,
parameters=None)
stats = virt_inspector.InterfaceStats(
rx_bytes=vnic_metrics['rx_mb'] * units.Mi,
rx_packets=0,
tx_bytes=vnic_metrics['tx_mb'] * units.Mi,
tx_packets=0)
yield (interface, stats)
def inspect_disks(self, instance):
instance_name = util.instance_name(instance)
for disk_metrics in self._utils.get_disk_metrics(instance_name):
disk = virt_inspector.Disk(device=disk_metrics['instance_id'])
stats = virt_inspector.DiskStats(
read_requests=0,
# Return bytes
read_bytes=disk_metrics['read_mb'] * units.Mi,
write_requests=0,
write_bytes=disk_metrics['write_mb'] * units.Mi,
errors=0)
yield (disk, stats)
def inspect_disk_latency(self, instance):
instance_name = util.instance_name(instance)
for disk_metrics in self._utils.get_disk_latency_metrics(
instance_name):
disk = virt_inspector.Disk(device=disk_metrics['instance_id'])
stats = virt_inspector.DiskLatencyStats(
disk_latency=disk_metrics['disk_latency'])
yield (disk, stats)
def inspect_disk_iops(self, instance):
instance_name = util.instance_name(instance)
for disk_metrics in self._utils.get_disk_iops_count(instance_name):
disk = virt_inspector.Disk(device=disk_metrics['instance_id'])
stats = virt_inspector.DiskIOPSStats(
iops_count=disk_metrics['iops_count'])
yield (disk, stats)
| {
"content_hash": "ee4582d70ca62624773a29af93846b70",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 78,
"avg_line_length": 37.35616438356164,
"alnum_prop": 0.6338467180051338,
"repo_name": "idegtiarov/ceilometer",
"id": "38409295405d7fc566986d2d06ce949283944553",
"size": "6042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/compute/virt/hyperv/inspector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2506039"
},
{
"name": "Shell",
"bytes": "33383"
}
],
"symlink_target": ""
} |
import os
import csv
import bz2
import gzip
import re
import urllib2
import socket
from apache_reader import ApacheLogReader
# dictionary key structure: filename, user_agent, package_name
class LocalStats(object):
"""Base class that writes the log file
"""
def _get_logs(self, logfile, file_urls):
"""Needs to return an iterator. Each entry
should be a dictionary"""
if callable(logfile):
return logfile(file_urls)
raise NotImplementedError
def _get_file_obj(self, path, mode='r', compression=None):
"""returns a file object"""
if compression == 'bz2':
return bz2.BZ2File(path, mode)
elif compression == 'gz':
return gzip.open(path, mode)
return open(path, mode)
def _build_stats(self, logfile, fileobj, files_url='/packages',
filter=None, compression=None):
"""Builds a stats file
- logfile: path to the original log file, or callable
- fileobj : a file object or a path to create a file
        - files_url : a filter that defines the beginning of package urls
- filter: if given, a callable that receives the
current line. if the callable returns True,
the line is not included
"""
downloads = {}
for log in self._get_logs(logfile, files_url):
if filter is not None:
if filter(log):
continue
filename = log['filename']
user_agent = log['useragent']
package_name = log['packagename']
key = (filename, user_agent, package_name)
count = log.get('count', 1)
if key in downloads:
downloads[key] += count
else:
downloads[key] = count
self._write_stats(fileobj, downloads, compression=compression)
def _write_stats(self, fileobj, downloads, compression=None):
if isinstance(fileobj, str):
fileobj = self._get_file_obj(fileobj, 'w', compression)
file_created = True
else:
file_created = False
writer = csv.writer(fileobj)
filenames = downloads.keys()
filenames.sort()
for key in filenames:
filename, user_agent, package_name = key
count = downloads[key]
writer.writerow((package_name, filename, user_agent, count))
if file_created:
fileobj.close()
def build_daily_stats(self, year, month, day, logfile, fileobj,
files_url='/packages', compression=None):
"""creates a daily stats file using an apache log file.
- year, month, day: values for the day
- logfile : path to the log file, or callable
- fileobj : a file object or a path to create a file
        - files_url : a filter that defines the beginning of package urls
"""
def _filter(log):
return (day != log['day'] or month != log['month'] or
year != log['year'])
self._build_stats(logfile, fileobj, files_url, _filter, compression)
def build_monthly_stats(self, year, month, logfile, fileobj,
files_url='/packages', compression=None):
"""creates a monthly stats file using an apache log file.
- year, month: values for the month
- logfile : path to the log file
- fileobj : a file object or a path to create a file
        - files_url : a filter that defines the beginning of package urls
"""
def _filter(log):
return (month != log['month'] or year != log['year'])
self._build_stats(logfile, fileobj, files_url, _filter, compression)
def read_stats(self, stats_file):
"""Returns an iterator over a stats file"""
if isinstance(stats_file, str):
ext = os.path.splitext(stats_file)[-1][1:]
stats_file = self._get_file_obj(stats_file, 'r', ext)
reader = csv.reader(stats_file)
for line in reader:
yield {'packagename': line[0],
'filename': line[1],
'useragent': line[2],
'count': int(line[3])}
#reader.close()
def read_stats_dict(self, stats_file):
res = {}
for r in self.read_stats(stats_file):
key = (r['filename'], r['useragent'], r['packagename'])
value = r['count']
res[key] = value
return res
def build_local_stats(self, year, month, day, logfile, directory=None):
"""builds local stats with default values"""
filename = '%d-%.2d-%.2d.bz2' % (year, month, day)
if directory is not None:
filename = os.path.join(directory, filename)
self.build_daily_stats(year, month, day, logfile, filename,
compression='bz2')
def integrate_stats(self, targetdir, year, month, day, fd):
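        # NOTE: unlike build_local_stats above, which zero-pads with "%.2d", the
        # paths below format month/day with "%.2s" (string truncation), so the
        # generated names only line up if callers pass already zero-padded values.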
new = self.read_stats_dict(fd)
oldpath = "%s/days/%s-%.2s-%.2s.bz2" % (targetdir, year, month, day)
if os.path.exists(oldpath):
old = self.read_stats_dict(oldpath)
for k, v in new.items():
old[k] = old.get(k, 0) + v
else:
old = new
self._write_stats(oldpath, old, 'bz2')
monthpath = "%s/months/%s-%.2s.bz2" % (targetdir, year, month)
if os.path.exists(monthpath):
old = self.read_stats_dict(monthpath)
for k, v in new.items():
old[k] = old.get(k, 0) + v
else:
old = new
self._write_stats(monthpath, old, 'bz2')
return new
class ApacheLocalStats(LocalStats):
"""concrete class that uses the ApacheLogReader"""
def _get_logs(self, logfile, files_url):
return ApacheLogReader(logfile, files_url)
class ApacheDistantLocalStats(ApacheLocalStats):
"""Concrete class that gets the data from a distant file"""
is_url = re.compile(r'^http://')
def __init__(self, cache_folder='', timeout=5):
self.cache_folder = cache_folder
if not os.path.exists(cache_folder):
os.makedirs(cache_folder)
self.timeout = timeout
def get_and_cache(self, url):
"""retrieve the distant file and add it in the local
cache"""
basename = url.split('/')[-1]
filename = os.path.join(self.cache_folder, basename)
if os.path.exists(filename):
# in cache, let's return it
return filename, open(filename)
# not in cache, we need to retrieve it
# and store it
oldtimeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(self.timeout)
try:
try:
content = urllib2.urlopen(url).read()
except (urllib2.URLError, socket.timeout):
return '', None
finally:
socket.setdefaulttimeout(oldtimeout)
f = open(filename, 'w')
try:
f.write(content)
finally:
f.close()
return filename, open(filename)
def read_stats(self, stats_file):
"""retrieve a distant file and works with it"""
if self.is_url.search(stats_file) is not None:
path, fileobj = self.get_and_cache(stats_file)
if path == '':
return iter([])
return ApacheLocalStats.read_stats(self, path)
| {
"content_hash": "bbbbf3ea9d24a80cb631e0168bd7fe9a",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 76,
"avg_line_length": 36.2512077294686,
"alnum_prop": 0.5611673773987207,
"repo_name": "techtonik/pydotorg.pypi",
"id": "1142fcee0f8407974a87e4743f85f6657c5cdca3",
"size": "7504",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/apache_stats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "195"
},
{
"name": "CSS",
"bytes": "52191"
},
{
"name": "Makefile",
"bytes": "208"
},
{
"name": "PLpgSQL",
"bytes": "10792"
},
{
"name": "Python",
"bytes": "397864"
}
],
"symlink_target": ""
} |
""" Make REST API requests """
from typing import Any, Dict, Iterable, Optional, Protocol
import aiohttp
from gcpdiag.queries import apis_utils
class Creds(Protocol):
def update_headers(self, headers: Dict[str, str]) -> None:
pass
class Sleeper(Protocol):
async def sleep(self, seconds: float) -> None:
pass
class RetryStrategy(Protocol):
def get_sleep_intervals(self) -> Iterable[float]:
pass
class API:
""" Class abstracting aspects of REST API requests """
def __init__(self, creds: Creds, retry_strategy: RetryStrategy,
sleeper: Sleeper) -> None:
self._creds = creds
self._retry_strategy = retry_strategy
self._sleeper = sleeper
async def call(self,
method: str,
url: str,
json: Optional[Any] = None) -> Any:
for timeout in self._retry_strategy.get_sleep_intervals():
async with aiohttp.request(method,
url,
headers=self._get_headers(),
json=json) as resp:
if resp.status == 200:
return await resp.json()
if not apis_utils.should_retry(resp.status):
raise RuntimeError(
f'http status {resp.status} calling {method} {url}')
await self._sleeper.sleep(timeout)
raise RuntimeError('failed to get an API response')
def _get_headers(self) -> Dict[str, str]:
headers: Dict[str, str] = {}
self._creds.update_headers(headers)
return headers
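# Illustrative usage sketch (the concrete Creds/RetryStrategy/Sleeper classes
# named here are hypothetical, not part of this module): any objects satisfying
# the protocols above can be combined, e.g.
#   api = API(creds=AccessTokenCreds(), retry_strategy=ExponentialBackoff(),
#             sleeper=AsyncioSleeper())
#   body = await api.call('GET', 'https://example.googleapis.com/v1/resource')  # inside a coroutine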
| {
"content_hash": "a4641f3966392ed74b35b395b118992d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 66,
"avg_line_length": 27.446428571428573,
"alnum_prop": 0.5972674040338322,
"repo_name": "GoogleCloudPlatform/gcpdiag",
"id": "d11db9a1b1abf61fa86091816f9a568547ec8a49",
"size": "1537",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gcpdiag/async_queries/api/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4610"
},
{
"name": "HCL",
"bytes": "90111"
},
{
"name": "HTML",
"bytes": "8149"
},
{
"name": "Jinja",
"bytes": "1231"
},
{
"name": "Makefile",
"bytes": "51860"
},
{
"name": "Python",
"bytes": "792739"
},
{
"name": "SCSS",
"bytes": "1435"
},
{
"name": "Shell",
"bytes": "10973"
},
{
"name": "Smarty",
"bytes": "726"
}
],
"symlink_target": ""
} |
"""
The Pygments reStructuredText directive
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Docutils_ 0.5 directive that renders source code
(to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation`_ to get all the gory details.
.. _Docutils: http://docutils.sf.net/
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
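# e.g. (illustrative) another variant could be registered as:
#   VARIANTS['inline'] = HtmlFormatter(noclasses=True)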
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
"""Pygmentize text."""
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
formatter = DEFAULT
if self.options:
# take an arbitrary option if more than one is given
key = list(self.options.keys())[0]
formatter = VARIANTS[key]
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('sourcecode', Pygments)
| {
"content_hash": "b55288bbac20b10f32fac3c72a9d5245",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 73,
"avg_line_length": 31.31764705882353,
"alnum_prop": 0.6652892561983471,
"repo_name": "wummel/wok",
"id": "de0a03f360ca8996164e08701f76922648d65fb9",
"size": "2691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "woklib/rst_pygments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3896"
},
{
"name": "HTML",
"bytes": "3784"
},
{
"name": "Makefile",
"bytes": "1003"
},
{
"name": "Python",
"bytes": "76114"
}
],
"symlink_target": ""
} |
""" Wicked hack to get .pyc files to do bytecode tracing instead of
line tracing.
"""
import marshal, new, opcode, sys, types
from lnotab import lnotab_numbers, lnotab_string
class PycFile:
def read(self, f):
if isinstance(f, basestring):
f = open(f, "rb")
self.magic = f.read(4)
self.modtime = f.read(4)
self.code = marshal.load(f)
def write(self, f):
if isinstance(f, basestring):
f = open(f, "wb")
f.write(self.magic)
f.write(self.modtime)
marshal.dump(self.code, f)
def hack_line_numbers(self):
self.code = hack_line_numbers(self.code)
def hack_line_numbers(code):
""" Replace a code object's line number information to claim that every
byte of the bytecode is a new source line. Returns a new code
object. Also recurses to hack the line numbers in nested code objects.
"""
# Create a new lnotab table. Each opcode is claimed to be at
    # 100000000 + 1000*lineno + (opcode number within line), so for example, the
    # opcodes on source line 12 will be given new line numbers 100012000,
    # 100012001, 100012002, etc.
old_num = list(lnotab_numbers(code.co_lnotab, code.co_firstlineno))
n_bytes = len(code.co_code)
new_num = []
line = 0
opnum_in_line = 0
i_byte = 0
while i_byte < n_bytes:
if old_num and i_byte == old_num[0][0]:
line = old_num.pop(0)[1]
opnum_in_line = 0
new_num.append((i_byte, 100000000 + 1000*line + opnum_in_line))
if ord(code.co_code[i_byte]) >= opcode.HAVE_ARGUMENT:
i_byte += 3
else:
i_byte += 1
opnum_in_line += 1
# new_num is a list of pairs, (byteoff, lineoff). Turn it into an lnotab.
new_firstlineno = new_num[0][1]-1
new_lnotab = lnotab_string(new_num, new_firstlineno)
# Recurse into code constants in this code object.
new_consts = []
for const in code.co_consts:
if type(const) == types.CodeType:
new_consts.append(hack_line_numbers(const))
else:
new_consts.append(const)
# Create a new code object, just like the old one, except with new
# line numbers.
new_code = new.code(
code.co_argcount, code.co_nlocals, code.co_stacksize, code.co_flags,
code.co_code, tuple(new_consts), code.co_names, code.co_varnames,
code.co_filename, code.co_name, new_firstlineno, new_lnotab
)
return new_code
def hack_file(f):
pyc = PycFile()
pyc.read(f)
pyc.hack_line_numbers()
pyc.write(f)
if __name__ == '__main__':
hack_file(sys.argv[1])
| {
"content_hash": "225552112df47a8c0a1e64227362d985",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 32.13414634146341,
"alnum_prop": 0.6060721062618596,
"repo_name": "nedbat/coveragepy",
"id": "60b8459b54bfa7b189cbbc3ea668e2d96e70bd5f",
"size": "2791",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lab/hack_pyc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "48728"
},
{
"name": "CSS",
"bytes": "37343"
},
{
"name": "HTML",
"bytes": "213879"
},
{
"name": "JavaScript",
"bytes": "48668"
},
{
"name": "Makefile",
"bytes": "9529"
},
{
"name": "Python",
"bytes": "1324579"
},
{
"name": "SCSS",
"bytes": "17425"
},
{
"name": "Shell",
"bytes": "2240"
}
],
"symlink_target": ""
} |
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy_splash import SplashRequest
class QuotesSpider(scrapy.Spider):
name = "quotes"
allowed_domains = ["toscrape.com"]
start_urls = ['http://quotes.toscrape.com/']
# http_user = 'splash-user'
# http_pass = 'splash-password'
def parse(self, response):
le = LinkExtractor()
for link in le.extract_links(response):
yield SplashRequest(
link.url,
self.parse_link,
endpoint='render.json',
args={
'har': 1,
'html': 1,
}
)
def parse_link(self, response):
print("PARSED", response.real_url, response.url)
print(response.css("title").extract())
print(response.data["har"]["log"]["pages"])
        print(response.headers.get('Content-Type'))
| {
"content_hash": "8351d73cbb63d8215333691a31d699bb",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 56,
"avg_line_length": 28.6875,
"alnum_prop": 0.5555555555555556,
"repo_name": "Ertinfagor/ScrapySplash",
"id": "ed8fb65f5c294b1aa80fa30bddf3b0fbdfc12631",
"size": "942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templateproject/templateproject/spiders/quotes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7135"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from babik_shadow_accounts import get_shadow_account_model
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def user_created(sender, instance, created, raw, using, update_fields,
**kwargs):
"""
    When a user is created, this searches for an Account object
    and adds the user to the account if one is found.
"""
if created:
AccountModel = get_shadow_account_model()
fields = getattr(settings, 'BABIK_SHADOW_ACCOUNT_GLUE_FIELDS', None)
if not fields:
fields = {'email': 'email'}
kwargs = {f1: getattr(instance, f2) for f1, f2 in fields.items()}
try:
account = AccountModel.objects.get(**kwargs)
account.user = instance
account.save()
except AccountModel.DoesNotExist:
kwargs['user'] = instance
account = AccountModel.objects.create(**kwargs)
| {
"content_hash": "95aa02187be7b217ff08aa9f6421186d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 76,
"avg_line_length": 36.607142857142854,
"alnum_prop": 0.6497560975609756,
"repo_name": "aubreystarktoller/django-babik-shadow-accounts",
"id": "6d55c0b45ca74ff2f7e30ddbe90207f1fc977916",
"size": "1025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "babik_shadow_accounts/signals/handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1053"
},
{
"name": "Python",
"bytes": "10549"
}
],
"symlink_target": ""
} |
"""
Overview
========
The multiprocess plugin enables you to distribute your test run among a set of
worker processes that run tests in parallel. This can speed up CPU-bound test
runs (as long as the number of worker processes is around the number of
processors or cores available), but is mainly useful for IO-bound tests that
spend most of their time waiting for data to arrive from someplace else.
.. note ::
See :doc:`../doc_tests/test_multiprocess/multiprocess` for
additional documentation and examples. Use of this plugin on python
2.5 or earlier requires the multiprocessing_ module, also available
from PyPI.
.. _multiprocessing : http://code.google.com/p/python-multiprocessing/
How tests are distributed
=========================
The ideal case would be to dispatch each test to a worker process
separately. This ideal is not attainable in all cases, however, because many
test suites depend on context (class, module or package) fixtures.
The plugin can't know (unless you tell it -- see below!) if a context fixture
can be called many times concurrently (is re-entrant), or if it can be shared
among tests running in different processes. Therefore, if a context has
fixtures, the default behavior is to dispatch the entire suite to a worker as
a unit.
Controlling distribution
^^^^^^^^^^^^^^^^^^^^^^^^
There are two context-level variables that you can use to control this default
behavior.
If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
in the context, and the plugin will dispatch tests in suites bound to that
context as if the context had no fixtures. This means that the fixtures will
execute concurrently and multiple times, typically once per test.
If a context's fixtures can be shared by tests running in different processes
-- such as a package-level fixture that starts an external http server or
initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
the context. These fixtures will then execute in the primary nose process, and
tests in those contexts will be individually dispatched to run in parallel.
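For example, an illustrative sketch (not taken from a real project; all names
below are made up)::

    # module whose fixture is safe to share across worker processes
    _multiprocess_shared_ = True

    def setup_module():
        pass  # e.g. start an external server that all tests only read from

    class TestReentrant:
        # re-entrant fixtures: these tests may be dispatched individually
        _multiprocess_can_split_ = True

        def setup(self):
            self.data = [1, 2, 3]

        def test_sum(self):
            assert sum(self.data) == 6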
How results are collected and reported
======================================
As each test or suite executes in a worker process, results (failures, errors,
and specially handled exceptions like SkipTest) are collected in that
process. When the worker process finishes, it returns results to the main
nose process. There, any progress output is printed (dots!), and the
results from the test run are combined into a consolidated result
set. When results have been received for all dispatched tests, or all
workers have died, the result summary is output as normal.
Beware!
=======
Not all test suites will benefit from, or even operate correctly using, this
plugin. For example, CPU-bound tests will run more slowly if you don't have
multiple processors. There are also some differences in plugin
interactions and behaviors due to the way in which tests are dispatched and
loaded. In general, test loading under this plugin operates as if it were
always in directed mode instead of discovered mode. For instance, doctests
in test modules will always be found when using this plugin with the doctest
plugin.
But the biggest issue you will face is probably concurrency. Unless you
have kept your tests as religiously pure unit tests, with no side-effects, no
ordering issues, and no external dependencies, chances are you will experience
odd, intermittent and unexplainable failures and errors when using this
plugin. This doesn't necessarily mean the plugin is broken; it may mean that
your test suite is not safe for concurrency.
New Features in 1.0.1
=====================
* functions generated by test generators are now added to the worker queue
making them multi-threaded.
* fixed timeout functionality, now functions will be terminated with a
TimedOutException exception when they exceed their execution time. The
worker processes are not terminated.
* added ``--process-restartworker`` option to restart workers once they are
done, this helps control memory usage. Sometimes memory leaks can accumulate
making long runs very difficult.
* added global _instantiate_plugins to configure which plugins are started
on the worker processes.
"""
import logging
import os
import sys
import time
import traceback
import unittest
import pickle
import signal
import nose.case
from nose.core import TextTestRunner
from nose import failure
from nose import loader
from nose.plugins.base import Plugin
#from nose.pyversion import bytes_
from nose.result import TextTestResult
from nose.suite import ContextSuite
from nose.util import test_address
try:
# 2.7+
from unittest.runner import _WritelnDecorator
except ImportError:
from unittest import _WritelnDecorator
from Queue import Empty
from warnings import warn
try:
from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO
if sys.version_info >= (3, 0):
def bytes_(s, encoding='utf8'):
return bytes(s, encoding)
else:
def bytes_(s, encoding=None):
return str(s)
# this is a list of plugin classes that will be checked for and created inside
# each worker process
_instantiate_plugins = None
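# For example (sketch), a wrapper script could register extra worker-side
# plugins before invoking nose:
#   import nose.plugins.multiprocess as multiprocess
#   multiprocess._instantiate_plugins = [MyPlugin]  # MyPlugin: hypothetical Plugin subclass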
log = logging.getLogger(__name__)
Process = Queue = Pool = Event = Value = Array = None
class TimedOutException(Exception):
def __init__(self, value = "Timed Out"):
self.value = value
def __str__(self):
return repr(self.value)
class UnknownTerminationException(Exception):
def __init__(self, value = "Unknown termination"):
self.value = value
def __str__(self):
return repr(self.value)
class ForceErrorClass(object):
failedaddr=None
failedargs=None
def __init__(self,failedaddr,failedargs):
self.failedaddr=failedaddr
self.failedargs=failedargs
def __str__(self):
return ''
def __unicode__(self):
return u''
def _import_mp():
global Process, Queue, Pool, Event, Value, Array
try:
from multiprocessing import (Process as Process_, Queue as Queue_,
Pool as Pool_, Event as Event_,
Value as Value_, Array as Array_)
Process, Queue, Pool, Event, Value, Array = (Process_, Queue_, Pool_,
Event_, Value_, Array_)
except ImportError:
warn("multiprocessing module is not available, multiprocess plugin "
"cannot be used", RuntimeWarning)
class TestLet:
def __init__(self, case):
try:
self._id = case.id()
except AttributeError:
pass
try:
self._short_description = case.shortDescription()
self._str = str(case)
except AttributeError:
self._short_description = ''
self._str = ''
def id(self):
return self._id
def shortDescription(self):
return self._short_description
def __str__(self):
return self._str
class MultiProcess(Plugin):
"""
Run tests in multiple processes. Requires processing module.
"""
score = 1000
status = {}
def options(self, parser, env):
"""
Register command-line options.
"""
parser.add_option("--processes", action="store",
default=env.get('NOSE_PROCESSES', 0),
dest="multiprocess_workers",
metavar="NUM",
help="Spread test run among this many processes. "
"Set a number equal to the number of processors "
"or cores in your machine for best results. "
"[NOSE_PROCESSES]")
parser.add_option("--process-timeout", action="store",
default=env.get('NOSE_PROCESS_TIMEOUT', 10),
dest="multiprocess_timeout",
metavar="SECONDS",
help="Set timeout for return of results from each "
"test runner process. [NOSE_PROCESS_TIMEOUT]")
parser.add_option("--process-restartworker", action="store_true",
default=env.get('NOSE_PROCESS_RESTARTWORKER', False),
dest="multiprocess_restartworker",
help="If set, will restart each worker process once"
" their tests are done, this helps control memory "
"leaks from killing the system. "
"[NOSE_PROCESS_RESTARTWORKER]")
def configure(self, options, config):
"""
Configure plugin.
"""
try:
self.status.pop('active')
except KeyError:
pass
if not hasattr(options, 'multiprocess_workers'):
self.enabled = False
return
# don't start inside of a worker process
if config.worker:
return
self.config = config
try:
workers = int(options.multiprocess_workers)
except (TypeError, ValueError):
workers = 0
if workers:
_import_mp()
if Process is None:
self.enabled = False
return
self.enabled = True
self.config.multiprocess_workers = workers
t = float(options.multiprocess_timeout)
self.config.multiprocess_timeout = t
r = int(options.multiprocess_restartworker)
self.config.multiprocess_restartworker = r
self.status['active'] = True
def prepareTestLoader(self, loader):
"""Remember loader class so MultiProcessTestRunner can instantiate
the right loader.
"""
self.loaderClass = loader.__class__
def prepareTestRunner(self, runner):
"""Replace test runner with MultiProcessTestRunner.
"""
# replace with our runner class
return MultiProcessTestRunner(stream=runner.stream,
verbosity=self.config.verbosity,
config=self.config,
loaderClass=self.loaderClass)
class MultiProcessTestRunner(TextTestRunner):
waitkilltime = 10.0 # max time to wait to terminate a process that does not respond to SIGINT
def __init__(self, **kw):
self.loaderClass = kw.pop('loaderClass', loader.defaultTestLoader)
super(MultiProcessTestRunner, self).__init__(**kw)
def run(self, test):
"""
Execute the test (which may be a test suite). If the test is a suite,
distribute it out among as many processes as have been configured, at
as fine a level as is possible given the context fixtures defined in
the suite or any sub-suites.
"""
log.debug("%s.run(%s) (%s)", self, test, os.getpid())
wrapper = self.config.plugins.prepareTest(test)
if wrapper is not None:
test = wrapper
# plugins can decorate or capture the output stream
wrapped = self.config.plugins.setOutputStream(self.stream)
if wrapped is not None:
self.stream = wrapped
testQueue = Queue()
resultQueue = Queue()
tasks = []
completed = []
workers = []
to_teardown = []
shouldStop = Event()
result = self._makeResult()
start = time.time()
# dispatch and collect results
# put indexes only on queue because tests aren't picklable
for case in self.nextBatch(test):
log.debug("Next batch %s (%s)", case, type(case))
if (isinstance(case, nose.case.Test) and
isinstance(case.test, failure.Failure)):
log.debug("Case is a Failure")
case(result) # run here to capture the failure
continue
# handle shared fixtures
if isinstance(case, ContextSuite) and case.context is failure.Failure:
log.debug("Case is a Failure")
case(result) # run here to capture the failure
continue
elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
log.debug("%s has shared fixtures", case)
try:
case.setUp()
except (KeyboardInterrupt, SystemExit):
raise
except:
log.debug("%s setup failed", sys.exc_info())
result.addError(case, sys.exc_info())
else:
to_teardown.append(case)
for _t in case:
test_addr = self.addtask(testQueue,tasks,_t)
log.debug("Queued shared-fixture test %s (%s) to %s",
len(tasks), test_addr, testQueue)
else:
test_addr = self.addtask(testQueue,tasks,case)
log.debug("Queued test %s (%s) to %s",
len(tasks), test_addr, testQueue)
log.debug("Starting %s workers", self.config.multiprocess_workers)
for i in range(self.config.multiprocess_workers):
currentaddr = Array('c',1000)
currentaddr.value = bytes_('')
currentargs = Array('c',1000)
currentargs.value = bytes_('')
currentstart = Value('d')
keyboardCaught = Event()
p = Process(target=runner, args=(i, testQueue, resultQueue,
currentaddr, currentargs, currentstart,
keyboardCaught, shouldStop,
self.loaderClass,
result.__class__,
pickle.dumps(self.config)))
p.currentaddr = currentaddr
p.currentargs = currentargs
p.currentstart = currentstart
p.keyboardCaught = keyboardCaught
# p.setDaemon(True)
p.start()
workers.append(p)
log.debug("Started worker process %s", i+1)
total_tasks = len(tasks)
# need to keep track of the next time to check for timeouts in case
# more than one process times out at the same time.
nexttimeout=self.config.multiprocess_timeout
while tasks:
log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
len(completed), total_tasks,nexttimeout)
try:
# should periodically check for timeouts with a min of 10s since processes can terminate sporadically
iworker, addr, newtask_addrs, batch_result = resultQueue.get(timeout=min(10,nexttimeout))
log.debug('Results received for worker %d , %s, new tasks: %d', iworker,addr,len(newtask_addrs))
try:
try:
tasks.remove(addr)
except ValueError:
log.warn('worker %s failed to remove from tasks: %s',
iworker,addr)
total_tasks += len(newtask_addrs)
for newaddr in newtask_addrs:
tasks.append(newaddr)
except KeyError:
log.debug("Got result for unknown task? %s", addr)
log.debug("current: %s",str(list(tasks)[0]))
else:
completed.append([addr,batch_result])
self.consolidate(result, batch_result)
if (self.config.stopOnError
and not result.wasSuccessful()):
# set the stop condition
shouldStop.set()
break
if self.config.multiprocess_restartworker:
log.debug('joining worker %s',iworker)
# wait for working, but not that important if worker
# cannot be joined in fact, for workers that add to
# testQueue, they will not terminate until all their
# items are read
workers[iworker].join(timeout=1)
if not shouldStop.is_set() and not testQueue.empty():
log.debug('starting new process on worker %s',iworker)
currentaddr = Array('c',1000)
currentaddr.value = bytes_('')
currentargs = Array('c',1000)
currentargs.value = bytes_('')
currentstart = Value('d')
currentstart.value = time.time()
keyboardCaught = Event()
workers[iworker] = Process(target=runner,
args=(iworker, testQueue,
resultQueue,
currentaddr,
currentargs,
currentstart,
keyboardCaught,
shouldStop,
self.loaderClass,
result.__class__,
pickle.dumps(self.config)))
workers[iworker].currentaddr = currentaddr
workers[iworker].currentargs = currentargs
workers[iworker].currentstart = currentstart
workers[iworker].keyboardCaught = keyboardCaught
workers[iworker].start()
except Empty:
log.debug("Timed out with %s tasks pending (empty testQueue=%d): %s", len(tasks),testQueue.empty(),str(tasks))
any_alive = False
for iworker, w in enumerate(workers):
worker_addr = bytes_(w.currentaddr.value,'ascii')
worker_args = ''
try:
if len(w.currentargs.value) > 0:
worker_args = pickle.loads(bytes_(w.currentargs.value,'ascii'))
except EOFError,e:
log.warn('worker %d: exception in getting worker args (%s): %s',iworker, w.currentargs.value, str(e))
test_addr = worker_addr
if worker_args is not None:
test_addr += str(worker_args)
if not w.is_alive():
# it could have segmented with the last job assigned
if len(worker_addr) > 0:
foundtask = [task for task in tasks if task==test_addr]
log.debug('dead worker %d currentaddr: %s, currentargs: %s, found %d matching tasks', iworker, worker_addr,str(worker_args), len(foundtask))
if len(foundtask) > 0:
tasks.remove(foundtask[0])
testQueue.put((worker_addr,ForceErrorClass(worker_addr,worker_args)),block=False)
tasks.append(worker_addr)
# restart the worker
currentaddr = Array('c',1000)
currentaddr.value = bytes_('')
currentargs = Array('c',1000)
currentargs.value = bytes_('')
currentstart = Value('d')
currentstart.value = time.time()
keyboardCaught = Event()
workers[iworker] = Process(target=runner,
args=(iworker, testQueue,
resultQueue,
currentaddr,
currentargs,
currentstart,
keyboardCaught,
shouldStop,
self.loaderClass,
result.__class__,
pickle.dumps(self.config)))
workers[iworker].currentaddr = currentaddr
workers[iworker].currentargs = currentargs
workers[iworker].currentstart = currentstart
workers[iworker].keyboardCaught = keyboardCaught
workers[iworker].start()
any_alive = True # force continuation
else:
timeprocessing = time.time()-w.currentstart.value
if len(worker_addr) == 0 and timeprocessing > self.config.multiprocess_timeout-0.1:
log.debug('worker %d has finished its work item, but is not exiting? do we wait for it?', iworker)
if timeprocessing > self.config.multiprocess_timeout+30:
log.error('worker %d force kill', iworker)
os.kill(w.pid, signal.SIGINT)
time.sleep(0.1)
else:
any_alive = True
else:
any_alive = True
if len(worker_addr) > 0 and timeprocessing > self.config.multiprocess_timeout-0.1:
log.debug('timed out worker %s: %s', iworker,worker_addr)
w.currentaddr.value = bytes_('')
# If the process is in C++ code, sending a SIGINT
# might not send a python KeybordInterrupt exception
# therefore, send multiple signals until an
# exception is caught. If this takes too long, then
# terminate the process
w.keyboardCaught.clear()
startkilltime = time.time()
while not w.keyboardCaught.is_set() and w.is_alive():
if time.time()-startkilltime > self.waitkilltime:
# have to terminate...
log.error("terminating worker %s",iworker)
w.terminate()
foundtask = [task for task in tasks if task==test_addr]
log.debug('found %d matching tasks', len(foundtask))
if len(foundtask) > 0:
tasks.remove(foundtask[0])
testQueue.put((worker_addr,ForceErrorClass(worker_addr,worker_args)),block=False)
tasks.append(worker_addr)
currentaddr = Array('c',1000)
currentaddr.value = bytes_('')
currentargs = Array('c',1000)
currentargs.value = bytes_('')
currentstart = Value('d')
currentstart.value = time.time()
keyboardCaught = Event()
workers[iworker] = Process(target=runner,
args=(iworker, testQueue, resultQueue,
currentaddr, currentargs, currentstart,
keyboardCaught, shouldStop,
self.loaderClass,
result.__class__,
pickle.dumps(self.config)))
workers[iworker].currentaddr = currentaddr
workers[iworker].currentargs = currentargs
workers[iworker].currentstart = currentstart
workers[iworker].keyboardCaught = keyboardCaught
workers[iworker].start()
# there is a small probability that the
# terminated process might send a result,
# which has to be specially handled or
# else processes might get orphaned.
w = workers[iworker]
break
os.kill(w.pid, signal.SIGINT)
time.sleep(0.1)
if not any_alive and testQueue.empty():
log.debug("All workers dead")
break
nexttimeout=self.config.multiprocess_timeout
for w in workers:
if w.is_alive() and len(w.currentaddr.value) > 0:
timeprocessing = time.time()-w.currentstart.value
if timeprocessing <= self.config.multiprocess_timeout:
nexttimeout = min(nexttimeout,
self.config.multiprocess_timeout-timeprocessing)
log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))
for case in to_teardown:
log.debug("Tearing down shared fixtures for %s", case)
try:
case.tearDown()
except (KeyboardInterrupt, SystemExit):
raise
except:
result.addError(case, sys.exc_info())
stop = time.time()
# first write since can freeze on shutting down processes
result.printErrors()
result.printSummary(start, stop)
self.config.plugins.finalize(result)
log.debug("Tell all workers to stop")
for w in workers:
if w.is_alive():
testQueue.put('STOP', block=False)
# wait for the workers to end
try:
for iworker,worker in enumerate(workers):
if worker.is_alive():
log.debug('joining worker %s',iworker)
worker.join()#10)
if worker.is_alive():
log.debug('failed to join worker %s',iworker)
except KeyboardInterrupt:
log.info('parent received ctrl-c')
for worker in workers:
worker.terminate()
worker.join()
return result
def addtask(testQueue,tasks,case):
arg = None
if isinstance(case,nose.case.Test) and hasattr(case.test,'arg'):
# this removes the top level descriptor and allows real function
# name to be returned
case.test.descriptor = None
arg = case.test.arg
test_addr = MultiProcessTestRunner.address(case)
testQueue.put((test_addr,arg), block=False)
if arg is not None:
test_addr += str(arg)
if tasks is not None:
tasks.append(test_addr)
return test_addr
addtask = staticmethod(addtask)
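    # Each testQueue entry produced by addtask() is a (test_address, generator_arg)
    # tuple; address() below flattens a case into the "path/to/file:call" form the
    # loader accepts, e.g. (illustrative) "/src/tests/test_mod.py:TestCase.test_x".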
def address(case):
if hasattr(case, 'address'):
file, mod, call = case.address()
elif hasattr(case, 'context'):
file, mod, call = test_address(case.context)
else:
raise Exception("Unable to convert %s to address" % case)
parts = []
if file is None:
if mod is None:
raise Exception("Unaddressable case %s" % case)
else:
parts.append(mod)
else:
# strip __init__.py(c) from end of file part
# if present, having it there confuses loader
dirname, basename = os.path.split(file)
if basename.startswith('__init__'):
file = dirname
parts.append(file)
if call is not None:
parts.append(call)
return ':'.join(map(str, parts))
address = staticmethod(address)
def nextBatch(self, test):
# allows tests or suites to mark themselves as not safe
# for multiprocess execution
if hasattr(test, 'context'):
if not getattr(test.context, '_multiprocess_', True):
return
if ((isinstance(test, ContextSuite)
and test.hasFixtures(self.checkCanSplit))
or not getattr(test, 'can_split', True)
or not isinstance(test, unittest.TestSuite)):
# regular test case, or a suite with context fixtures
# special case: when run like nosetests path/to/module.py
# the top-level suite has only one item, and it shares
# the same context as that item. In that case, we want the
# item, not the top-level suite
if isinstance(test, ContextSuite):
contained = list(test)
if (len(contained) == 1
and getattr(contained[0],
'context', None) == test.context):
test = contained[0]
yield test
else:
# Suite is without fixtures at this level; but it may have
# fixtures at any deeper level, so we need to examine it all
# the way down to the case level
for case in test:
for batch in self.nextBatch(case):
yield batch
def checkCanSplit(self, context, fixt):
"""
Callback that we use to check whether the fixtures found in a
context or ancestor are ones we care about.
Contexts can tell us that their fixtures are reentrant by setting
_multiprocess_can_split_. So if we see that, we return False to
disregard those fixtures.
"""
if not fixt:
return False
if getattr(context, '_multiprocess_can_split_', False):
return False
return True
def sharedFixtures(self, case):
context = getattr(case, 'context', None)
if not context:
return False
return getattr(context, '_multiprocess_shared_', False)
def consolidate(self, result, batch_result):
log.debug("batch result is %s" , batch_result)
try:
output, testsRun, failures, errors, errorClasses = batch_result
except ValueError:
log.debug("result in unexpected format %s", batch_result)
failure.Failure(*sys.exc_info())(result)
return
self.stream.write(output)
result.testsRun += testsRun
result.failures.extend(failures)
result.errors.extend(errors)
for key, (storage, label, isfail) in errorClasses.items():
if key not in result.errorClasses:
# Ordinarily storage is result attribute
# but it's only processed through the errorClasses
# dict, so it's ok to fake it here
result.errorClasses[key] = ([], label, isfail)
mystorage, _junk, _junk = result.errorClasses[key]
mystorage.extend(storage)
log.debug("Ran %s tests (total: %s)", testsRun, result.testsRun)
def runner(ix, testQueue, resultQueue, currentaddr, currentargs, currentstart,
keyboardCaught, shouldStop, loaderClass, resultClass, config):
try:
try:
try:
return __runner(ix, testQueue, resultQueue, currentaddr, currentargs, currentstart,
keyboardCaught, shouldStop, loaderClass, resultClass, config)
except KeyboardInterrupt:
keyboardCaught.set()
log.debug('Worker %s keyboard interrupt, stopping',ix)
except Empty:
log.debug("Worker %s timed out waiting for tasks", ix)
finally:
testQueue.close()
resultQueue.close()
def __runner(ix, testQueue, resultQueue, currentaddr, currentargs, currentstart,
keyboardCaught, shouldStop, loaderClass, resultClass, config):
config = pickle.loads(config)
dummy_parser = config.parserClass()
if _instantiate_plugins is not None:
for pluginclass in _instantiate_plugins:
plugin = pluginclass()
plugin.addOptions(dummy_parser,{})
config.plugins.addPlugin(plugin)
config.plugins.configure(config.options,config)
config.plugins.begin()
log.debug("Worker %s executing, pid=%d", ix,os.getpid())
loader = loaderClass(config=config)
loader.suiteClass.suiteClass = NoSharedFixtureContextSuite
def get():
return testQueue.get(timeout=config.multiprocess_timeout)
def makeResult():
stream = _WritelnDecorator(StringIO())
result = resultClass(stream, descriptions=1,
verbosity=config.verbosity,
config=config)
plug_result = config.plugins.prepareTestResult(result)
if plug_result:
return plug_result
return result
def batch(result):
failures = [(TestLet(c), err) for c, err in result.failures]
errors = [(TestLet(c), err) for c, err in result.errors]
errorClasses = {}
for key, (storage, label, isfail) in result.errorClasses.items():
errorClasses[key] = ([(TestLet(c), err) for c, err in storage],
label, isfail)
return (
result.stream.getvalue(),
result.testsRun,
failures,
errors,
errorClasses)
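    # Each item sent back on resultQueue is
    #   (worker_index, task_address, newly_spawned_task_addresses, batch(result))
    # where batch() above reduces the TestResult to picklable pieces.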
for test_addr, arg in iter(get, 'STOP'):
if shouldStop.is_set():
log.exception('Worker %d STOPPED',ix)
break
result = makeResult()
test = loader.loadTestsFromNames([test_addr])
test.testQueue = testQueue
test.tasks = []
test.arg = arg
log.debug("Worker %s Test is %s (%s), args: %s", ix, test_addr, test, str(arg))
try:
task_addr = test_addr
if arg is not None:
task_addr += str(arg)
currentaddr.value = bytes_(test_addr)
currentargs.value = bytes_(pickle.dumps(arg))
currentstart.value = time.time()
result.currentaddr = currentaddr
test(result)
currentaddr.value = bytes_('')
resultQueue.put((ix, task_addr, test.tasks, batch(result)))
except KeyboardInterrupt:
keyboardCaught.set()
if len(currentaddr.value) > 0:
log.exception('Worker %s keyboard interrupt, failing current test %s', ix,test_addr )
currentaddr.value = bytes_('')
failure.Failure(*sys.exc_info())(result)
resultQueue.put((ix, test_addr, test.tasks, batch(result)))
else:
log.exception('Worker %s test %s timed out',ix,test_addr)
resultQueue.put((ix, test_addr, test.tasks, batch(result)))
except SystemExit:
currentaddr.value = bytes_('')
log.exception('Worker %s system exit',ix)
raise
except:
log.exception("Worker %s error running test or returning results",ix)
currentaddr.value = bytes_('')
failure.Failure(*sys.exc_info())(result)
resultQueue.put((ix, test_addr, test.tasks, batch(result)))
if config.multiprocess_restartworker:
break
log.debug("Worker %s ending", ix)
class NoSharedFixtureContextSuite(ContextSuite):
"""
Context suite that never fires shared fixtures.
When a context sets _multiprocess_shared_, fixtures in that context
are executed by the main process. Using this suite class prevents them
from executing in the runner process as well.
"""
testQueue = None
tasks = None
arg = None
def setupContext(self, context):
if getattr(context, '_multiprocess_shared_', False):
return
super(NoSharedFixtureContextSuite, self).setupContext(context)
def teardownContext(self, context):
if getattr(context, '_multiprocess_shared_', False):
return
super(NoSharedFixtureContextSuite, self).teardownContext(context)
def run(self, result):
"""Run tests in suite inside of suite fixtures.
"""
# proxy the result for myself
log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests)
if self.resultProxy:
result, orig = self.resultProxy(result, self), result
else:
result, orig = result, result
try:
self.setUp()
except KeyboardInterrupt:
raise
except:
self.error_context = 'setup'
result.addError(self, self._exc_info())
return
try:
localtests = [test for test in self._tests]
if len(localtests) > 1 and self.testQueue is not None:
log.debug("queue %d tests"%len(localtests))
for test in localtests:
if not isinstance(test,NoSharedFixtureContextSuite) and isinstance(test.test,nose.failure.Failure):
                        log.debug('test %s probably failed in the generator, so execute directly to get the exception' % str(test))
test(orig)
else:
MultiProcessTestRunner.addtask(self.testQueue, self.tasks, test)
else:
for test in localtests:
if isinstance(test,nose.case.Test) and self.arg is not None:
test.test.arg = self.arg
else:
test.arg = self.arg
test.testQueue = self.testQueue
test.tasks = self.tasks
if result.shouldStop:
log.debug("stopping")
break
test_addr = MultiProcessTestRunner.address(test)
orig.currentaddr.value = bytes_(test_addr)
if isinstance(self.arg, ForceErrorClass) and self.arg.failedaddr == test_addr:
if isinstance(test,nose.case.Test) and self.arg is not None:
test.test.arg = self.arg.failedargs
else:
test.arg = self.arg.failedargs
test.capturedOutput = None
err = (UnknownTerminationException,UnknownTerminationException(str(test)), None)
test.config.plugins.addError(test,err)
orig.addError(test,err)
return
# each nose.case.Test will create its own result proxy
# so the cases need the original result, to avoid proxy
# chains
try:
test(orig)
                    except KeyboardInterrupt as e:
err = (TimedOutException,TimedOutException(str(test)), sys.exc_info()[2])
test.config.plugins.addError(test,err)
orig.addError(test,err)
finally:
self.has_run = True
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
self.error_context = 'teardown'
result.addError(self, self._exc_info())
| {
"content_hash": "0e3d3fdb9991bc06f95090f5ae79f70d",
"timestamp": "",
"source": "github",
"line_count": 909,
"max_line_length": 168,
"avg_line_length": 44.34983498349835,
"alnum_prop": 0.5366870069950885,
"repo_name": "jdsika/TUM_HOly",
"id": "02833c30528c127146f1643065f559dc562ec6d1",
"size": "40314",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openrave/test/noseplugins/multiprocess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "361"
},
{
"name": "C",
"bytes": "111973"
},
{
"name": "C#",
"bytes": "24641"
},
{
"name": "C++",
"bytes": "11966748"
},
{
"name": "CMake",
"bytes": "212392"
},
{
"name": "CSS",
"bytes": "2102"
},
{
"name": "HTML",
"bytes": "16213"
},
{
"name": "Makefile",
"bytes": "41"
},
{
"name": "Matlab",
"bytes": "198171"
},
{
"name": "Modelica",
"bytes": "621"
},
{
"name": "Objective-C",
"bytes": "51576"
},
{
"name": "Python",
"bytes": "10053508"
},
{
"name": "Shell",
"bytes": "11963"
},
{
"name": "XSLT",
"bytes": "471726"
}
],
"symlink_target": ""
} |
import abc
from enum import Enum
from .method import wrap_method
from .base import weak_method
from .device import Device
from .stateMachine import StateMachine
from .attribute import Attribute
class DState(Enum):
# These are the states that our machine supports
Fault, Idle, Configuring, Ready, Running, Pausing, Paused, Aborting,\
Aborted, Resetting = range(10)
@classmethod
def rest(cls):
return [cls.Fault, cls.Idle, cls.Ready, cls.Aborted]
@classmethod
def pausedone(cls):
return [cls.Fault, cls.Aborted, cls.Paused]
@classmethod
def abortable(cls):
return [cls.Configuring, cls.Ready, cls.Running, cls.Pausing,
cls.Paused, cls.Resetting]
@classmethod
def configurable(cls):
return [cls.Idle, cls.Ready]
@classmethod
def runnable(cls):
return [cls.Ready, cls.Paused]
@classmethod
def resettable(cls):
return [cls.Fault, cls.Aborted]
def to_dict(self):
choices = [e.name for e in self.__class__]
d = dict(index=self.value, choices=choices)
return d
class DEvent(Enum):
# These are the messages that we will respond to
Error, Reset, ResetSta, Config, ConfigSta, Run, RunSta, Abort, AbortSta, \
Pause, PauseSta = range(11)
class RunnableDevice(Device):
def __init__(self, name, timeout=None):
# superclass init
super(RunnableDevice, self).__init__(name, timeout=timeout)
# Make a statemachine
sm = StateMachine(name + ".stateMachine", DState.Idle, DState.Fault)
self.add_stateMachine(sm)
# some shortcuts for the state table
do, t, s, e = self.shortcuts()
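        # Transition signature (an assumption, inferred from the calls below):
        # t(from_state_or_states, event, handler, *allowed_end_states)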
# Error condition generated by device
t(s, e.Error, do.error, s.Fault)
# Normal operations
t(s.resettable(), e.Reset, do.reset, s.Resetting)
t(s.Resetting, e.ResetSta, do.resetsta, s.Resetting, s.Idle)
t(s.Idle, e.Config, do.config, s.Configuring)
t(s.Configuring, e.ConfigSta, do.configsta, s.Configuring, s.Ready)
t(s.Ready, e.Config, do.config, s.Configuring)
t(s.Ready, e.Run, do.run, s.Running)
t(s.Running, e.RunSta, do.runsta, s.Running, s.Idle, s.Ready)
# Abort
t(s.abortable(), e.Abort, do.abort, s.Aborting)
t(s.Aborting, e.AbortSta, do.abortsta, s.Aborting, s.Aborted)
# Timeout for functions
self.add_attributes(
timeout=Attribute(float, "Time in seconds to wait for function"))
# Override the error handler of the stateMachine
sm.do_error = weak_method(self.do_error)
def shortcuts(self):
# Shortcut to all the self.do_ functions
class do:
pass
for fname in dir(self):
if fname.startswith("do_"):
setattr(do, fname[3:], getattr(self, fname))
# Shortcut to transition function, state list and event list
t = self.stateMachine.transition
s = DState
e = DEvent
return (do, t, s, e)
def do_error(self, error):
"""Handle an error"""
return DState.Fault, str(error)
@abc.abstractmethod
def do_reset(self):
"""Check and attempt to clear any error state, arranging for a
callback doing self.post(DEvent.ResetSta, resetsta) when progress has
been made, where resetsta is any device specific reset status
"""
@abc.abstractmethod
def do_resetsta(self, resetsta):
"""Examine configsta for configuration progress, returning
DState.Resetting if still in progress, or DState.Idle if done.
"""
@abc.abstractmethod
def do_config(self, **config_params):
"""Start doing a configuration using config_params, arranging for a
callback doing self.post(DEvent.ConfigSta, configsta) when progress has
been made, where configsta is any device specific configuration status
"""
@abc.abstractmethod
def do_configsta(self, configsta):
"""Examine configsta for configuration progress, returning
DState.Configuring if still in progress, or DState.Ready if done.
"""
@abc.abstractmethod
def do_run(self):
"""Start doing a run, arranging for a callback doing
self.post(DEvent.RunSta, runsta) when progress has been made, where
runsta is any device specific run status
"""
@abc.abstractmethod
def do_runsta(self, runsta):
"""Examine runsta for run progress, returning DState.Running if still
in progress, DState.Ready if done and another run can be started
without reconfiguration, or DState.Idle if done and configuration is
needed before another run can be started.
"""
@abc.abstractmethod
def do_abort(self):
"""Start doing an abort, arranging for a callback doing
        self.post(DEvent.AbortSta, abortsta) when progress has been made, where
abortsta is any device specific abort status
"""
@abc.abstractmethod
def do_abortsta(self, abortsta):
"""Examine abortsta for abort progress, returning DState.Aborting if still
in progress or DState.Aborted if done.
"""
@abc.abstractmethod
def assert_valid(self, arg1, arg2="arg2default"):
"""Check whether a set of configuration parameters is valid or not. Each
parameter name must match one of the names in self.attributes. This set
of parameters should be checked in isolation, no device state should be
taken into account. It is allowed from any DState and raises an error
if the set of configuration parameters is invalid.
"""
@wrap_method(only_in=DState.abortable())
def abort(self, timeout=None):
"""Abort configuration or abandon the current run whether it is
running or paused. It blocks until the device is in a rest state:
* Normally it will return a DState.Aborted Status
* If something goes wrong it will return a DState.Fault Status
"""
timeout = timeout or self.timeout
self.stateMachine.post(DEvent.Abort)
self.wait_until(DState.rest(), timeout=timeout)
@wrap_method(only_in=DState.resettable())
def reset(self, timeout=None):
"""Try and reset the device into DState.Idle. It blocks until the
device is in a rest state:
* Normally it will return a DState.Idle Status
* If something goes wrong it will return a DState.Fault Status
"""
timeout = timeout or self.timeout
self.stateMachine.post(DEvent.Reset)
self.wait_until(DState.rest(), timeout=timeout)
@wrap_method(only_in=DState.configurable(), args_from=assert_valid)
def configure(self, timeout=None, **params):
"""Assert params are valid, then use them to configure a device for a run.
It blocks until the device is in a rest state:
        * Normally it will return a DState.Ready Status
* If the user aborts then it will return a DState.Aborted Status
* If something goes wrong it will return a DState.Fault Status
"""
timeout = timeout or self.timeout
self.assert_valid(**params)
self.stateMachine.post(DEvent.Config, **params)
self.wait_until(DState.rest(), timeout=timeout)
@wrap_method(only_in=DState.runnable())
def run(self, timeout=None):
"""Start a configured device running. It blocks until the device is in a
rest state:
* Normally it will return a DState.Idle Status
        * If the device allows many runs from a single configure then it
will return a DState.Ready Status
* If the user aborts then it will return a DState.Aborted Status
* If something goes wrong it will return a DState.Fault Status
"""
timeout = timeout or self.timeout
self.stateMachine.post(DEvent.Run)
self.wait_until(DState.rest(), timeout=timeout)
@wrap_method(only_in=DState, args_from=assert_valid)
def configure_run(self, timeout=None, **params):
"""Try and configure and run a device in one step. It blocks until the
device is in a rest state:
* Normally it will return a DState.Idle Status
* If the device allows many runs from a single configure then it
will return a DState.Ready Status
* If the user aborts then it will return a DState.Aborted Status
* If something goes wrong it will return a DState.Fault Status
"""
timeout = timeout or self.timeout
# If we can't configure from our current state
if self.state not in DState.configurable():
# If we are abortable then abort
if self.state in DState.abortable():
self.abort(timeout=timeout)
# Now try a reset to bring us back to idle
if self.state in DState.resettable():
self.reset(timeout=timeout)
# Now if we are configurable then do so
if self.state in DState.configurable():
self.configure(timeout=timeout, **params)
# And now if we are ready then do a run
if self.state == DState.Ready:
self.run(timeout=timeout)
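# Illustrative sketch (not part of the original module): a concrete device
# would subclass RunnableDevice and implement the do_* hooks, posting the
# matching *Sta events as work completes. The self.post(...) call and the
# (state, message) return convention below are assumptions taken from the
# docstrings above and from do_error; the real hook signatures may differ.
#
#     class DummyDevice(RunnableDevice):
#         def do_reset(self):
#             self.post(DEvent.ResetSta, "cleared")
#         def do_resetsta(self, resetsta):
#             return DState.Idle, "Reset complete"
#         def do_config(self, **config_params):
#             self.post(DEvent.ConfigSta, "configured")
#         def do_configsta(self, configsta):
#             return DState.Ready, "Configured"
#         def do_run(self):
#             self.post(DEvent.RunSta, "done")
#         def do_runsta(self, runsta):
#             return DState.Idle, "Run complete"
#         def do_abort(self):
#             self.post(DEvent.AbortSta, "aborted")
#         def do_abortsta(self, abortsta):
#             return DState.Aborted, "Aborted"
#         def assert_valid(self, arg1, arg2="arg2default"):
#             pass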
| {
"content_hash": "838a1618512fc537bf4ca2007f9301eb",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 82,
"avg_line_length": 38.82231404958678,
"alnum_prop": 0.6373602980308675,
"repo_name": "ulrikpedersen/malcolm",
"id": "6e9383fb1d0ae694b95b303ea727ccb45cd41bcf",
"size": "9395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "malcolm/core/runnableDevice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1000"
},
{
"name": "Python",
"bytes": "150960"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
} |
from tests.testcase import TestCase
from edmunds.globals import _request_ctx_stack
from edmunds.http.visitor import Visitor
class TestHttpServiceProvider(TestCase):
"""
Test the Http Service Provider
"""
def test_visitor(self):
"""
Test visitor
:return: void
"""
rule = '/' + self.rand_str(20)
# No context
self.assert_is_none(_request_ctx_stack.top)
# Call route
with self.app.test_request_context(rule):
# Before pre-processing
self.assert_is_not_none(_request_ctx_stack.top)
self.assert_false(hasattr(_request_ctx_stack.top, 'edmunds.visitor'))
# Pre-processing
self.app.preprocess_request()
# After pre-processing
self.assert_is_not_none(_request_ctx_stack.top)
self.assert_true(hasattr(_request_ctx_stack.top, 'edmunds.visitor'))
self.assert_is_instance(getattr(_request_ctx_stack.top, 'edmunds.visitor'), Visitor)
| {
"content_hash": "65e1fe9e0f57813eb0beeaef072fe853",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 96,
"avg_line_length": 30.205882352941178,
"alnum_prop": 0.6124634858812074,
"repo_name": "LowieHuyghe/edmunds",
"id": "8657f5a6d28901f0810eaab8d59d84ce6794425b",
"size": "1028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/http/providers/testhttpserviceprovider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "570304"
}
],
"symlink_target": ""
} |
"""Module for working with EOS VRRP resources
The Vrrp resource provides configuration management of interface specific
vrrp resources on an EOS node. It provides the following class
implementations:
* Vrrp - Configure vrrps in EOS
Vrrp Attributes:
- enable (boolean): The shutdown state of the vrrp
- primary_ip (string): The ip address of the vrrp
- secondary_ip (dict): The secondary ip addresses configured for the vrrp
This is a dictionary in the format::
{ key: [ list of ip addresses ] }
where key is 'add', 'remove', or 'exists'. 'add' is
used to add the list of secondary ip addresses
to the vrrp. 'remove' will remove the list of
secondary ip addresses from the vrrp. 'exists' is
a report only key for retrieving the current
secondary ip addresses on a vrrp.
- priority (int): The priority rank of the vrrp
- description (string): The description for the vrrp
- ip_version (int): The ip version value for the vrrp
- timers_advertise (int): The timers advertise setting for the vrrp
- mac_addr_adv_interval (int): The mac-address advertisement-interval
setting for the vrrp
- preempt (boolean): The preempt state of the vrrp
- preempt_delay_min (int): The preempt delay minimum setting for the vrrp
- preempt_delay_reload (int): The preempt delay reload setting for the vrrp
- delay_reload (int): The delay reload setting for the vrrp
- track (list): The object tracking settings for the vrrp
- bfd_ip (string): The bfd ip set for the vrrp
Notes:
The get method will return a dictionary of all the currently configured
vrrps on a single interface, with the VRID of each vrrp as the keys
in the dictionary::
{
vrrp1: { data },
vrrp2: { data },
}
The getall method will return a dictionary of all the currently configured
vrrps on the node, with the interface name as the top-level keys, with
the VRIDs for each vrrp on an interface as a sub-key of that interface::
{
interface1: {
vrrp1: { data },
vrrp2: { data },
},
interface2: {
vrrp1: { data },
vrrp2: { data },
}
}
The data for a configured vrrp is a dictionary with the following format::
{
enable: <True|False>
primary_ip: <string>
priority: <int>
description: <string|None>
secondary_ip: {
exists: [ <ip string1>, <ip string2> ]
}
ip_version: <int>
timers_advertise: <int>
mac_addr_adv_interval: <int>
preempt: <True|False>
preempt_delay_min: <int>
preempt_delay_reload: <int>
delay_reload: <int>
track: [
{
name: <string>
action: <shutdown|decrement>
amount: <int>|default|no|None
},
{
name: <string>
action: <shutdown|decrement>
amount: <int>|default|no|None
},
]
bfd_ip: <string>
}
    The create method accepts a kwargs dictionary which
defines the properties to be applied to the new or existing vrrp
configuration. The available keywords and values are as follows:
- enable: True to enable (no shutdown)|False to disable (shutdown)
- primary_ip: <ip_string>|no|default|None
- priority: <int>|no|default|None
- description: <string>|no|default|None
- secondary_ip: <dict> may include the following
- add: <list of ip address strings>
- remove: <list of ip address strings>
- ip_version: <int>|no|default|None
- timers_advertise: <int>|no|default|None
- mac_addr_adv_interval: <int>|no|default|None
- preempt: True to enable (preempt)|False to disable (no preempt)
- preempt_delay_min: <int>|no|default|None
- preempt_delay_reload: <int>|no|default|None
- delay_reload: <int>|no|default|None
- track: <list> of dicts in the following format::
{
name: <string>
action: <shutdown|decrement>
amount: <int>|default|no|None
}
- bfd_ip: <ip string>|no|default|None
"""
import re
from pyeapi.api import EntityCollection
PROPERTIES = ['primary_ip', 'priority', 'description', 'secondary_ip',
'ip_version', 'enable', 'timers_advertise',
'mac_addr_adv_interval', 'preempt',
'preempt_delay_min', 'preempt_delay_reload',
'delay_reload', 'track', 'bfd_ip']
class Vrrp(EntityCollection):
"""The Vrrp class provides management of the VRRP configuration
The Vrrp class is derived from EntityCollection and provides an API for
working with the node's vrrp configurations.
"""
def get(self, name):
"""Get the vrrp configurations for a single node interface
Args:
name (string): The name of the interface for which vrrp
configurations will be retrieved.
Returns:
A dictionary containing the vrrp configurations on the interface.
Returns None if no vrrp configurations are defined or
if the interface is not configured.
"""
# Validate the interface and vrid are specified
interface = name
if not interface:
raise ValueError("Vrrp.get(): interface must contain a value.")
# Get the config for the interface. Return None if the
# interface is not defined
config = self.get_block('interface %s' % interface)
if config is None:
return config
# Find all occurrences of vrids in this interface and make
# a set of the unique vrid numbers
match = set(re.findall(r'^\s+(?:no |)vrrp (\d+)', config, re.M))
if not match:
return None
# Initialize the result dict
result = dict()
for vrid in match:
subd = dict()
# Parse the vrrp configuration for the vrid(s) in the list
subd.update(self._parse_delay_reload(config, vrid))
subd.update(self._parse_description(config, vrid))
subd.update(self._parse_enable(config, vrid))
subd.update(self._parse_ip_version(config, vrid))
subd.update(self._parse_mac_addr_adv_interval(config, vrid))
subd.update(self._parse_preempt(config, vrid))
subd.update(self._parse_preempt_delay_min(config, vrid))
subd.update(self._parse_preempt_delay_reload(config, vrid))
subd.update(self._parse_primary_ip(config, vrid))
subd.update(self._parse_priority(config, vrid))
subd.update(self._parse_secondary_ip(config, vrid))
subd.update(self._parse_timers_advertise(config, vrid))
subd.update(self._parse_track(config, vrid))
subd.update(self._parse_bfd_ip(config, vrid))
result.update({int(vrid): subd})
# If result dict is empty, return None, otherwise return result
return result if result else None
def getall(self):
"""Get the vrrp configurations for all interfaces on a node
Returns:
A dictionary containing the vrrp configurations on the node,
keyed by interface.
"""
vrrps = dict()
# Find the available interfaces
interfaces = re.findall(r'^interface\s(\S+)', self.config, re.M)
# Get the vrrps defined for each interface
for interface in interfaces:
vrrp = self.get(interface)
# Only add those interfaces that have vrrps defined
if vrrp:
vrrps.update({interface: vrrp})
return vrrps
def _parse_enable(self, config, vrid):
match = re.search(r'^\s+vrrp %s shutdown$' % vrid, config, re.M)
if match:
return dict(enable=False)
return dict(enable=True)
def _parse_primary_ip(self, config, vrid):
match = re.search(r'^\s+vrrp %s ip (\d+\.\d+\.\d+\.\d+)$' %
vrid, config, re.M)
value = match.group(1) if match else None
return dict(primary_ip=value)
def _parse_priority(self, config, vrid):
match = re.search(r'^\s+vrrp %s priority (\d+)$' % vrid, config, re.M)
value = int(match.group(1)) if match else None
return dict(priority=value)
def _parse_timers_advertise(self, config, vrid):
match = re.search(r'^\s+vrrp %s timers advertise (\d+)$' %
vrid, config, re.M)
value = int(match.group(1)) if match else None
return dict(timers_advertise=value)
def _parse_preempt(self, config, vrid):
match = re.search(r'^\s+vrrp %s preempt$' % vrid, config, re.M)
if match:
return dict(preempt=True)
return dict(preempt=False)
def _parse_secondary_ip(self, config, vrid):
matches = re.findall(r'^\s+vrrp %s ip (\d+\.\d+\.\d+\.\d+) '
r'secondary$' % vrid, config, re.M)
value = matches if matches else []
return dict(secondary_ip=value)
def _parse_description(self, config, vrid):
match = re.search(r'^\s+vrrp %s description(.*)$' %
vrid, config, re.M)
if match:
return dict(description=match.group(1).lstrip())
return dict(description='')
def _parse_mac_addr_adv_interval(self, config, vrid):
match = re.search(r'^\s+vrrp %s mac-address advertisement-interval '
r'(\d+)$' % vrid, config, re.M)
value = int(match.group(1)) if match else None
return dict(mac_addr_adv_interval=value)
def _parse_preempt_delay_min(self, config, vrid):
match = re.search(r'^\s+vrrp %s preempt delay minimum (\d+)$' %
vrid, config, re.M)
value = int(match.group(1)) if match else None
return dict(preempt_delay_min=value)
def _parse_preempt_delay_reload(self, config, vrid):
match = re.search(r'^\s+vrrp %s preempt delay reload (\d+)$' %
vrid, config, re.M)
value = int(match.group(1)) if match else None
return dict(preempt_delay_reload=value)
def _parse_bfd_ip(self, config, vrid):
match = re.search(r'^\s+vrrp %s bfd ip'
r'(?: (\d+\.\d+\.\d+\.\d+)|)$' %
vrid, config, re.M)
if match:
return dict(bfd_ip=match.group(1))
return dict(bfd_ip='')
def _parse_ip_version(self, config, vrid):
match = re.search(r'^\s+vrrp %s ip version (\d+)$' %
vrid, config, re.M)
value = int(match.group(1)) if match else None
return dict(ip_version=value)
def _parse_delay_reload(self, config, vrid):
match = re.search(r'^\s+vrrp %s delay reload (\d+)$' %
vrid, config, re.M)
value = int(match.group(1)) if match else None
return dict(delay_reload=value)
def _parse_track(self, config, vrid):
matches = re.findall(r'^\s+vrrp %s track (\S+) '
r'(decrement|shutdown)(?:( \d+$|$))' %
vrid, config, re.M)
value = []
for match in matches:
tr_obj = match[0]
action = match[1]
amount = None if match[2] == '' else int(match[2])
entry = {
'name': tr_obj,
'action': action,
}
if amount:
entry.update({'amount': amount})
value.append(entry)
# Return the list, sorted for easier comparison
track_list = sorted(value, key=lambda k: (k['name'], k['action']))
return dict(track=track_list)
def create(self, interface, vrid, **kwargs):
"""Creates a vrrp instance from an interface
Note:
This method will attempt to create a vrrp in the node's
operational config. If the vrrp already exists on the
interface, then this method will set the properties of
the existing vrrp to those that have been passed in, if
possible.
Args:
interface (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be created.
kwargs (dict): A dictionary specifying the properties to
be applied to the new vrrp instance. See library
documentation for available keys and values.
Returns:
True if the vrrp could be created otherwise False (see Node)
"""
if 'enable' not in kwargs:
kwargs['enable'] = False
return self._vrrp_set(interface, vrid, **kwargs)
def delete(self, interface, vrid):
"""Deletes a vrrp instance from an interface
Note:
This method will attempt to delete the vrrp from the node's
operational config. If the vrrp does not exist on the
interface then this method will not perform any changes
but still return True
Args:
interface (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be deleted.
Returns:
True if the vrrp could be deleted otherwise False (see Node)
"""
vrrp_str = "no vrrp %d" % vrid
return self.configure_interface(interface, vrrp_str)
def default(self, interface, vrid):
"""Defaults a vrrp instance from an interface
Note:
This method will attempt to default the vrrp on the node's
operational config. Default results in the deletion of the
specified vrrp . If the vrrp does not exist on the
interface then this method will not perform any changes
but still return True
Args:
interface (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be defaulted.
Returns:
True if the vrrp could be defaulted otherwise False (see Node)
"""
vrrp_str = "default vrrp %d" % vrid
return self.configure_interface(interface, vrrp_str)
def set_enable(self, name, vrid, value=False, run=True):
"""Set the enable property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (boolean): True to enable the vrrp, False to disable.
run (boolean): True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure
If run is False, returns the formatted command string which can
be passed to the node
"""
if value is False:
cmd = "vrrp %d shutdown" % vrid
elif value is True:
cmd = "no vrrp %d shutdown" % vrid
else:
raise ValueError("vrrp property 'enable' must be "
"True or False")
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def set_primary_ip(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the primary_ip property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (string): IP address to be set.
disable (boolean): Unset primary ip if True.
default (boolean): Set primary ip to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if default is True:
vrrps = self.get(name)
primary_ip = vrrps[vrid]['primary_ip']
cmd = "default vrrp %d ip %s" % (vrid, primary_ip)
elif disable is True or value is None:
vrrps = self.get(name)
primary_ip = vrrps[vrid]['primary_ip']
cmd = "no vrrp %d ip %s" % (vrid, primary_ip)
elif re.match(r'^\d+\.\d+\.\d+\.\d+$', str(value)):
cmd = "vrrp %d ip %s" % (vrid, value)
else:
raise ValueError("vrrp property 'primary_ip' must be "
"a properly formatted IP address")
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def set_priority(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the primary_ip property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (integer): Priority to assign to the vrrp.
disable (boolean): Unset priority if True.
default (boolean): Set priority to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if not default and not disable:
if not str(value).isdigit() or value < 1 or value > 254:
raise ValueError("vrrp property 'priority' must be "
"an integer in the range 1-254")
cmd = self.command_builder('vrrp %d priority' % vrid, value=value,
default=default, disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def set_description(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the description property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (string): Description to assign to the vrrp.
disable (boolean): Unset description if True.
default (boolean): Set description to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
cmd = self.command_builder('vrrp %d description' % vrid, value=value,
default=default, disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def set_ip_version(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the ip_version property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (integer): IP version to assign to the vrrp.
disable (boolean): Unset ip_version if True.
default (boolean): Set ip_version to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if not default and not disable:
if value not in (2, 3):
raise ValueError("vrrp property 'ip_version' must be 2 or 3")
cmd = self.command_builder('vrrp %d ip version' % vrid, value=value,
default=default, disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def set_secondary_ips(self, name, vrid, secondary_ips, run=True):
"""Configure the secondary_ip property of the vrrp
Notes:
set_secondary_ips takes a list of secondary ip addresses
            which are to be set on the virtual router. An empty list will
remove any existing secondary ip addresses from the vrrp.
A list containing addresses will configure the virtual router
with only the addresses specified in the list - any existing
addresses not included in the list will be removed.
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
secondary_ips (list): A list of secondary ip addresses to
be assigned to the virtual router.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
cmds = []
# Get the current set of tracks defined for the vrrp
curr_sec_ips = []
vrrps = self.get(name)
if vrrps and vrid in vrrps:
curr_sec_ips = vrrps[vrid]['secondary_ip']
# Validate the list of ip addresses
for sec_ip in secondary_ips:
if type(sec_ip) is not str or \
not re.match(r'^\d+\.\d+\.\d+\.\d+$', sec_ip):
raise ValueError("vrrp property 'secondary_ip' must be a list "
"of properly formatted ip address strings")
intersection = list(set(curr_sec_ips) & set(secondary_ips))
# Delete the intersection from both lists to determine which
# addresses need to be added or removed from the vrrp
remove = list(set(curr_sec_ips) - set(intersection))
add = list(set(secondary_ips) - set(intersection))
# Build the commands to add and remove the secondary ip addresses
for sec_ip in remove:
cmds.append("no vrrp %d ip %s secondary" % (vrid, sec_ip))
for sec_ip in add:
cmds.append("vrrp %d ip %s secondary" % (vrid, sec_ip))
cmds = sorted(cmds)
# Run the command if requested
if run:
result = self.configure_interface(name, cmds)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmds
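    # Illustrative call (assumed interface and addresses): make VRID 10 on
    # Vlan70 carry exactly these two secondary addresses, adding and removing
    # entries as needed:
    #     vrrp.set_secondary_ips('Vlan70', 10, ['10.10.10.21', '10.10.10.22'])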
def set_timers_advertise(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the ip_version property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (integer): Timers advertise value to assign to the vrrp.
disable (boolean): Unset timers advertise if True.
default (boolean): Set timers advertise to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if not default and not disable:
if not int(value) or int(value) < 1 or int(value) > 255:
raise ValueError("vrrp property 'timers_advertise' must be"
"in the range 1-255")
cmd = self.command_builder('vrrp %d timers advertise' % vrid,
value=value, default=default,
disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def set_mac_addr_adv_interval(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the mac_addr_adv_interval property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (integer): mac-address advertisement-interval value to
assign to the vrrp.
disable (boolean): Unset mac-address advertisement-interval
if True.
default (boolean): Set mac-address advertisement-interval to
default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if not default and not disable:
if not int(value) or int(value) < 1 or int(value) > 3600:
raise ValueError("vrrp property 'mac_addr_adv_interval' must "
"be in the range 1-3600")
cmd = self.command_builder('vrrp %d mac-address advertisement-interval'
% vrid, value=value, default=default,
disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def set_preempt(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the preempt property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (boolean): True to enable preempt, False to disable
preempt on the vrrp.
disable (boolean): Unset preempt if True.
default (boolean): Set preempt to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if not default and not disable:
if value is not True and value is not False:
raise ValueError("vrrp property 'preempt' must be True "
"or False")
cmd = self.command_builder('vrrp %d preempt' % vrid, value=value,
default=default, disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def set_preempt_delay_min(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the preempt_delay_min property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (integer): Preempt delay minimum value to set on the vrrp.
disable (boolean): Unset preempt delay minimum if True.
default (boolean): Set preempt delay minimum to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if not default and not disable:
if not int(value) or int(value) < 1 or int(value) > 3600:
raise ValueError("vrrp property 'preempt_delay_min' must be"
"in the range 0-3600 %r" % value)
cmd = self.command_builder('vrrp %d preempt delay minimum'
% vrid, value=value, default=default,
disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def set_preempt_delay_reload(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the preempt_delay_min property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (integer): Preempt delay reload value to set on the vrrp.
disable (boolean): Unset preempt delay reload if True.
default (boolean): Set preempt delay reload to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if not default and not disable:
if not int(value) or int(value) < 1 or int(value) > 3600:
raise ValueError("vrrp property 'preempt_delay_reload' must be"
"in the range 0-3600 %r" % value)
cmd = self.command_builder('vrrp %d preempt delay reload'
% vrid, value=value, default=default,
disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def set_delay_reload(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the preempt_delay_min property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (integer): Preempt delay reload value to set on the vrrp.
disable (boolean): Unset preempt delay reload if True.
default (boolean): Set preempt delay reload to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if not default and not disable:
if not int(value) or int(value) < 1 or int(value) > 3600:
raise ValueError("vrrp property 'delay_reload' must be"
"in the range 0-3600 %r" % value)
cmd = self.command_builder('vrrp %d delay reload' % vrid, value=value,
default=default, disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def set_tracks(self, name, vrid, tracks, run=True):
"""Configure the track property of the vrrp
Notes:
set_tracks takes a list of tracked objects which are
to be set on the virtual router. An empty list will remove
any existing tracked objects from the vrrp. A list containing
track entries configures the virtual router to track only the
objects specified in the list - any existing tracked objects
on the vrrp not included in the list will be removed.
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
tracks (list): A list of track definition dictionaries. Each
dictionary is a definition of a tracked object in one
of the two formats::
{'name': tracked_object_name,
'action': 'shutdown'}
{'name': tracked_object_name,
'action': 'decrement',
'amount': amount_of_decrement}
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
cmds = []
# Get the current set of tracks defined for the vrrp
curr_tracks = []
vrrps = self.get(name)
if vrrps and vrid in vrrps:
curr_tracks = vrrps[vrid]['track']
# Determine which tracked objects are in both lists using
# sets of temporary strings built from the track specifications
unset = '_none_'
tracks_set = []
for track in tracks:
keys = track.keys()
# Validate no extraneous keys in track definition
err_keys = set(keys).difference(('name', 'action', 'amount'))
if err_keys:
err_keys = ', '.join(err_keys)
raise ValueError("Error found in vrrp property 'track': "
"unknown key(s) '%s' found. Valid keys are "
"name, action, and amount" % err_keys)
# Validate required keys in track definition
if not set(keys).issuperset(('name', 'action')):
raise ValueError("Error found in vrrp property 'track': "
"track definition must contain 'name' and "
"'action' keys")
tr_obj = track['name']
action = track['action']
amount = track['amount'] if 'amount' in track else unset
# Validate values in track definition
error = False
if action not in ('shutdown', 'decrement'):
error = True
if action == 'shutdown' and amount != unset:
error = True
if amount != unset and not str(amount).isdigit():
error = True
if error:
raise ValueError("Error found in vrrp property 'track'. "
"See documentation for format specification.")
tid = "%s %s %s" % (tr_obj, action, amount)
tracks_set.append(tid)
curr_set = []
for track in curr_tracks:
tr_obj = track['name']
action = track['action']
amount = track['amount'] if 'amount' in track else unset
# Validate track definition
error = False
if action not in ('shutdown', 'decrement'):
error = True
if action == 'shutdown' and amount != unset:
error = True
if amount != unset and not str(amount).isdigit():
error = True
if error:
raise ValueError("Error found in vrrp property 'track'. "
"See documentation for format specification.")
tid = "%s %s %s" % (tr_obj, action, amount)
curr_set.append(tid)
intersection = list(set(tracks_set) & set(curr_set))
# Delete the intersection from both lists to determine which
# track definitions need to be added or removed from the vrrp
remove = list(set(curr_set) - set(intersection))
add = list(set(tracks_set) - set(intersection))
# Build the commands to add and remove the tracked objects
for track in remove:
match = re.match(r'(\S+)\s+(\S+)\s+(\S+)', track)
if match:
(tr_obj, action, amount) = \
(match.group(1), match.group(2), match.group(3))
if amount == unset:
amount = ''
t_cmd = ("no vrrp %d track %s %s %s"
% (vrid, tr_obj, action, amount))
cmds.append(t_cmd.rstrip())
for track in add:
match = re.match(r'(\S+)\s+(\S+)\s+(\S+)', track)
if match:
(tr_obj, action, amount) = \
(match.group(1), match.group(2), match.group(3))
if amount == unset:
amount = ''
t_cmd = ("vrrp %d track %s %s %s"
% (vrid, tr_obj, action, amount))
cmds.append(t_cmd.rstrip())
cmds = sorted(cmds)
# Run the command if requested
if run:
result = self.configure_interface(name, cmds)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmds
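    # Illustrative call (assumed interface and tracked-object names):
    #     vrrp.set_tracks('Vlan70', 10, [
    #         {'name': 'Ethernet1', 'action': 'decrement', 'amount': 10},
    #         {'name': 'Ethernet2', 'action': 'shutdown'},
    #     ])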
def set_bfd_ip(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the bfd_ip property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (string): The bfd ip address to be set.
disable (boolean): Unset bfd ip if True.
default (boolean): Set bfd ip to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if not default and not disable:
if not re.match(r'^\d+\.\d+\.\d+\.\d+$', str(value)):
raise ValueError("vrrp property 'bfd_ip' must be "
"a properly formatted IP address")
cmd = self.command_builder('vrrp %d bfd ip' % vrid, value=value,
default=default, disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
def _vrrp_set(self, name, vrid, **kwargs):
# Configure the commands to create or update a vrrp
# configuration, and send the commands to the node.
vrconf = kwargs
# step through the individual vrrp properties and
# set those that need to be changed
commands = []
enable = vrconf.get('enable', '__NONE__')
if enable != '__NONE__':
cmd = self.set_enable(name, vrid, value=enable, run=False)
commands.append(cmd)
primary_ip = vrconf.get('primary_ip', '__NONE__')
if primary_ip != '__NONE__':
if primary_ip in ('no', None):
cmd = self.set_primary_ip(name, vrid, value=None,
disable=True, run=False)
            elif primary_ip == 'default':
cmd = self.set_primary_ip(name, vrid, value=None,
default=True, run=False)
else:
cmd = self.set_primary_ip(name, vrid, value=primary_ip,
run=False)
commands.append(cmd)
priority = vrconf.get('priority', '__NONE__')
if priority != '__NONE__':
if priority in ('no', None):
cmd = self.set_priority(name, vrid, value=priority,
disable=True, run=False)
elif priority == 'default':
cmd = self.set_priority(name, vrid, value=priority,
default=True, run=False)
else:
cmd = self.set_priority(name, vrid, value=priority, run=False)
commands.append(cmd)
description = vrconf.get('description', '__NONE__')
if description != '__NONE__':
if description in ('no', None):
cmd = self.set_description(name, vrid, value=description,
disable=True, run=False)
elif description == 'default':
cmd = self.set_description(name, vrid, value=description,
default=True, run=False)
else:
cmd = self.set_description(name, vrid, value=description,
run=False)
commands.append(cmd)
ip_version = vrconf.get('ip_version', '__NONE__')
if ip_version != '__NONE__':
if ip_version in ('no', None):
cmd = self.set_ip_version(name, vrid, value=ip_version,
disable=True, run=False)
elif ip_version == 'default':
cmd = self.set_ip_version(name, vrid, value=ip_version,
default=True, run=False)
else:
cmd = self.set_ip_version(name, vrid, value=ip_version,
run=False)
commands.append(cmd)
secondary_ip = vrconf.get('secondary_ip', '__NONE__')
if secondary_ip != '__NONE__':
cmds = self.set_secondary_ips(name, vrid, secondary_ip, run=False)
for cmd in cmds:
commands.append(cmd)
timers_advertise = vrconf.get('timers_advertise', '__NONE__')
if timers_advertise != '__NONE__':
if timers_advertise in ('no', None):
cmd = self.set_timers_advertise(name, vrid,
value=timers_advertise,
disable=True, run=False)
elif timers_advertise == 'default':
cmd = self.set_timers_advertise(name, vrid,
value=timers_advertise,
default=True, run=False)
else:
cmd = self.set_timers_advertise(name, vrid,
value=timers_advertise,
run=False)
commands.append(cmd)
mac_addr_adv_interval = \
vrconf.get('mac_addr_adv_interval', '__NONE__')
if mac_addr_adv_interval != '__NONE__':
if mac_addr_adv_interval in ('no', None):
cmd = \
self.set_mac_addr_adv_interval(name, vrid,
value=mac_addr_adv_interval,
disable=True, run=False)
elif mac_addr_adv_interval == 'default':
cmd = \
self.set_mac_addr_adv_interval(name, vrid,
value=mac_addr_adv_interval,
default=True, run=False)
else:
cmd = \
self.set_mac_addr_adv_interval(name, vrid,
value=mac_addr_adv_interval,
run=False)
commands.append(cmd)
preempt = vrconf.get('preempt', '__NONE__')
if preempt != '__NONE__':
if preempt in ('no', False):
cmd = self.set_preempt(name, vrid, value=preempt,
disable=True, run=False)
elif preempt == 'default':
cmd = self.set_preempt(name, vrid, value=preempt,
default=True, run=False)
else:
cmd = self.set_preempt(name, vrid, value=preempt, run=False)
commands.append(cmd)
preempt_delay_min = vrconf.get('preempt_delay_min', '__NONE__')
if preempt_delay_min != '__NONE__':
if preempt_delay_min in ('no', None):
cmd = self.set_preempt_delay_min(name, vrid,
value=preempt_delay_min,
disable=True, run=False)
elif preempt_delay_min == 'default':
cmd = self.set_preempt_delay_min(name, vrid,
value=preempt_delay_min,
default=True, run=False)
else:
cmd = self.set_preempt_delay_min(name, vrid,
value=preempt_delay_min,
run=False)
commands.append(cmd)
preempt_delay_reload = vrconf.get('preempt_delay_reload', '__NONE__')
if preempt_delay_reload != '__NONE__':
if preempt_delay_reload in ('no', None):
cmd = self.set_preempt_delay_reload(name, vrid,
value=preempt_delay_reload,
disable=True, run=False)
elif preempt_delay_reload == 'default':
cmd = self.set_preempt_delay_reload(name, vrid,
value=preempt_delay_reload,
default=True, run=False)
else:
cmd = self.set_preempt_delay_reload(name, vrid,
value=preempt_delay_reload,
run=False)
commands.append(cmd)
delay_reload = vrconf.get('delay_reload', '__NONE__')
if delay_reload != '__NONE__':
if delay_reload in ('no', None):
cmd = self.set_delay_reload(name, vrid, value=delay_reload,
disable=True, run=False)
elif delay_reload == 'default':
cmd = self.set_delay_reload(name, vrid, value=delay_reload,
default=True, run=False)
else:
cmd = self.set_delay_reload(name, vrid, value=delay_reload,
run=False)
commands.append(cmd)
track = vrconf.get('track', '__NONE__')
if track != '__NONE__':
cmds = self.set_tracks(name, vrid, track, run=False)
for cmd in cmds:
commands.append(cmd)
bfd_ip = vrconf.get('bfd_ip', '__NONE__')
if bfd_ip != '__NONE__':
if bfd_ip in ('no', None):
cmd = self.set_bfd_ip(name, vrid, value=bfd_ip,
disable=True, run=False)
elif bfd_ip == 'default':
cmd = self.set_bfd_ip(name, vrid, value=bfd_ip,
default=True, run=False)
else:
cmd = self.set_bfd_ip(name, vrid, value=bfd_ip, run=False)
commands.append(cmd)
# Send the commands to the requested interface
result = self.configure_interface(name, commands)
# And verify the commands succeeded
if result is False:
return self.error
return result
def vrconf_format(self, vrconfig):
"""Formats a vrrp configuration dictionary to match the
information as presented from the get and getall methods.
vrrp configuration dictionaries passed to the create
method may contain data for setting properties which results
in a default value on the node. In these instances, the data
for setting or changing the property is replaced with the
value that would be returned from the get and getall methods.
Intended for validating updated vrrp configurations.
"""
fixed = dict(vrconfig)
# primary_ip: default, no, None results in address of 0.0.0.0
if fixed['primary_ip'] in ('no', 'default', None):
fixed['primary_ip'] = '0.0.0.0'
# priority: default, no, None results in priority of 100
if fixed['priority'] in ('no', 'default', None):
fixed['priority'] = 100
# description: default, no, None results in None
if fixed['description'] in ('no', 'default', None):
fixed['description'] = None
# secondary_ip: list should be exactly what is required,
# just sort it for easier comparison
if 'secondary_ip' in fixed:
fixed['secondary_ip'] = sorted(fixed['secondary_ip'])
# ip_version: default, no, None results in value of 2
if fixed['ip_version'] in ('no', 'default', None):
fixed['ip_version'] = 2
# timers_advertise: default, no, None results in value of 1
if fixed['timers_advertise'] in ('no', 'default', None):
fixed['timers_advertise'] = 1
        # mac_addr_adv_interval: default, no, None results in value of 30
if fixed['mac_addr_adv_interval'] in \
('no', 'default', None):
fixed['mac_addr_adv_interval'] = 30
# preempt: default, no results in value of False
if fixed['preempt'] in ('no', 'default'):
fixed['preempt'] = False
# preempt_delay_min: default, no, None results in value of 0
if fixed['preempt_delay_min'] in ('no', 'default', None):
fixed['preempt_delay_min'] = 0
# preempt_delay_reload: default, no, None results in value of 0
if fixed['preempt_delay_reload'] in ('no', 'default', None):
fixed['preempt_delay_reload'] = 0
# delay_reload: default, no, None results in value of 0
if fixed['delay_reload'] in ('no', 'default', None):
fixed['delay_reload'] = 0
# track: list should be exactly what is required,
# just sort it for easier comparison
if 'track' in fixed:
fixed['track'] = \
sorted(fixed['track'], key=lambda k: (k['name'], k['action']))
# bfd_ip: default, no, None results in ''
if fixed['bfd_ip'] in ('no', 'default', None):
fixed['bfd_ip'] = ''
return fixed
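    # Illustrative normalisation (assumed partial input): an entry such as
    #     {'primary_ip': 'default', 'priority': None, 'preempt': 'no', ...}
    # comes back as
    #     {'primary_ip': '0.0.0.0', 'priority': 100, 'preempt': False, ...}
    # i.e. the values get()/getall() would report for a defaulted vrrp.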
def instance(node):
"""Returns an instance of Vrrp
Args:
node (Node): The node argument passes an instance of Node to the
resource
Returns:
object: An instance of Vrrp
"""
return Vrrp(node)
| {
"content_hash": "f60fbf830f3d0835d15cd4812203c89c",
"timestamp": "",
"source": "github",
"line_count": 1370,
"max_line_length": 79,
"avg_line_length": 39.9978102189781,
"alnum_prop": 0.542383707137252,
"repo_name": "mith1979/ansible_automation",
"id": "a63af216d794e31bdc6c8c14b165d59ad8f4d927",
"size": "56327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/pyeapi/api/vrrp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
import numbers
from collections import defaultdict
import numpy as np
from astropy.utils import isiterable
from astropy.utils.decorators import lazyproperty
from ..low_level_api import BaseLowLevelWCS
from .base import BaseWCSWrapper
__all__ = ['sanitize_slices', 'SlicedLowLevelWCS']
def sanitize_slices(slices, ndim):
"""
    Given a slice as input sanitise it to an easier to parse format.
This function returns a list ``ndim`` long containing slice objects (or ints).
"""
if not isinstance(slices, (tuple, list)): # We just have a single int
slices = (slices,)
if len(slices) > ndim:
raise ValueError(
f"The dimensionality of the specified slice {slices} can not be greater "
f"than the dimensionality ({ndim}) of the wcs.")
if any((isiterable(s) for s in slices)):
raise IndexError("This slice is invalid, only integer or range slices are supported.")
slices = list(slices)
if Ellipsis in slices:
if slices.count(Ellipsis) > 1:
raise IndexError("an index can only have a single ellipsis ('...')")
# Replace the Ellipsis with the correct number of slice(None)s
e_ind = slices.index(Ellipsis)
slices.remove(Ellipsis)
n_e = ndim - len(slices)
for i in range(n_e):
ind = e_ind + i
slices.insert(ind, slice(None))
for i in range(ndim):
if i < len(slices):
slc = slices[i]
if isinstance(slc, slice):
if slc.step and slc.step != 1:
raise IndexError("Slicing WCS with a step is not supported.")
elif not isinstance(slc, numbers.Integral):
raise IndexError("Only integer or range slices are accepted.")
else:
slices.append(slice(None))
return slices
def combine_slices(slice1, slice2):
"""
Given two slices that can be applied to a 1-d array, find the resulting
slice that corresponds to the combination of both slices. We assume that
slice2 can be an integer, but slice1 cannot.
"""
if isinstance(slice1, slice) and slice1.step is not None:
raise ValueError('Only slices with steps of 1 are supported')
if isinstance(slice2, slice) and slice2.step is not None:
raise ValueError('Only slices with steps of 1 are supported')
if isinstance(slice2, numbers.Integral):
if slice1.start is None:
return slice2
else:
return slice2 + slice1.start
if slice1.start is None:
if slice1.stop is None:
return slice2
else:
if slice2.stop is None:
return slice(slice2.start, slice1.stop)
else:
return slice(slice2.start, min(slice1.stop, slice2.stop))
else:
if slice2.start is None:
start = slice1.start
else:
start = slice1.start + slice2.start
if slice2.stop is None:
stop = slice1.stop
else:
if slice1.start is None:
stop = slice2.stop
else:
stop = slice2.stop + slice1.start
if slice1.stop is not None:
stop = min(slice1.stop, stop)
return slice(start, stop)
class SlicedLowLevelWCS(BaseWCSWrapper):
"""
A Low Level WCS wrapper which applies an array slice to a WCS.
This class does not modify the underlying WCS object and can therefore drop
coupled dimensions as it stores which pixel and world dimensions have been
sliced out (or modified) in the underlying WCS and returns the modified
results on all the Low Level WCS methods.
Parameters
----------
wcs : `~astropy.wcs.wcsapi.BaseLowLevelWCS`
The WCS to slice.
slices : `slice` or `tuple` or `int`
A valid array slice to apply to the WCS.
"""
def __init__(self, wcs, slices):
slices = sanitize_slices(slices, wcs.pixel_n_dim)
if isinstance(wcs, SlicedLowLevelWCS):
# Here we combine the current slices with the previous slices
# to avoid ending up with many nested WCSes
self._wcs = wcs._wcs
slices_original = wcs._slices_array.copy()
for ipixel in range(wcs.pixel_n_dim):
ipixel_orig = wcs._wcs.pixel_n_dim - 1 - wcs._pixel_keep[ipixel]
ipixel_new = wcs.pixel_n_dim - 1 - ipixel
slices_original[ipixel_orig] = combine_slices(slices_original[ipixel_orig],
slices[ipixel_new])
self._slices_array = slices_original
else:
self._wcs = wcs
self._slices_array = slices
self._slices_pixel = self._slices_array[::-1]
# figure out which pixel dimensions have been kept, then use axis correlation
# matrix to figure out which world dims are kept
self._pixel_keep = np.nonzero([not isinstance(self._slices_pixel[ip], numbers.Integral)
for ip in range(self._wcs.pixel_n_dim)])[0]
# axis_correlation_matrix[world, pixel]
self._world_keep = np.nonzero(
self._wcs.axis_correlation_matrix[:, self._pixel_keep].any(axis=1))[0]
if len(self._pixel_keep) == 0 or len(self._world_keep) == 0:
raise ValueError("Cannot slice WCS: the resulting WCS should have "
"at least one pixel and one world dimension.")
@lazyproperty
def dropped_world_dimensions(self):
"""
Information describing the dropped world dimensions.
"""
world_coords = self._pixel_to_world_values_all(*[0]*len(self._pixel_keep))
dropped_info = defaultdict(list)
for i in range(self._wcs.world_n_dim):
if i in self._world_keep:
continue
if "world_axis_object_classes" not in dropped_info:
dropped_info["world_axis_object_classes"] = dict()
wao_classes = self._wcs.world_axis_object_classes
wao_components = self._wcs.world_axis_object_components
dropped_info["value"].append(world_coords[i])
dropped_info["world_axis_names"].append(self._wcs.world_axis_names[i])
dropped_info["world_axis_physical_types"].append(self._wcs.world_axis_physical_types[i])
dropped_info["world_axis_units"].append(self._wcs.world_axis_units[i])
dropped_info["world_axis_object_components"].append(wao_components[i])
dropped_info["world_axis_object_classes"].update(dict(
filter(
lambda x: x[0] == wao_components[i][0], wao_classes.items()
)
))
dropped_info["serialized_classes"] = self.serialized_classes
return dict(dropped_info)
@property
def pixel_n_dim(self):
return len(self._pixel_keep)
@property
def world_n_dim(self):
return len(self._world_keep)
@property
def world_axis_physical_types(self):
return [self._wcs.world_axis_physical_types[i] for i in self._world_keep]
@property
def world_axis_units(self):
return [self._wcs.world_axis_units[i] for i in self._world_keep]
@property
def pixel_axis_names(self):
return [self._wcs.pixel_axis_names[i] for i in self._pixel_keep]
@property
def world_axis_names(self):
return [self._wcs.world_axis_names[i] for i in self._world_keep]
def _pixel_to_world_values_all(self, *pixel_arrays):
pixel_arrays = tuple(map(np.asanyarray, pixel_arrays))
pixel_arrays_new = []
ipix_curr = -1
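        # Pixel dimensions removed by an integer slice are pinned to that fixed
        # value; kept dimensions consume the next input array, shifted by the
        # slice start so coordinates refer to the original (unsliced) WCS.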
for ipix in range(self._wcs.pixel_n_dim):
if isinstance(self._slices_pixel[ipix], numbers.Integral):
pixel_arrays_new.append(self._slices_pixel[ipix])
else:
ipix_curr += 1
if self._slices_pixel[ipix].start is not None:
pixel_arrays_new.append(pixel_arrays[ipix_curr] + self._slices_pixel[ipix].start)
else:
pixel_arrays_new.append(pixel_arrays[ipix_curr])
pixel_arrays_new = np.broadcast_arrays(*pixel_arrays_new)
return self._wcs.pixel_to_world_values(*pixel_arrays_new)
def pixel_to_world_values(self, *pixel_arrays):
world_arrays = self._pixel_to_world_values_all(*pixel_arrays)
# Detect the case of a length 0 array
if isinstance(world_arrays, np.ndarray) and not world_arrays.shape:
return world_arrays
if self._wcs.world_n_dim > 1:
# Select the dimensions of the original WCS we are keeping.
world_arrays = [world_arrays[iw] for iw in self._world_keep]
# If there is only one world dimension (after slicing) we shouldn't return a tuple.
if self.world_n_dim == 1:
world_arrays = world_arrays[0]
return world_arrays
def world_to_pixel_values(self, *world_arrays):
world_arrays = tuple(map(np.asanyarray, world_arrays))
world_arrays_new = []
iworld_curr = -1
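        # World dimensions that were dropped by the slice get a placeholder
        # value of 1.0 so the underlying WCS can still be evaluated.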
for iworld in range(self._wcs.world_n_dim):
if iworld in self._world_keep:
iworld_curr += 1
world_arrays_new.append(world_arrays[iworld_curr])
else:
world_arrays_new.append(1.)
world_arrays_new = np.broadcast_arrays(*world_arrays_new)
pixel_arrays = list(self._wcs.world_to_pixel_values(*world_arrays_new))
for ipixel in range(self._wcs.pixel_n_dim):
if isinstance(self._slices_pixel[ipixel], slice) and self._slices_pixel[ipixel].start is not None:
pixel_arrays[ipixel] -= self._slices_pixel[ipixel].start
# Detect the case of a length 0 array
if isinstance(pixel_arrays, np.ndarray) and not pixel_arrays.shape:
return pixel_arrays
pixel = tuple(pixel_arrays[ip] for ip in self._pixel_keep)
if self.pixel_n_dim == 1 and self._wcs.pixel_n_dim > 1:
pixel = pixel[0]
return pixel
@property
def world_axis_object_components(self):
return [self._wcs.world_axis_object_components[idx] for idx in self._world_keep]
@property
def world_axis_object_classes(self):
keys_keep = [item[0] for item in self.world_axis_object_components]
return dict([item for item in self._wcs.world_axis_object_classes.items() if item[0] in keys_keep])
@property
def array_shape(self):
if self._wcs.array_shape:
return np.broadcast_to(0, self._wcs.array_shape)[tuple(self._slices_array)].shape
@property
def pixel_shape(self):
if self.array_shape:
return tuple(self.array_shape[::-1])
@property
def pixel_bounds(self):
if self._wcs.pixel_bounds is None:
return
bounds = []
for idx in self._pixel_keep:
if self._slices_pixel[idx].start is None:
bounds.append(self._wcs.pixel_bounds[idx])
else:
imin, imax = self._wcs.pixel_bounds[idx]
start = self._slices_pixel[idx].start
bounds.append((imin - start, imax - start))
return tuple(bounds)
@property
def axis_correlation_matrix(self):
return self._wcs.axis_correlation_matrix[self._world_keep][:, self._pixel_keep]
| {
"content_hash": "c67db95b0d1fd3b2399f7d8efaf9d69f",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 110,
"avg_line_length": 37.282467532467535,
"alnum_prop": 0.6007140991030219,
"repo_name": "saimn/astropy",
"id": "21169377f0015e5a4676248da7e5bf4dffd3030d",
"size": "11483",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/wcs/wcsapi/wrappers/sliced_wcs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11034753"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78631"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52457"
},
{
"name": "Python",
"bytes": "12214998"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
from cosmo_tester.framework.testenv import bootstrap, teardown
def setUp():
bootstrap()
def tearDown():
teardown()
| {
"content_hash": "884f98bb1c977d688e02d942f5611980",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 62,
"avg_line_length": 14.11111111111111,
"alnum_prop": 0.7086614173228346,
"repo_name": "isaac-s/cloudify-system-tests",
"id": "ee6fe3f6146737d2a80f26a2cabfd6974bc2146c",
"size": "772",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cosmo_tester/test_suites/ec2_test_blueprints/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "441"
},
{
"name": "Clojure",
"bytes": "290"
},
{
"name": "Puppet",
"bytes": "154"
},
{
"name": "Python",
"bytes": "336635"
},
{
"name": "Ruby",
"bytes": "1104"
},
{
"name": "Shell",
"bytes": "3795"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from hnotebook.accounts.factories import ProfileFactory
from .factories import NotebookFactory, HousingFactory
from .models import Notebook, Housing, Review, Note
class NotebookModelTestCase(TestCase):
def setUp(self):
pass
def test_notebook_private_field_is_false_as_default(self):
notebook = Notebook.objects.create(name='testnotebook')
self.assertEqual(notebook.private, False)
def test_notebook_string_representation_shows_name(self):
notebook = Notebook.objects.create(name='testnotebook')
self.assertIn("testnotebook", str(notebook))
class HousingModelTestCase(TestCase):
def setUp(self):
pass
def test_housing_instantiation(self):
notebook = NotebookFactory()
housing = Housing(
notebook=notebook,
category=Housing.RENT,
property_type=Housing.FLAT,
description="example description",
town="Some city",
address="Some address",
price="700",
currency="$",
surface=50,
num_rooms=2,
)
self.assertIn("Rent", str(housing))
self.assertIn("Flat", str(housing))
self.assertIn("$", str(housing))
self.assertIn("700", str(housing))
class ReviewModelTestCase(TestCase):
def setUp(self):
self.profile = ProfileFactory()
self.commenter_profile = ProfileFactory()
self.notebook = NotebookFactory(name="testenotebook")
self.profile.notebooks.add(self.notebook)
self.housing = HousingFactory()
def test_add_review_to_housing(self):
review = Review.objects.create(
housing=self.housing,
commenter=self.commenter_profile.user,
rating=5,
text='Awesome flat'
)
self.assertEqual(review.text, 'Awesome flat')
self.assertIn(review.commenter.username, str(review))
self.assertIn(str(review.rating), str(review))
self.assertIn(str(review.datetime), str(review))
class NoteModelTestCase(TestCase):
def setUp(self):
self.profile = ProfileFactory()
self.commenter_profile = ProfileFactory()
self.notebook = NotebookFactory(name="testenotebook")
self.profile.notebooks.add(self.notebook)
self.housing = HousingFactory()
    def test_add_note_to_housing(self):
note = Note.objects.create(
housing=self.housing,
text='contacted owner'
)
self.assertEqual(note.text, 'contacted owner')
self.assertIn(str(note.datetime), str(note))
| {
"content_hash": "930e18a36f634697e0aca0ce4eda3a0b",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 63,
"avg_line_length": 31.476190476190474,
"alnum_prop": 0.6388048411497731,
"repo_name": "marcwebbie/hnotebook",
"id": "0cb4b3f4b3e3a905ddac43d6b4b34a44e822fc8c",
"size": "2644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hnotebook/notebooks/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26675"
}
],
"symlink_target": ""
} |
"""This app contains the views and models for expression data
Expression Objects
------------------
For microarray results (metadata in :class:`~experiments.models.MicroArrayExperiment`) there is only *gene* level data, as specified by the specific probe used.
For RNAseq results (metadata in :class:`~experiments.models.mRNASeqExperiment`), there is aggregated data at the level of the *gene*, *transcript*, *exon*, *promoter* and *splice site*.
Currently we are only able to work with gene level data.
Types of Data
-------------
The database can contain two types of data:
* SampleData level data, such as how many (hopefully normalized) counts are in each sample for each gene.
* ExperimentData, which includes the average counts for each group as well as statistical tests for differential expression.
""" | {
"content_hash": "319d67e09d583ad0e0dd36d037f0c745",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 185,
"avg_line_length": 47.94117647058823,
"alnum_prop": 0.7521472392638037,
"repo_name": "davebridges/expression-data-server",
"id": "24fa6daee42e83a11bfb3a089f57778b3b32c239",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expression_data/data/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "50811"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
if sys.version >= '3':
long = int
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS
from pyspark.sql import Row
# $example off$
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("ALSExample")\
.getOrCreate()
# $example on$
lines = spark.read.text("data/mllib/als/sample_movielens_ratings.txt").rdd
parts = lines.map(lambda row: row.value.split("::"))
ratingsRDD = parts.map(lambda p: Row(userId=int(p[0]), movieId=int(p[1]),
rating=float(p[2]), timestamp=long(p[3])))
ratings = spark.createDataFrame(ratingsRDD)
(training, test) = ratings.randomSplit([0.8, 0.2])
# Build the recommendation model using ALS on the training data
# Note we set cold start strategy to 'drop' to ensure we don't get NaN evaluation metrics
als = ALS(maxIter=5, regParam=0.01, userCol="userId", itemCol="movieId", ratingCol="rating",
coldStartStrategy="drop")
model = als.fit(training)
# Evaluate the model by computing the RMSE on the test data
predictions = model.transform(test)
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
predictionCol="prediction")
rmse = evaluator.evaluate(predictions)
print("Root-mean-square error = " + str(rmse))
# Generate top 10 movie recommendations for each user
userRecs = model.recommendForAllUsers(10)
# Generate top 10 user recommendations for each movie
movieRecs = model.recommendForAllItems(10)
# $example off$
userRecs.show()
movieRecs.show()
spark.stop()
| {
"content_hash": "20c23214d5d748889df8b8f8c1e0dc75",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 96,
"avg_line_length": 36.03921568627451,
"alnum_prop": 0.6414581066376496,
"repo_name": "wangyixiaohuihui/spark2-annotation",
"id": "dede3f39431678277ddefa853c1efdc84d603616",
"size": "2638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/src/main/python/ml/als_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "33815"
},
{
"name": "Batchfile",
"bytes": "24294"
},
{
"name": "C",
"bytes": "1542"
},
{
"name": "CSS",
"bytes": "23957"
},
{
"name": "HTML",
"bytes": "10012"
},
{
"name": "HiveQL",
"bytes": "1828674"
},
{
"name": "Java",
"bytes": "3737029"
},
{
"name": "JavaScript",
"bytes": "143063"
},
{
"name": "Makefile",
"bytes": "7980"
},
{
"name": "PLpgSQL",
"bytes": "9666"
},
{
"name": "PowerShell",
"bytes": "3751"
},
{
"name": "Python",
"bytes": "2248750"
},
{
"name": "R",
"bytes": "1027534"
},
{
"name": "Roff",
"bytes": "14420"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "22897473"
},
{
"name": "Shell",
"bytes": "156941"
},
{
"name": "Thrift",
"bytes": "33665"
},
{
"name": "q",
"bytes": "147332"
}
],
"symlink_target": ""
} |
from flask_restful_swagger import swagger
from sqlalchemy import bindparam
from manager_rest import manager_exceptions
from manager_rest.rest import (
resources_v1,
rest_decorators,
)
from manager_rest.storage.models_base import db
from manager_rest.storage.resource_models import (
Deployment,
Execution,
Event,
Log,
)
from manager_rest.storage import ListResult
class Events(resources_v1.Events):
"""Events resource.
Through the events endpoint a user can retrieve both events and logs as
stored in the SQL database.
"""
@swagger.operation(
responseclass='List[Event]',
nickname="list events",
notes='Returns a list of events for optionally provided filters'
)
@rest_decorators.exceptions_handled
@rest_decorators.marshal_events
@rest_decorators.create_filters()
@rest_decorators.paginate
@rest_decorators.rangeable
@rest_decorators.projection
@rest_decorators.sortable()
def get(self, _include=None, filters=None,
pagination=None, sort=None, range_filters=None, **kwargs):
"""List events using a SQL backend.
:param _include:
Projection used to get records from database (not currently used)
:type _include: list(str)
:param filters:
Filter selection.
It's used to decide if events:
{'type': ['cloudify_event']}
or both events and logs should be returned:
{'type': ['cloudify_event', 'cloudify_log']}
Also it's used to get only events for a particular execution:
{'execution_id': '<some uuid>'}
:type filters: dict(str, str)
:param pagination:
Parameters used to limit results returned in a single query.
Expected values `size` and `offset` are mapped into SQL as `LIMIT`
and `OFFSET`.
:type pagination: dict(str, int)
:param sort:
Result sorting order. The only allowed and expected value is to
sort by timestamp in ascending order:
{'timestamp': 'asc'}
:type sort: dict(str, str)
        :param range_filters:
            Originally intended to select a timestamp interval; not
            currently used.
        :type range_filters: dict(str)
        :returns: Events that match the conditions passed as arguments
        :rtype: :class:`manager_rest.storage.storage_manager.ListResult`
"""
size = pagination.get('size', self.DEFAULT_SEARCH_SIZE)
offset = pagination.get('offset', 0)
params = {
'limit': size,
'offset': offset,
}
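        # Count matching rows first so the pagination metadata can report the
        # total, then fetch only the requested page.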
count_query = self._build_count_query(filters, range_filters,
self.current_tenant.id)
total = count_query.params(**params).scalar()
select_query = self._build_select_query(filters, sort, range_filters,
self.current_tenant.id)
results = [
self._map_event_to_dict(_include, event)
for event in select_query.params(**params).all()
]
metadata = {
'pagination': {
'size': size,
'offset': offset,
'total': total,
}
}
return ListResult(results, metadata)
@rest_decorators.exceptions_handled
def post(self):
raise manager_exceptions.MethodNotAllowedError()
@swagger.operation(
responseclass='List[Event]',
nickname="delete events",
notes='Deletes events according to a passed Deployment ID'
)
@rest_decorators.exceptions_handled
@rest_decorators.marshal_events
@rest_decorators.create_filters()
@rest_decorators.paginate
@rest_decorators.rangeable
@rest_decorators.projection
@rest_decorators.sortable()
def delete(self, filters=None, pagination=None, sort=None,
range_filters=None, **kwargs):
"""Delete events/logs connected to a certain Deployment ID."""
if not isinstance(filters, dict) or 'type' not in filters:
raise manager_exceptions.BadParametersError(
'Filter by type is expected')
if 'cloudify_event' not in filters['type']:
raise manager_exceptions.BadParametersError(
'At least `type=cloudify_event` filter is expected')
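        # Subquery selecting the storage ids of every execution that belongs to
        # the requested deployment in the current tenant; events (and optionally
        # logs) are deleted by matching against it below.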
executions_query = (
db.session.query(Execution._storage_id)
.filter(
Execution._deployment_fk == Deployment._storage_id,
Deployment.id == bindparam('deployment_id'),
Execution._tenant_id == bindparam('tenant_id')
)
)
params = {
'deployment_id': filters['deployment_id'][0],
'tenant_id': self.current_tenant.id
}
delete_event_query = (
db.session.query(Event)
.filter(
Event._execution_fk.in_(executions_query),
Event._tenant_id == bindparam('tenant_id')
)
.params(**params)
)
total = delete_event_query.delete(synchronize_session=False)
if 'cloudify_log' in filters['type']:
delete_log_query = (
db.session.query(Log)
.filter(
Log._execution_fk.in_(executions_query),
Log._tenant_id == bindparam('tenant_id')
)
.params(**params)
)
total += delete_log_query.delete('fetch')
metadata = {
'pagination': dict(pagination, total=total)
}
# Commit bulk row deletions to database
db.session.commit()
# We don't really want to return all of the deleted events,
# so it's a bit of a hack to return the deleted element count.
return ListResult([total], metadata)
| {
"content_hash": "c44a0b1a3c14179b0bea356da0c7d9b4",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 78,
"avg_line_length": 34.61363636363637,
"alnum_prop": 0.5838804990151018,
"repo_name": "isaac-s/cloudify-manager",
"id": "a44b77a2fd7bdad054b33bb1f2ad95bf50fdab7a",
"size": "6732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest-service/manager_rest/rest/resources_v2/events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "4067"
},
{
"name": "Mako",
"bytes": "541"
},
{
"name": "Python",
"bytes": "1793118"
},
{
"name": "Ruby",
"bytes": "40193"
},
{
"name": "Shell",
"bytes": "41526"
}
],
"symlink_target": ""
} |
from threading import Thread, Lock, Semaphore
# current_thread for python 2.6, currentThread for python 2.5
try:
from threading import current_thread
except ImportError:
from threading import currentThread as current_thread
# Queue for python 2, queue for python 3
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
import struct
import socket
import types
import sys
import time
# use normal tuples instead of namedtuples in python version below 2.6
if sys.hexversion < 0x02060000:
def namedtuple(typename, field_names, verbose=False, rename=False):
def ntuple(*args):
return args
return ntuple
else:
from collections import namedtuple
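# Helpers for unpacking fields of the 8-byte packet header: UID (4 bytes),
# length (1), function id (1), sequence number/options (1) and flags, whose
# top two bits carry the error code in responses.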
def get_uid_from_data(data):
return struct.unpack('<I', data[0:4])[0]
def get_length_from_data(data):
return struct.unpack('<B', data[4:5])[0]
def get_function_id_from_data(data):
return struct.unpack('<B', data[5:6])[0]
def get_sequence_number_from_data(data):
return (struct.unpack('<B', data[6:7])[0] >> 4) & 0x0F
def get_error_code_from_data(data):
return (struct.unpack('<B', data[7:8])[0] >> 6) & 0x03
BASE58 = '123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'
def base58encode(value):
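    # Standard base conversion: repeatedly divide by 58 and prepend the
    # matching character from the BASE58 alphabet above.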
encoded = ''
while value >= 58:
div, mod = divmod(value, 58)
encoded = BASE58[mod] + encoded
value = div
encoded = BASE58[value] + encoded
return encoded
def base58decode(encoded):
value = 0
column_multiplier = 1
for c in encoded[::-1]:
column = BASE58.index(c)
value += column * column_multiplier
column_multiplier *= 58
return value
def uid64_to_uid32(uid64):
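    # Pack selected bit fields from both 32-bit halves of the 64-bit UID into
    # a single 32-bit UID.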
value1 = uid64 & 0xFFFFFFFF
value2 = (uid64 >> 32) & 0xFFFFFFFF
uid32 = (value1 & 0x00000FFF)
uid32 |= (value1 & 0x0F000000) >> 12
uid32 |= (value2 & 0x0000003F) << 16
uid32 |= (value2 & 0x000F0000) << 6
uid32 |= (value2 & 0x3F000000) << 2
return uid32
class Error(Exception):
TIMEOUT = -1
NOT_ADDED = -6 # obsolete since v2.0
ALREADY_CONNECTED = -7
NOT_CONNECTED = -8
INVALID_PARAMETER = -9
NOT_SUPPORTED = -10
UNKNOWN_ERROR_CODE = -11
def __init__(self, value, description):
self.value = value
self.description = description
def __str__(self):
return str(self.value) + ': ' + str(self.description)
class Device:
RESPONSE_EXPECTED_INVALID_FUNCTION_ID = 0
RESPONSE_EXPECTED_ALWAYS_TRUE = 1 # getter
RESPONSE_EXPECTED_ALWAYS_FALSE = 2 # callback
RESPONSE_EXPECTED_TRUE = 3 # setter
RESPONSE_EXPECTED_FALSE = 4 # setter, default
def __init__(self, uid, ipcon):
"""
Creates the device object with the unique device ID *uid* and adds
it to the IPConnection *ipcon*.
"""
uid_ = base58decode(uid)
if uid_ > 0xFFFFFFFF:
uid_ = uid64_to_uid32(uid_)
self.uid = uid_
self.ipcon = ipcon
self.api_version = (0, 0, 0)
self.registered_callbacks = {}
self.callback_formats = {}
self.expected_response_function_id = None # protected by request_lock
self.expected_response_sequence_number = None # protected by request_lock
self.response_queue = Queue()
self.request_lock = Lock()
self.auth_key = None
self.response_expected = [Device.RESPONSE_EXPECTED_INVALID_FUNCTION_ID] * 256
self.response_expected[IPConnection.FUNCTION_ENUMERATE] = Device.RESPONSE_EXPECTED_ALWAYS_FALSE
self.response_expected[IPConnection.FUNCTION_ADC_CALIBRATE] = Device.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[IPConnection.FUNCTION_GET_ADC_CALIBRATION] = Device.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[IPConnection.FUNCTION_READ_BRICKLET_UID] = Device.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[IPConnection.FUNCTION_WRITE_BRICKLET_UID] = Device.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[IPConnection.FUNCTION_READ_BRICKLET_PLUGIN] = Device.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[IPConnection.FUNCTION_WRITE_BRICKLET_PLUGIN] = Device.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[IPConnection.CALLBACK_ENUMERATE] = Device.RESPONSE_EXPECTED_ALWAYS_FALSE
ipcon.devices[self.uid] = self # FIXME: maybe use a weakref here
def get_api_version(self):
"""
Returns the API version (major, minor, revision) of the bindings for
this device.
"""
return self.api_version
def get_response_expected(self, function_id):
"""
Returns the response expected flag for the function specified by the
*function_id* parameter. It is *true* if the function is expected to
send a response, *false* otherwise.
For getter functions this is enabled by default and cannot be disabled,
because those functions will always send a response. For callback
configuration functions it is enabled by default too, but can be
disabled via the set_response_expected function. For setter functions
it is disabled by default and can be enabled.
        Enabling the response expected flag for a setter function makes it
        possible to detect timeouts and other error conditions for calls of
        this setter as well. The device will then send a response for this
        purpose. If this flag is disabled for a setter function then no
        response is sent and errors are silently ignored, because they
        cannot be detected.
"""
if function_id < 0 or function_id >= len(self.response_expected):
raise ValueError('Function ID {0} out of range'.format(function_id))
flag = self.response_expected[function_id]
if flag == Device.RESPONSE_EXPECTED_INVALID_FUNCTION_ID:
raise ValueError('Invalid function ID {0}'.format(function_id))
return flag in [Device.RESPONSE_EXPECTED_ALWAYS_TRUE, Device.RESPONSE_EXPECTED_TRUE]
def set_response_expected(self, function_id, response_expected):
"""
Changes the response expected flag of the function specified by the
*function_id* parameter. This flag can only be changed for setter
(default value: *false*) and callback configuration functions
        (default value: *true*). For getter functions it is always enabled
        and for callbacks it is always disabled.
        Enabling the response expected flag for a setter function makes it
        possible to detect timeouts and other error conditions for calls of
        this setter as well. The device will then send a response for this
        purpose. If this flag is disabled for a setter function then no
        response is sent and errors are silently ignored, because they
        cannot be detected.
"""
if function_id < 0 or function_id >= len(self.response_expected):
raise ValueError('Function ID {0} out of range'.format(function_id))
flag = self.response_expected[function_id]
if flag == Device.RESPONSE_EXPECTED_INVALID_FUNCTION_ID:
raise ValueError('Invalid function ID {0}'.format(function_id))
if flag in [Device.RESPONSE_EXPECTED_ALWAYS_TRUE, Device.RESPONSE_EXPECTED_ALWAYS_FALSE]:
raise ValueError('Response Expected flag cannot be changed for function ID {0}'.format(function_id))
if bool(response_expected):
self.response_expected[function_id] = Device.RESPONSE_EXPECTED_TRUE
else:
self.response_expected[function_id] = Device.RESPONSE_EXPECTED_FALSE
def set_response_expected_all(self, response_expected):
"""
Changes the response expected flag for all setter and callback
configuration functions of this device at once.
"""
if bool(response_expected):
flag = Device.RESPONSE_EXPECTED_TRUE
else:
flag = Device.RESPONSE_EXPECTED_FALSE
for i in range(len(self.response_expected)):
if self.response_expected[i] in [Device.RESPONSE_EXPECTED_TRUE, Device.RESPONSE_EXPECTED_FALSE]:
self.response_expected[i] = flag
class IPConnection:
FUNCTION_ENUMERATE = 254
FUNCTION_ADC_CALIBRATE = 251
FUNCTION_GET_ADC_CALIBRATION = 250
FUNCTION_READ_BRICKLET_UID = 249
FUNCTION_WRITE_BRICKLET_UID = 248
FUNCTION_READ_BRICKLET_PLUGIN = 247
FUNCTION_WRITE_BRICKLET_PLUGIN = 246
FUNCTION_DISCONNECT_PROBE = 128
CALLBACK_ENUMERATE = 253
CALLBACK_CONNECTED = 0
CALLBACK_DISCONNECTED = 1
BROADCAST_UID = 0
PLUGIN_CHUNK_SIZE = 32
# enumeration_type parameter to the enumerate callback
ENUMERATION_TYPE_AVAILABLE = 0
ENUMERATION_TYPE_CONNECTED = 1
ENUMERATION_TYPE_DISCONNECTED = 2
# connect_reason parameter to the connected callback
CONNECT_REASON_REQUEST = 0
CONNECT_REASON_AUTO_RECONNECT = 1
# disconnect_reason parameter to the disconnected callback
DISCONNECT_REASON_REQUEST = 0
DISCONNECT_REASON_ERROR = 1
DISCONNECT_REASON_SHUTDOWN = 2
# returned by get_connection_state
CONNECTION_STATE_DISCONNECTED = 0
CONNECTION_STATE_CONNECTED = 1
CONNECTION_STATE_PENDING = 2 # auto-reconnect in process
QUEUE_EXIT = 0
QUEUE_META = 1
QUEUE_PACKET = 2
DISCONNECT_PROBE_INTERVAL = 5
class CallbackContext:
def __init__(self):
self.queue = None
self.thread = None
self.packet_dispatch_allowed = False
self.lock = None
def __init__(self):
"""
Creates an IP Connection object that can be used to enumerate the available
devices. It is also required for the constructor of Bricks and Bricklets.
"""
self.host = None
self.port = None
self.timeout = 2.5
self.auto_reconnect = True
self.auto_reconnect_allowed = False
self.auto_reconnect_pending = False
self.sequence_number_lock = Lock()
self.next_sequence_number = 0 # protected by sequence_number_lock
self.auth_key = None
self.devices = {}
self.registered_callbacks = {}
self.socket = None # protected by socket_lock
self.socket_id = 0 # protected by socket_lock
self.socket_lock = Lock()
self.receive_flag = False
self.receive_thread = None
self.callback = None
self.disconnect_probe_flag = False
self.disconnect_probe_queue = None
self.disconnect_probe_thread = None
self.waiter = Semaphore()
def connect(self, host, port):
"""
Creates a TCP/IP connection to the given *host* and *port*. The host
and port can point to a Brick Daemon or to a WIFI/Ethernet Extension.
Devices can only be controlled when the connection was established
successfully.
Blocks until the connection is established and throws an exception if
there is no Brick Daemon or WIFI/Ethernet Extension listening at the
given host and port.
"""
with self.socket_lock:
if self.socket is not None:
raise Error(Error.ALREADY_CONNECTED,
'Already connected to {0}:{1}'.format(self.host, self.port))
self.host = host
self.port = port
self.connect_unlocked(False)
def disconnect(self):
"""
Disconnects the TCP/IP connection from the Brick Daemon or the
WIFI/Ethernet Extension.
"""
with self.socket_lock:
self.auto_reconnect_allowed = False
if self.auto_reconnect_pending:
# abort potentially pending auto reconnect
self.auto_reconnect_pending = False
else:
if self.socket is None:
raise Error(Error.NOT_CONNECTED, 'Not connected')
self.disconnect_unlocked()
# end callback thread
callback = self.callback
self.callback = None
# do this outside of socket_lock to allow calling (dis-)connect from
# the callbacks while blocking on the join call here
callback.queue.put((IPConnection.QUEUE_META,
(IPConnection.CALLBACK_DISCONNECTED,
IPConnection.DISCONNECT_REASON_REQUEST, None)))
callback.queue.put((IPConnection.QUEUE_EXIT, None))
if current_thread() is not callback.thread:
callback.thread.join()
def get_connection_state(self):
"""
Can return the following states:
- CONNECTION_STATE_DISCONNECTED: No connection is established.
- CONNECTION_STATE_CONNECTED: A connection to the Brick Daemon or
the WIFI/Ethernet Extension is established.
- CONNECTION_STATE_PENDING: IP Connection is currently trying to
connect.
"""
if self.socket is not None:
return IPConnection.CONNECTION_STATE_CONNECTED
elif self.auto_reconnect_pending:
return IPConnection.CONNECTION_STATE_PENDING
else:
return IPConnection.CONNECTION_STATE_DISCONNECTED
def set_auto_reconnect(self, auto_reconnect):
"""
Enables or disables auto-reconnect. If auto-reconnect is enabled,
the IP Connection will try to reconnect to the previously given
host and port, if the connection is lost.
Default value is *True*.
"""
self.auto_reconnect = bool(auto_reconnect)
if not self.auto_reconnect:
# abort potentially pending auto reconnect
self.auto_reconnect_allowed = False
def get_auto_reconnect(self):
"""
Returns *true* if auto-reconnect is enabled, *false* otherwise.
"""
return self.auto_reconnect
def set_timeout(self, timeout):
"""
Sets the timeout in seconds for getters and for setters for which the
response expected flag is activated.
Default timeout is 2.5.
"""
timeout = float(timeout)
if timeout < 0:
raise ValueError('Timeout cannot be negative')
self.timeout = timeout
def get_timeout(self):
"""
Returns the timeout as set by set_timeout.
"""
return self.timeout
def enumerate(self):
"""
Broadcasts an enumerate request. All devices will respond with an
enumerate callback.
"""
request, _, _ = self.create_packet_header(None, 8, IPConnection.FUNCTION_ENUMERATE)
self.send(request)
def wait(self):
"""
Stops the current thread until unwait is called.
This is useful if you rely solely on callbacks for events, if you want
to wait for a specific callback or if the IP Connection was created in
a thread.
Wait and unwait act in the same way as "acquire" and "release" of a
semaphore.
"""
self.waiter.acquire()
def unwait(self):
"""
Unwaits the thread previously stopped by wait.
Wait and unwait act in the same way as "acquire" and "release" of
a semaphore.
"""
self.waiter.release()
def register_callback(self, id, callback):
"""
Registers a callback with ID *id* to the function *callback*.
"""
self.registered_callbacks[id] = callback
def connect_unlocked(self, is_auto_reconnect):
# NOTE: assumes that socket_lock is locked
# create callback thread and queue
if self.callback is None:
try:
self.callback = IPConnection.CallbackContext()
self.callback.queue = Queue()
self.callback.packet_dispatch_allowed = False
self.callback.lock = Lock()
self.callback.thread = Thread(name='Callback-Processor',
target=self.callback_loop,
args=(self.callback, ))
self.callback.thread.daemon = True
self.callback.thread.start()
except:
self.callback = None
raise
# create and connect socket
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.connect((self.host, self.port))
self.socket_id += 1
except:
def cleanup():
self.socket = None
# end callback thread
if not is_auto_reconnect:
self.callback.queue.put((IPConnection.QUEUE_EXIT, None))
if current_thread() is not self.callback.thread:
self.callback.thread.join()
self.callback = None
cleanup()
raise
# create disconnect probe thread
try:
self.disconnect_probe_flag = True
self.disconnect_probe_queue = Queue()
self.disconnect_probe_thread = Thread(name='Disconnect-Prober',
target=self.disconnect_probe_loop,
args=(self.disconnect_probe_queue, ))
self.disconnect_probe_thread.daemon = True
self.disconnect_probe_thread.start()
except:
def cleanup():
self.disconnect_probe_thread = None
# close socket
self.socket.close()
self.socket = None
# end callback thread
if not is_auto_reconnect:
self.callback.queue.put((IPConnection.QUEUE_EXIT, None))
if current_thread() is not self.callback.thread:
self.callback.thread.join()
self.callback = None
cleanup()
raise
# create receive thread
self.callback.packet_dispatch_allowed = True
try:
self.receive_flag = True
self.receive_thread = Thread(name='Brickd-Receiver',
target=self.receive_loop,
args=(self.socket_id, ))
self.receive_thread.daemon = True
self.receive_thread.start()
except:
def cleanup():
self.disconnect_unlocked()
# end callback thread
if not is_auto_reconnect:
self.callback.queue.put((IPConnection.QUEUE_EXIT, None))
if current_thread() is not self.callback.thread:
self.callback.thread.join()
self.callback = None
cleanup()
raise
self.auto_reconnect_allowed = False
self.auto_reconnect_pending = False
if is_auto_reconnect:
connect_reason = IPConnection.CONNECT_REASON_AUTO_RECONNECT
else:
connect_reason = IPConnection.CONNECT_REASON_REQUEST
self.callback.queue.put((IPConnection.QUEUE_META,
(IPConnection.CALLBACK_CONNECTED,
connect_reason, None)))
def disconnect_unlocked(self):
# NOTE: assumes that socket_lock is locked
# end disconnect probe thread
self.disconnect_probe_queue.put(True)
self.disconnect_probe_thread.join() # FIXME: use a timeout?
self.disconnect_probe_thread = None
# stop dispatching packet callbacks before ending the receive
# thread to avoid timeout exceptions due to callback functions
# trying to call getters
if current_thread() is not self.callback.thread:
# FIXME: cannot hold callback lock here because this can
# deadlock due to an ordering problem with the socket lock
#with self.callback.lock:
if True:
self.callback.packet_dispatch_allowed = False
else:
self.callback.packet_dispatch_allowed = False
# end receive thread
self.receive_flag = False
try:
self.socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
if self.receive_thread is not None:
self.receive_thread.join() # FIXME: use a timeout?
self.receive_thread = None
# close socket
self.socket.close()
self.socket = None
def receive_loop(self, socket_id):
if sys.hexversion < 0x03000000:
pending_data = ''
else:
pending_data = bytes()
while self.receive_flag:
try:
data = self.socket.recv(8192)
except socket.error:
if self.receive_flag:
self.handle_disconnect_by_peer(IPConnection.DISCONNECT_REASON_ERROR, socket_id, False)
break
if len(data) == 0:
if self.receive_flag:
self.handle_disconnect_by_peer(IPConnection.DISCONNECT_REASON_SHUTDOWN, socket_id, False)
break
pending_data += data
while True:
if len(pending_data) < 8:
# Wait for complete header
break
length = get_length_from_data(pending_data)
if len(pending_data) < length:
# Wait for complete packet
break
packet = pending_data[0:length]
pending_data = pending_data[length:]
self.handle_response(packet)
def dispatch_meta(self, function_id, parameter, socket_id):
if function_id == IPConnection.CALLBACK_CONNECTED:
if IPConnection.CALLBACK_CONNECTED in self.registered_callbacks and \
self.registered_callbacks[IPConnection.CALLBACK_CONNECTED] is not None:
self.registered_callbacks[IPConnection.CALLBACK_CONNECTED](parameter)
elif function_id == IPConnection.CALLBACK_DISCONNECTED:
if parameter != IPConnection.DISCONNECT_REASON_REQUEST:
# need to do this here, the receive_loop is not allowed to
# hold the socket_lock because this could cause a deadlock
# with a concurrent call to the (dis-)connect function
with self.socket_lock:
# don't close the socket if it got disconnected or
# reconnected in the meantime
if self.socket is not None and self.socket_id == socket_id:
# end disconnect probe thread
self.disconnect_probe_queue.put(True)
self.disconnect_probe_thread.join() # FIXME: use a timeout?
self.disconnect_probe_thread = None
# close socket
self.socket.close()
self.socket = None
# FIXME: wait a moment here, otherwise the next connect
# attempt will succeed, even if there is no open server
# socket. the first receive will then fail directly
time.sleep(0.1)
if IPConnection.CALLBACK_DISCONNECTED in self.registered_callbacks and \
self.registered_callbacks[IPConnection.CALLBACK_DISCONNECTED] is not None:
self.registered_callbacks[IPConnection.CALLBACK_DISCONNECTED](parameter)
if parameter != IPConnection.DISCONNECT_REASON_REQUEST and \
self.auto_reconnect and self.auto_reconnect_allowed:
self.auto_reconnect_pending = True
retry = True
# block here until reconnect. this is okay, there is no
# callback to deliver when there is no connection
while retry:
retry = False
with self.socket_lock:
if self.auto_reconnect_allowed and self.socket is None:
try:
self.connect_unlocked(True)
except:
retry = True
else:
self.auto_reconnect_pending = False
if retry:
time.sleep(0.1)
def dispatch_packet(self, packet):
uid = get_uid_from_data(packet)
length = get_length_from_data(packet)
function_id = get_function_id_from_data(packet)
payload = packet[8:]
if function_id == IPConnection.CALLBACK_ENUMERATE and \
IPConnection.CALLBACK_ENUMERATE in self.registered_callbacks:
uid, connected_uid, position, hardware_version, \
firmware_version, device_identifier, enumeration_type = \
self.deserialize_data(payload, '8s 8s c 3B 3B H B')
cb = self.registered_callbacks[IPConnection.CALLBACK_ENUMERATE]
cb(uid, connected_uid, position, hardware_version,
firmware_version, device_identifier, enumeration_type)
return
if uid not in self.devices:
return
device = self.devices[uid]
if function_id in device.registered_callbacks and \
device.registered_callbacks[function_id] is not None:
cb = device.registered_callbacks[function_id]
form = device.callback_formats[function_id]
if len(form) == 0:
cb()
elif len(form) == 1:
cb(self.deserialize_data(payload, form))
else:
cb(*self.deserialize_data(payload, form))
def callback_loop(self, callback):
while True:
kind, data = callback.queue.get()
# FIXME: cannot hold callback lock here because this can
# deadlock due to an ordering problem with the socket lock
#with callback.lock:
if True:
if kind == IPConnection.QUEUE_EXIT:
break
elif kind == IPConnection.QUEUE_META:
self.dispatch_meta(*data)
elif kind == IPConnection.QUEUE_PACKET:
# don't dispatch callbacks when the receive thread isn't running
if callback.packet_dispatch_allowed:
self.dispatch_packet(data)
def disconnect_probe_loop(self, disconnect_probe_queue):
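        # Periodically send a probe packet while the connection is otherwise
        # idle (the flag is cleared by regular traffic), so a dead connection
        # is detected even without outgoing requests.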
request, _, _ = self.create_packet_header(None, 8, IPConnection.FUNCTION_DISCONNECT_PROBE)
while True:
try:
disconnect_probe_queue.get(True, IPConnection.DISCONNECT_PROBE_INTERVAL)
break
except Empty:
pass
if self.disconnect_probe_flag:
with self.socket_lock:
try:
self.socket.send(request)
except socket.error:
self.handle_disconnect_by_peer(IPConnection.DISCONNECT_REASON_ERROR,
self.socket_id, False)
break
else:
self.disconnect_probe_flag = True
def deserialize_data(self, data, form):
ret = []
for f in form.split(' '):
f = '<' + f
length = struct.calcsize(f)
x = struct.unpack(f, data[:length])
if len(x) > 1:
if 'c' in f:
x = tuple([self.handle_deserialized_char(c) for c in x])
ret.append(x)
elif 'c' in f:
ret.append(self.handle_deserialized_char(x[0]))
elif 's' in f:
ret.append(self.handle_deserialized_string(x[0]))
else:
ret.append(x[0])
data = data[length:]
if len(ret) == 1:
return ret[0]
else:
return ret
def handle_deserialized_char(self, c):
if sys.hexversion >= 0x03000000:
c = c.decode('ascii')
return c
def handle_deserialized_string(self, s):
if sys.hexversion >= 0x03000000:
s = s.decode('ascii')
i = s.find(chr(0))
if i >= 0:
s = s[:i]
return s
def send(self, packet):
with self.socket_lock:
if self.socket is None:
raise Error(Error.NOT_CONNECTED, 'Not connected')
try:
self.socket.send(packet)
except socket.error:
self.handle_disconnect_by_peer(IPConnection.DISCONNECT_REASON_ERROR, None, True)
raise Error(Error.NOT_CONNECTED, 'Not connected')
self.disconnect_probe_flag = False
def send_request(self, device, function_id, data, form, form_ret):
length = 8 + struct.calcsize('<' + form)
request, response_expected, sequence_number = \
self.create_packet_header(device, length, function_id)
def pack_string(f, d):
if sys.hexversion < 0x03000000:
if type(d) == types.UnicodeType:
f = f.replace('s', 'B')
l = map(ord, d)
l += [0] * (int(f.replace('B', '')) - len(l))
return struct.pack('<' + f, *l)
else:
return struct.pack('<' + f, d)
else:
if isinstance(d, str):
return struct.pack('<' + f, bytes(map(ord, d)))
else:
return struct.pack('<' + f, d)
for f, d in zip(form.split(' '), data):
if len(f) > 1 and not 's' in f and not 'c' in f:
request += struct.pack('<' + f, *d)
elif 's' in f:
request += pack_string(f, d)
elif 'c' in f:
if len(f) > 1:
if int(f.replace('c', '')) != len(d):
                        raise ValueError('Incorrect char list length')
for k in d:
request += pack_string('c', k)
else:
request += pack_string(f, d)
else:
request += struct.pack('<' + f, d)
if response_expected:
with device.request_lock:
device.expected_response_function_id = function_id
device.expected_response_sequence_number = sequence_number
try:
self.send(request)
while True:
response = device.response_queue.get(True, self.timeout)
if function_id == get_function_id_from_data(response) and \
sequence_number == get_sequence_number_from_data(response):
# ignore old responses that arrived after the timeout expired, but before setting
# expected_response_function_id and expected_response_sequence_number back to None
break
except Empty:
msg = 'Did not receive response for function {0} in time'.format(function_id)
raise Error(Error.TIMEOUT, msg)
finally:
device.expected_response_function_id = None
device.expected_response_sequence_number = None
error_code = get_error_code_from_data(response)
if error_code == 0:
# no error
pass
elif error_code == 1:
msg = 'Got invalid parameter for function {0}'.format(function_id)
raise Error(Error.INVALID_PARAMETER, msg)
elif error_code == 2:
msg = 'Function {0} is not supported'.format(function_id)
raise Error(Error.NOT_SUPPORTED, msg)
else:
msg = 'Function {0} returned an unknown error'.format(function_id)
raise Error(Error.UNKNOWN_ERROR_CODE, msg)
if len(form_ret) > 0:
return self.deserialize_data(response[8:], form_ret)
else:
self.send(request)
def get_next_sequence_number(self):
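        # Sequence numbers cycle through 1..15; 0 is reserved for callbacks
        # (see handle_response).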
with self.sequence_number_lock:
sequence_number = self.next_sequence_number + 1
self.next_sequence_number = sequence_number % 15
return sequence_number
def handle_response(self, packet):
self.disconnect_probe_flag = False
function_id = get_function_id_from_data(packet)
sequence_number = get_sequence_number_from_data(packet)
if sequence_number == 0 and function_id == IPConnection.CALLBACK_ENUMERATE:
if IPConnection.CALLBACK_ENUMERATE in self.registered_callbacks:
self.callback.queue.put((IPConnection.QUEUE_PACKET, packet))
return
uid = get_uid_from_data(packet)
if not uid in self.devices:
# Response from an unknown device, ignoring it
return
device = self.devices[uid]
if sequence_number == 0:
if function_id in device.registered_callbacks:
self.callback.queue.put((IPConnection.QUEUE_PACKET, packet))
return
if device.expected_response_function_id == function_id and \
device.expected_response_sequence_number == sequence_number:
device.response_queue.put(packet)
return
# Response seems to be OK, but can't be handled, most likely
# a callback without registered function
def handle_disconnect_by_peer(self, disconnect_reason, socket_id, disconnect_immediately):
# NOTE: assumes that socket_lock is locked if disconnect_immediately is true
self.auto_reconnect_allowed = True
if disconnect_immediately:
self.disconnect_unlocked()
self.callback.queue.put((IPConnection.QUEUE_META,
(IPConnection.CALLBACK_DISCONNECTED,
disconnect_reason, socket_id)))
def create_packet_header(self, device, length, function_id):
uid = IPConnection.BROADCAST_UID
sequence_number = self.get_next_sequence_number()
r_bit = 0
a_bit = 0
if device is not None:
uid = device.uid
if device.get_response_expected(function_id):
r_bit = 1
if device.auth_key is not None:
a_bit = 1
else:
if self.auth_key is not None:
a_bit = 1
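        # Option byte layout: bits 4-7 sequence number, bit 3 response
        # expected, bit 2 authentication.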
sequence_number_and_options = \
(sequence_number << 4) | (r_bit << 3) | (a_bit << 2)
return (struct.pack('<IBBBB', uid, length, function_id,
sequence_number_and_options, 0),
bool(r_bit),
sequence_number)
def write_bricklet_plugin(self, device, port, position, plugin_chunk):
self.send_request(device,
IPConnection.FUNCTION_WRITE_BRICKLET_PLUGIN,
(port, position, plugin_chunk),
'c B 32B',
'')
def read_bricklet_plugin(self, device, port, position):
return self.send_request(device,
IPConnection.FUNCTION_READ_BRICKLET_PLUGIN,
(port, position),
'c B',
'32B')
def get_adc_calibration(self, device):
return self.send_request(device,
IPConnection.FUNCTION_GET_ADC_CALIBRATION,
(),
'',
'h h')
def adc_calibrate(self, device, port):
self.send_request(device,
IPConnection.FUNCTION_ADC_CALIBRATE,
(port,),
'c',
'')
def write_bricklet_uid(self, device, port, uid):
uid_int = base58decode(uid)
self.send_request(device,
IPConnection.FUNCTION_WRITE_BRICKLET_UID,
(port, uid_int),
'c I',
'')
def read_bricklet_uid(self, device, port):
uid_int = self.send_request(device,
IPConnection.FUNCTION_READ_BRICKLET_UID,
(port,),
'c',
'I')
return base58encode(uid_int)
| {
"content_hash": "1ce2f55db01776b51046e9d5885e40a6",
"timestamp": "",
"source": "github",
"line_count": 1012,
"max_line_length": 114,
"avg_line_length": 36.45059288537549,
"alnum_prop": 0.570212535241813,
"repo_name": "DeathPoison/tinker-cnc",
"id": "f2535ce580bc6bf7e6e9efb0a57f8d54889c2453",
"size": "37156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinkerforge/ip_connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "515489"
}
],
"symlink_target": ""
} |
"""
Throughout my travels I've discovered that most people, including myself, do not
realise many things about our Planet's size. For example, the latitude and
longitude of certain regions (South America is much further east than the US)
or the relative size of countries (Japan is surprisingly long).
Thus, I've created this script to understand such things a bit better. It
compares the sizes of Japan and Europe, which is the most recent surprise
I came across.
The shape data were acquired from [Global Administrative Areas](http://www.gadm.org/country)
website. Thus, their **redistribution, or commercial use is not allowed without
prior permission**.
Created on Sun May 7 14:13:47 2017
@author: Alek
"""
from mpl_toolkits.basemap import Basemap
import numpy, shapefile, os, matplotlib.pyplot
matplotlib.pyplot.xkcd() # Here we go.
def plotPrefecture(*,shp,colour,bMap,axes,latOff=0,longOff=0,lwdth=0.5):
""" Plot a prefecture from a shapefile.
Kwargs
-------
* shp - shape as returned by :func:`shapefile.Reader.shapes`,
* colour - colour accepted by :func:`matplotlib.pyplot.Axes.plot',
* bMap - instance of :class:`mpl_toolkits.basemap.Basemap` used to project
the shape onto a map,
* axes - :class:`matplotlib.pyplot.Axes` instance where to plot,
    * latOff,longOff - deg, by how much to offset the `shp` latitudes and
longitudes before plotting,
* lwdth - line width as accepted by :func:`matplotlib.pyplot.Axes.plot'.
"""
if len(shp.parts)==1: # Only one region in this shape.
vertices=numpy.array(shp.points)
bMap.plot(vertices[:,0]+longOff,vertices[:,1]+latOff,color=colour,
lw=lwdth,ls='-',latlon=True,ax=axes)
else: # This shape has islands, disjoint regions and what-not.
for ip in range(len(shp.parts)): # For every part of the shape.
# Indices that get the slice with this part of the shape.
lower=shp.parts[ip]
if ip==len(shp.parts)-1:
upper=len(shp.points) # Last part.
else:
upper=shp.parts[ip+1] # Next part starts at idx parts[ip+1]
partVertices=numpy.array(shp.points[lower:upper])
bMap.plot(partVertices[:,0]+longOff,partVertices[:,1]+latOff,
color=colour,lw=lwdth,ls='-',latlon=True,ax=axes)
# Various font sizes.
ticksFontSize=18
labelsFontSizeSmall=20
labelsFontSize=30
titleFontSize=34
legendFontSize=20
matplotlib.rc('xtick',labelsize=ticksFontSize)
matplotlib.rc('ytick',labelsize=ticksFontSize)
cm=matplotlib.pyplot.cm.get_cmap('viridis')
# Read a shapefile with Japan's cartography data.
shapeRdr0=shapefile.Reader(os.path.join('borders','JPN_adm0')) # Country.
shapeRdr1=shapefile.Reader(os.path.join('borders','JPN_adm1')) # Prefectures.
shapeRdr2=shapefile.Reader(os.path.join('borders','JPN_adm2')) # Towns.
shape=shapeRdr0.shapes()[0]
if shape.shapeType != shapefile.POLYGON:
raise ValueError('Shape not polygon with shapeType={}'.format(shape.shapeType ))
vertices=numpy.array(shape.points) # 2D array of coordinates.
# Where to centre different maps and where to translate Japan to.
latJpn=37 # Where to centre one map, i.e. over Japan. Lat/lon in degrees.
lonJpn=138
latCtr=40 # Where to centre the Europe's map. Lat/lon in degrees.
lonCtr=10
dLonJ=10 # Plot Japan at these coordinates over the map of Europe.
dLatJ=50
# Mercator projection, a.k.a. "the things you learn in schools".
fig,ax=matplotlib.pyplot.subplots(1,2,figsize=(16,8))
# The whole Planet.
mercMapP=Basemap(projection='merc',llcrnrlat=-80,urcrnrlat=80,llcrnrlon=-180,
urcrnrlon=180,lat_ts=10,ax=ax[0],resolution='c')
mercMapP.drawcoastlines(linewidth=0.5)
mercMapP.drawcountries(linewidth=0.25)
mercMapP.drawparallels(numpy.arange(-90.,91.,30.))
mercMapP.drawmeridians(numpy.arange(-180.,181.,60.))
ax[0].set_title(r'$Our\ Planet$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=1,bMap=mercMapP,axes=ax[0])
# Only Europe.
mercMapE=Basemap(projection='merc',llcrnrlat=30,urcrnrlat=75,llcrnrlon=-25,
urcrnrlon=40,lat_ts=10,ax=ax[1],resolution='l')
mercMapE.drawcoastlines(linewidth=0.5)
mercMapE.drawcountries(linewidth=0.25)
mercMapE.drawparallels(numpy.arange(mercMapE.latmin,mercMapE.latmax,10.))
mercMapE.drawmeridians(numpy.arange(mercMapE.lonmin,mercMapE.lonmax,15.))
ax[1].set_title(r'$Europe$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=mercMapE,axes=ax[1],
latOff=dLatJ-latJpn,longOff=dLonJ-lonJpn)
fig.show()
# One figure with orthographic maps centred on Japan and Europe.
fig,ax=matplotlib.pyplot.subplots(1,2,figsize=(16,8))
# Centred on Japan.
ortnMapJ=Basemap(projection='ortho',lat_0=latJpn,lon_0=lonJpn,resolution='c',
ax=ax[0])
ortnMapJ.drawcoastlines(linewidth=0.5)
ortnMapJ.drawcountries(linewidth=0.25)
ortnMapJ.drawmeridians(numpy.arange(0,360,30))
ortnMapJ.drawparallels(numpy.arange(-90,90,30))
ax[0].set_title(r'${}$'.format(shapeRdr0.records()[0][4]),fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=ortnMapJ,axes=ax[0])
# Plot all the prefectures.
cNorm=matplotlib.colors.Normalize(vmin=0,vmax=shapeRdr1.numRecords)
scalarMap=matplotlib.cm.ScalarMappable(norm=cNorm,cmap=cm)
prefectures=shapeRdr1.shapes()
prefRecords=shapeRdr1.records()
for i in range(shapeRdr1.numRecords):
if prefRecords[i][9]=='Prefecture':
plotPrefecture(shp=prefectures[i],colour=scalarMap.to_rgba(i),
lwdth=0.5,bMap=ortnMapJ,axes=ax[0])
# Centred on Europe.
ortnMapE=Basemap(projection='ortho',lat_0=latCtr,lon_0=lonCtr,resolution='c',
ax=ax[1])
ortnMapE.drawcoastlines(linewidth=0.5)
ortnMapE.drawcountries(linewidth=0.25)
ortnMapE.drawmeridians(numpy.arange(0,360,30))
ortnMapE.drawparallels(numpy.arange(-90,90,30))
ax[1].set_title(r'${}\ over\ Europe$'.format(shapeRdr0.records()[0][4]),
fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=ortnMapE,axes=ax[1],
latOff=dLatJ-latJpn,longOff=dLonJ-lonJpn)
fig.show()
# Japan and Kitakyushu overlaid on Europe.
fig,ax=matplotlib.pyplot.subplots(1,1,figsize=(16,8))
mercMapE=Basemap(projection='merc',llcrnrlat=30,urcrnrlat=75,llcrnrlon=-25,
urcrnrlon=40,lat_ts=10,ax=ax,resolution='l')
mercMapE.drawcoastlines(linewidth=0.5)
mercMapE.drawcountries(linewidth=0.25)
mercMapE.drawparallels(numpy.arange(mercMapE.latmin,mercMapE.latmax,10.))
mercMapE.drawmeridians(numpy.arange(mercMapE.lonmin,mercMapE.lonmax,15.))
ax.set_title(r'$Europe,\ true\ lat.$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=mercMapE,axes=ax,
latOff=0,longOff=dLonJ-lonJpn)
# Show annotation at the true latitude.
xKIT,yKIT=mercMapE.projtran(130.834730+dLonJ-lonJpn,33.8924837)
xTXT,yTXT=mercMapE.projtran(110.834730+dLonJ-lonJpn,45.8924837)
ax.scatter([xKIT],[yKIT],s=50,c='crimson')
ax.annotate('Here', xy=(xKIT,yKIT),xytext=(xTXT,yTXT),color='crimson',
arrowprops=dict(facecolor='crimson', shrink=0.05))
fig.show() | {
"content_hash": "6704ad1ba64763207c1effd94ce0fbb2",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 91,
"avg_line_length": 43.61349693251534,
"alnum_prop": 0.7266844844563229,
"repo_name": "AleksanderLidtke/XKCD",
"id": "0289963af258cded39c2b0dcfaad0d26f59c24b0",
"size": "7133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "JapanSize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17635"
}
],
"symlink_target": ""
} |
import logging
from .base import BaseResource
from .map import MapCollection
from .layer import LayerCollection
class User(BaseResource):
def __init__(self, *args):
super(User, self).__init__(*args)
self.username = None
self.maps = MapCollection()
self.layers = LayerCollection()
def on_get(self, resource):
if 'username' in resource:
self.username = resource['username']
if 'resources' in resource:
rs = resource['resources']
if 'maps' in rs and 'url' in rs['maps']:
self.maps.set_resource_url(rs['maps']['url'])
if 'layers' in rs and 'url' in rs['layers']:
self.layers.set_resource_url(rs['layers']['url'])
| {
"content_hash": "90f2228f74332eb4afe04eb9dcbaca14",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 65,
"avg_line_length": 32.56521739130435,
"alnum_prop": 0.5874499332443258,
"repo_name": "rjw57/foldbeam",
"id": "27cbd6ea71aecca7e14fc2423354cc1c16871509",
"size": "749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webui/client/user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "7278"
},
{
"name": "CSS",
"bytes": "309998"
},
{
"name": "JavaScript",
"bytes": "480923"
},
{
"name": "Python",
"bytes": "286208"
},
{
"name": "Shell",
"bytes": "4783"
}
],
"symlink_target": ""
} |