code | repo_name | path | language | license | size
---|---|---|---|---|---|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.7.4, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import ContainerRegistryConfiguration
from .operations import AuthenticationOperations, ContainerRegistryBlobOperations, ContainerRegistryOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.rest import HttpRequest, HttpResponse
class ContainerRegistry(object):
"""Metadata API definition for the Azure Container Registry runtime.
:ivar container_registry: ContainerRegistryOperations operations
:vartype container_registry: container_registry.operations.ContainerRegistryOperations
:ivar container_registry_blob: ContainerRegistryBlobOperations operations
:vartype container_registry_blob: container_registry.operations.ContainerRegistryBlobOperations
:ivar authentication: AuthenticationOperations operations
:vartype authentication: container_registry.operations.AuthenticationOperations
:param url: Registry login URL.
:type url: str
:keyword api_version: API version. The default value is "2021-07-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
url, # type: str
**kwargs # type: Any
):
# type: (...) -> None
_base_url = '{url}'
self._config = ContainerRegistryConfiguration(url=url, **kwargs)
self._client = PipelineClient(base_url=_base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.container_registry = ContainerRegistryOperations(self._client, self._config, self._serialize, self._deserialize)
self.container_registry_blob = ContainerRegistryBlobOperations(self._client, self._config, self._serialize, self._deserialize)
self.authentication = AuthenticationOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs # type: Any
):
# type: (...) -> HttpResponse
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
path_format_arguments = {
"url": self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> ContainerRegistry
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
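# --- Illustrative usage (not part of the generated file) ---------------------
# A minimal sketch, assuming a hypothetical registry URL and catalog path, of
# sending a raw request through the pipeline as described in the _send_request
# docstring above. No authentication policy is configured here, so a real
# registry would reject the call with 401.
def _example_send_catalog_request():
    from azure.core.rest import HttpRequest

    client = ContainerRegistry(url="https://myregistry.azurecr.io")
    # A relative URL is joined with the client's base URL by format_url.
    request = HttpRequest("GET", "/acr/v1/_catalog")
    with client:
        response = client._send_request(request)
        print(response.status_code)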
| Azure/azure-sdk-for-python | sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/_container_registry.py | Python | mit | 4,280 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PeerExpressRouteCircuitConnectionsOperations(object):
"""PeerExpressRouteCircuitConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PeerExpressRouteCircuitConnection"
"""Gets the specified Peer Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the peer express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeerExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.PeerExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PeerExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PeerExpressRouteCircuitConnectionListResult"]
"""Gets all global reach peer connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PeerExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.PeerExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PeerExpressRouteCircuitConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections'} # type: ignore
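# --- Illustrative usage (not part of the generated file) ---------------------
# A minimal sketch of reaching this operation group through the public
# NetworkManagementClient instead of instantiating it directly. The resource
# names and subscription id below are placeholders, and a working Azure
# credential is assumed.
def _example_peer_connections():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    connection = client.peer_express_route_circuit_connections.get(
        resource_group_name="my-rg",
        circuit_name="my-circuit",
        peering_name="AzurePrivatePeering",
        connection_name="my-peer-connection",
    )
    print(connection.name)
    # list() returns an ItemPaged iterator that transparently follows next links.
    for item in client.peer_express_route_circuit_connections.list(
            "my-rg", "my-circuit", "AzurePrivatePeering"):
        print(item.name)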
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_peer_express_route_circuit_connections_operations.py | Python | mit | 9,496 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=no-self-use
from azure.core.exceptions import HttpResponseError
from .._deserialize import (
process_storage_error)
from .._shared.response_handlers import return_response_headers
from .._shared.uploads_async import (
upload_data_chunks,
DataLakeFileChunkUploader, upload_substream_blocks)
def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument
return any([
modified_access_conditions.if_modified_since,
modified_access_conditions.if_unmodified_since,
modified_access_conditions.if_none_match,
modified_access_conditions.if_match
])
async def upload_datalake_file( # pylint: disable=unused-argument
client=None,
stream=None,
length=None,
overwrite=None,
validate_content=None,
max_concurrency=None,
file_settings=None,
**kwargs):
try:
if length == 0:
return {}
properties = kwargs.pop('properties', None)
umask = kwargs.pop('umask', None)
permissions = kwargs.pop('permissions', None)
path_http_headers = kwargs.pop('path_http_headers', None)
modified_access_conditions = kwargs.pop('modified_access_conditions', None)
chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
if not overwrite:
# if customers didn't specify access conditions, they cannot flush data to an existing file
if not _any_conditions(modified_access_conditions):
modified_access_conditions.if_none_match = '*'
if properties or umask or permissions:
raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
if overwrite:
response = await client.create(
resource='file',
path_http_headers=path_http_headers,
properties=properties,
modified_access_conditions=modified_access_conditions,
umask=umask,
permissions=permissions,
cls=return_response_headers,
**kwargs)
# this modified_access_conditions will be applied to flush_data to make sure
# no other flush happens between create and the current flush
modified_access_conditions.if_match = response['etag']
modified_access_conditions.if_none_match = None
modified_access_conditions.if_modified_since = None
modified_access_conditions.if_unmodified_since = None
use_original_upload_path = file_settings.use_byte_buffer or \
validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \
hasattr(stream, 'seekable') and not stream.seekable() or \
not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
if use_original_upload_path:
await upload_data_chunks(
service=client,
uploader_class=DataLakeFileChunkUploader,
total_size=length,
chunk_size=chunk_size,
stream=stream,
max_concurrency=max_concurrency,
validate_content=validate_content,
**kwargs)
else:
await upload_substream_blocks(
service=client,
uploader_class=DataLakeFileChunkUploader,
total_size=length,
chunk_size=chunk_size,
max_concurrency=max_concurrency,
stream=stream,
validate_content=validate_content,
**kwargs
)
return await client.flush_data(position=length,
path_http_headers=path_http_headers,
modified_access_conditions=modified_access_conditions,
close=True,
cls=return_response_headers,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
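# --- Illustrative usage (not part of the original module) --------------------
# upload_datalake_file is an internal helper; callers normally reach it through
# the public async DataLakeFileClient.upload_data API, sketched below. The
# account URL, file system, path and credential are placeholders.
async def _example_upload():
    from azure.storage.filedatalake.aio import DataLakeFileClient

    file_client = DataLakeFileClient(
        account_url="https://<account>.dfs.core.windows.net",
        file_system_name="my-filesystem",
        file_path="folder/data.csv",
        credential="<account-key>",
    )
    async with file_client:
        # overwrite=True allows replacing an existing file without access conditions.
        await file_client.upload_data(b"col1,col2\n1,2\n", overwrite=True)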
| Azure/azure-sdk-for-python | sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_upload_helper.py | Python | mit | 4,443 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._blob_containers_operations import build_clear_legal_hold_request, build_create_or_update_immutability_policy_request, build_create_request, build_delete_immutability_policy_request, build_delete_request, build_extend_immutability_policy_request, build_get_immutability_policy_request, build_get_request, build_lease_request, build_list_request, build_lock_immutability_policy_request, build_set_legal_hold_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BlobContainersOperations:
"""BlobContainersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
maxpagesize: Optional[str] = None,
filter: Optional[str] = None,
include: Optional[Union[str, "_models.ListContainersInclude"]] = None,
**kwargs: Any
) -> AsyncIterable["_models.ListContainerItems"]:
"""Lists all containers and does not support a prefix like data plane. Also SRP today does not
return continuation token.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param maxpagesize: Optional. Specified maximum number of containers that can be included in
the list.
:type maxpagesize: str
:param filter: Optional. When specified, only container names starting with the filter will be
listed.
:type filter: str
:param include: Optional, used to include the properties for soft deleted blob containers.
:type include: str or ~azure.mgmt.storage.v2019_06_01.models.ListContainersInclude
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListContainerItems or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2019_06_01.models.ListContainerItems]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainerItems"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
maxpagesize=maxpagesize,
filter=filter,
include=include,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
maxpagesize=maxpagesize,
filter=filter,
include=include,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ListContainerItems", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers'} # type: ignore
@distributed_trace_async
async def create(
self,
resource_group_name: str,
account_name: str,
container_name: str,
blob_container: "_models.BlobContainer",
**kwargs: Any
) -> "_models.BlobContainer":
"""Creates a new container under the specified account as described by request body. The container
resource includes metadata and properties for that container. It does not include a list of the
blobs contained by the container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param blob_container: Properties of the blob container to create.
:type blob_container: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(blob_container, 'BlobContainer')
request = build_create_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BlobContainer', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
account_name: str,
container_name: str,
blob_container: "_models.BlobContainer",
**kwargs: Any
) -> "_models.BlobContainer":
"""Updates container properties as specified in request body. Properties not mentioned in the
request will be unchanged. Update fails if the specified container doesn't already exist.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param blob_container: Properties to update for the blob container.
:type blob_container: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(blob_container, 'BlobContainer')
request = build_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
account_name: str,
container_name: str,
**kwargs: Any
) -> "_models.BlobContainer":
"""Gets properties of a specified container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
account_name: str,
container_name: str,
**kwargs: Any
) -> None:
"""Deletes specified container under its account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
@distributed_trace_async
async def set_legal_hold(
self,
resource_group_name: str,
account_name: str,
container_name: str,
legal_hold: "_models.LegalHold",
**kwargs: Any
) -> "_models.LegalHold":
"""Sets legal hold tags. Setting the same tag results in an idempotent operation. SetLegalHold
follows an append pattern and does not clear out the existing tags that are not specified in
the request.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param legal_hold: The LegalHold property that will be set to a blob container.
:type legal_hold: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LegalHold, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LegalHold"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(legal_hold, 'LegalHold')
request = build_set_legal_hold_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.set_legal_hold.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LegalHold', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold'} # type: ignore
@distributed_trace_async
async def clear_legal_hold(
self,
resource_group_name: str,
account_name: str,
container_name: str,
legal_hold: "_models.LegalHold",
**kwargs: Any
) -> "_models.LegalHold":
"""Clears legal hold tags. Clearing the same or non-existent tag results in an idempotent
operation. ClearLegalHold clears out only the specified tags in the request.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param legal_hold: The LegalHold property that will be clear from a blob container.
:type legal_hold: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LegalHold, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LegalHold"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(legal_hold, 'LegalHold')
request = build_clear_legal_hold_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.clear_legal_hold.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LegalHold', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
clear_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold'} # type: ignore
@distributed_trace_async
async def create_or_update_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: Optional[str] = None,
parameters: Optional["_models.ImmutabilityPolicy"] = None,
**kwargs: Any
) -> "_models.ImmutabilityPolicy":
"""Creates or updates an unlocked immutability policy. ETag in If-Match is honored if given but
not required for this operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:param parameters: The ImmutabilityPolicy Properties that will be created or updated to a blob
container.
:type parameters: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'ImmutabilityPolicy')
else:
_json = None
request = build_create_or_update_immutability_policy_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
if_match=if_match,
template_url=self.create_or_update_immutability_policy.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
@distributed_trace_async
async def get_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: Optional[str] = None,
**kwargs: Any
) -> "_models.ImmutabilityPolicy":
"""Gets the existing immutability policy along with the corresponding ETag in response headers and
body.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_immutability_policy_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
if_match=if_match,
template_url=self.get_immutability_policy.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
@distributed_trace_async
async def delete_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: str,
**kwargs: Any
) -> "_models.ImmutabilityPolicy":
"""Aborts an unlocked immutability policy. The response of delete has
immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this
operation. Deleting a locked immutability policy is not allowed; the only way is to delete the
container after deleting all expired blobs inside the policy-locked container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_immutability_policy_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
if_match=if_match,
template_url=self.delete_immutability_policy.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
delete_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
@distributed_trace_async
async def lock_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: str,
**kwargs: Any
) -> "_models.ImmutabilityPolicy":
"""Sets the ImmutabilityPolicy to Locked state. The only action allowed on a Locked policy is
ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_lock_immutability_policy_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
if_match=if_match,
template_url=self.lock_immutability_policy.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
lock_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock'} # type: ignore
@distributed_trace_async
async def extend_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: str,
parameters: Optional["_models.ImmutabilityPolicy"] = None,
**kwargs: Any
) -> "_models.ImmutabilityPolicy":
"""Extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. The only
action allowed on a Locked policy will be this action. ETag in If-Match is required for this
operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:param parameters: The ImmutabilityPolicy Properties that will be extended for a blob
container.
:type parameters: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'ImmutabilityPolicy')
else:
_json = None
request = build_extend_immutability_policy_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
if_match=if_match,
json=_json,
template_url=self.extend_immutability_policy.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
extend_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend'} # type: ignore
@distributed_trace_async
async def lease(
self,
resource_group_name: str,
account_name: str,
container_name: str,
parameters: Optional["_models.LeaseContainerRequest"] = None,
**kwargs: Any
) -> "_models.LeaseContainerResponse":
"""The Lease Container operation establishes and manages a lock on a container for delete
operations. The lock duration can be 15 to 60 seconds, or can be infinite.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param parameters: Lease Container request body.
:type parameters: ~azure.mgmt.storage.v2019_06_01.models.LeaseContainerRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LeaseContainerResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.LeaseContainerResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LeaseContainerResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'LeaseContainerRequest')
else:
_json = None
request = build_lease_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.lease.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LeaseContainerResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
lease.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease'} # type: ignore
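    # Illustrative usage sketch (not part of the generated client; resource names
    # below are placeholders). With an authenticated instance of this operations
    # class (typically reachable as client.blob_containers on the storage
    # management client), a 60-second lease could be acquired roughly like:
    #
    #     result = await client.blob_containers.lease(
    #         resource_group_name="my-rg",
    #         account_name="mystorageacct",
    #         container_name="my-container",
    #         parameters=LeaseContainerRequest(action="Acquire", lease_duration=60),
    #     )
    #     lease_id = result.lease_id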
| Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_06_01/aio/operations/_blob_containers_operations.py | Python | mit | 49,142 |
from decimal import Decimal
map_ones = {
0: "",
1: "One",
2: "Two",
3: "Three",
4: "Four",
5: "Five",
6: "Six",
7: "Seven",
8: "Eight",
9: "Nine",
}
map_tens = {
10: "Ten",
11: "Eleven",
12: "Twelve",
13: "Thirteen",
14: "Fourteen",
15: "Fifteen",
16: "Sixteen",
17: "Seventeen",
18: "Eighteen",
19: "Nineteen",
}
map_tenths = {
2: "Twenty",
3: "Thirty",
4: "Forty",
5: "Fifty",
6: "Sixty",
7: "Seventy",
8: "Eighty",
9: "Ninety",
}
def convert_ones(num):
"""
Convert ones number to word.
Parameters
----------
num: int
Single digit integer number
"""
if len(str(num)) > 1:
raise Exception("Must have at most 1 digit")
num = int(num)
return map_ones[num]
def convert_tenths(num):
"""
Convert tenths number to word.
Parameters
----------
num: int
Double digit integer number
"""
if len(str(num)) > 2:
raise Exception("Must have at most 2 digits")
num = int(num)
bases = ""
# less than 10
if num < 10:
return map_ones[num]
# 10-19
if 10 <= num < 20:
return map_tens[num]
# 20-99
first_num = map_tenths[int(str(num)[0])]
second_num = map_ones[int(str(num)[1])]
if not second_num:
return first_num
return first_num + " " + second_num
def get_dollar(hundredth, tenth, one, base):
"""
Given hundredth, tenth and one integer number for base (e.g. Billion, Million), return converted word
Parameters
----------
hundredth: int
Hundredth number
tenth: int
Tenth number
one: int
One number
base: string
Base value
"""
dollar_word = ""
if hundredth:
dollar_word += "{0} Hundred".format(convert_ones(hundredth))
# Add "And" if there's numbers after hundredths
if hundredth and (tenth or one):
dollar_word += " And "
if tenth or one:
dollar_word += "{0}".format(convert_tenths(int(str(tenth) + str(one))))
if base:
dollar_word += " {0}".format(base)
return dollar_word
def get_billion(hundredth, tenth, one):
return get_dollar(hundredth, tenth, one, "Billion")
def get_million(hundredth, tenth, one):
return get_dollar(hundredth, tenth, one, "Million")
def get_thousand(hundredth, tenth, one):
return get_dollar(hundredth, tenth, one, "Thousand")
def get_one(hundredth, tenth, one):
return get_dollar(hundredth, tenth, one, "")
def get_cent(tenth, one):
"""
Given tenth and one integer number (for cent), return converted word
Parameters
----------
tenth: int
Tenth number
one: int
One number
"""
cent_word = ""
if tenth or one:
cent_word += "{0}".format(convert_tenths(int(str(tenth) + str(one))))
if cent_word:
cent_word = "Cents {0} ".format(cent_word)
return cent_word
def get_index(val, index, default=0):
try:
return val[index]
except IndexError:
return default
def extract(num):
"""
Given a max 3 character number, extract and return hundredth, tenth and one value
Parameters
----------
num: string
Number in string
Return
----------
hundredth: int
Hundredth number
tenth: int
Tenth number
one: int
One number
"""
hundredth = 0
tenth = 0
one = 0
if len(num) == 3:
hundredth, tenth, one = int(num[0]), int(num[1]), int(num[2])
if len(num) == 2:
tenth, one = int(num[0]), int(num[1])
if len(num) == 1:
one = int(num[0])
return hundredth, tenth, one
def generate_dollar_word(num):
"""
Generate word for dollar
Parameters
----------
num: string
Dollar number in string
"""
word = ""
# at least 1 billion
if len(num) > 9:
billion_num = int(num[0:(len(num)-9)])
num = str(int(num) - (billion_num*int(1e9)))
hundredth, tenth, one = extract(str(billion_num))
word += "{0} ".format(get_billion(hundredth, tenth, one))
# at least 1 million
if len(num) > 6:
million_num = int(num[0:(len(num)-6)])
num = str(int(num) - (million_num*int(1e6)))
hundredth, tenth, one = extract(str(million_num))
word += "{0} ".format(get_million(hundredth, tenth, one))
# at least 1 thousand
if len(num) > 3:
thousand_num = int(num[0:(len(num)-3)])
num = str(int(num) - (thousand_num*int(1e3)))
hundredth, tenth, one = extract(str(thousand_num))
word += "{0} ".format(get_thousand(hundredth, tenth, one))
# at least 1
if int(num) and len(num) > 0:
one_num = int(num[0:len(num)])
num = str(int(num) - one_num)
hundredth, tenth, one = extract(str(one_num))
word += "{0} ".format(get_one(hundredth, tenth, one))
return word
def generate_cent_word(num):
"""
Generate word for cent
Parameters
----------
num: string
Cent number in string
"""
word = ""
hundredth, tenth, one = extract(str(num))
word += get_cent(tenth, one)
return word
def validate(amt):
# amt MUST be in string to avoid accidental round off
if Decimal(amt) > Decimal(str(1e11)):
raise Exception("Please enter an amount smaller than 100 billion")
if len(get_index(amt.split('.'), 1, "")) > 2:
raise Exception("Please enter an amount within 2 decimal place")
def generate_word(amt):
# remove commas and spaces from word
amt = amt.replace(",", "").replace(" ", "")
validate(amt)
amt = '{0:.2f}'.format(Decimal(amt))
amt_list = amt.split('.')
dollar_amt = get_index(amt_list, 0)
cent_amt = get_index(amt_list, 1)
dollar_word = generate_dollar_word(dollar_amt)
cent_word = generate_cent_word(cent_amt)
if not dollar_word:
return cent_word + "Only"
if not cent_word:
return dollar_word + "Only"
return dollar_word + "And " + cent_word + "Only"
| pirsquare/chequeconvert-python | chequeconvert/base.py | Python | mit | 6,111 |
import numpy as np
import torch
import os
import sys
import functools
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import init
import torch.nn.functional as F
import torchvision.models as M
class GANLoss(nn.Module):
def __init__(self, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
self.loss = nn.MSELoss()
def get_target_tensor(self, input, target_is_real):
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
def U_weight_init(ms):
for m in ms.modules():
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
m.weight.data = init.kaiming_normal(m.weight.data, a=0.2)
elif classname.find('ConvTranspose2d') != -1:
m.weight.data = init.kaiming_normal(m.weight.data)
print ('worked!') # TODO: kill this
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.weight.data = init.kaiming_normal(m.weight.data)
def LR_weight_init(ms):
for m in ms.modules():
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data = init.kaiming_normal(m.weight.data, a=0.2)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.weight.data = init.kaiming_normal(m.weight.data, a=0.2)
def R_weight_init(ms):
for m in ms.modules():
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data = init.kaiming_normal(m.weight.data)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.weight.data = init.kaiming_normal(m.weight.data)
############################
# G network
###########################
# custom weights initialization called on netG
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def def_netG(ngf=64, norm='instance'):
norm_layer = get_norm_layer(norm_type=norm)
netG = UnetGenerator(ngf, norm_layer=norm_layer)
return netG
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
def __init__(self, ngf, norm_layer):
super(UnetGenerator, self).__init__()
################ downS
self.down1 = nn.Conv2d(1, ngf // 2, kernel_size=4, stride=2, padding=1)
down = [nn.Conv2d(ngf // 2, ngf, kernel_size=4, stride=2, padding=1), norm_layer(ngf)]
self.down2 = nn.Sequential(*down)
down = [nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=1), norm_layer(ngf * 2)]
self.down3 = nn.Sequential(*down)
down = [nn.Conv2d(ngf * 2, ngf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ngf * 4)]
self.down4 = nn.Sequential(*down)
down = [nn.Conv2d(ngf * 4, ngf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ngf * 4)]
self.down5 = nn.Sequential(*down)
down = [nn.Conv2d(ngf * 4, ngf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ngf * 4)]
self.down6 = nn.Sequential(*down)
down = [nn.Conv2d(ngf * 4, ngf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ngf * 4)]
self.down7 = nn.Sequential(*down)
self.down8 = nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1)
################ down--up
up = [nn.ConvTranspose2d(ngf * 8 + 2048, ngf * 8, kernel_size=4, stride=2, padding=1),
norm_layer(ngf * 8)]
self.up8 = nn.Sequential(*up)
up = [nn.ConvTranspose2d(ngf * 12, ngf * 8, kernel_size=4, stride=2, padding=1),
norm_layer(ngf * 8)]
self.up7 = nn.Sequential(*up)
up = [nn.ConvTranspose2d(ngf * 12, ngf * 8, kernel_size=4, stride=2, padding=1),
norm_layer(ngf * 8)]
self.up6 = nn.Sequential(*up)
up = [nn.ConvTranspose2d(ngf * 12, ngf * 8, kernel_size=4, stride=2, padding=1),
norm_layer(ngf * 8)]
self.up5 = nn.Sequential(*up)
up = [nn.ConvTranspose2d(ngf * 12, ngf * 4, kernel_size=4, stride=2, padding=1),
norm_layer(ngf * 4)]
self.up4 = nn.Sequential(*up)
up = [nn.ConvTranspose2d(ngf * 6, ngf * 2, kernel_size=4, stride=2, padding=1),
norm_layer(ngf * 2)]
self.up3 = nn.Sequential(*up)
up = [nn.ConvTranspose2d(ngf * 3, ngf, kernel_size=4, stride=2, padding=1), norm_layer(ngf)]
self.up2 = nn.Sequential(*up)
self.up1 = nn.ConvTranspose2d(int(ngf * 1.5), 3, kernel_size=4, stride=2, padding=1)
self.linear = nn.Linear(4096, 2048)
U_weight_init(self)
def forward(self, input, VGG):
x1 = F.leaky_relu(self.down1(input), 0.2, True)
x2 = F.leaky_relu(self.down2(x1), 0.2, True)
x3 = F.leaky_relu(self.down3(x2), 0.2, True)
x4 = F.leaky_relu(self.down4(x3), 0.2, True)
x5 = F.leaky_relu(self.down5(x4), 0.2, True)
x6 = F.leaky_relu(self.down6(x5), 0.2, True)
x7 = F.leaky_relu(self.down7(x6), 0.2, True)
x8 = F.relu(self.down8(x7), True)
VGG = F.relu(self.linear(VGG), True)
x = F.relu(self.up8(torch.cat([x8, VGG.view(-1, 2048, 1, 1)], 1)), True)
x = F.relu(self.up7(torch.cat([x, x7], 1)), True)
x = F.relu(self.up6(torch.cat([x, x6], 1)), True)
x = F.relu(self.up5(torch.cat([x, x5], 1)), True)
x = F.relu(self.up4(torch.cat([x, x4], 1)), True)
x = F.relu(self.up3(torch.cat([x, x3], 1)), True)
x = F.relu(self.up2(torch.cat([x, x2], 1)), True)
x = F.tanh(self.up1(torch.cat([x, x1], 1)))
return x
############################
# D network
###########################
def def_netD(ndf=64, norm='batch'):
norm_layer = get_norm_layer(norm_type=norm)
netD = NLayerDiscriminator(ndf, norm_layer=norm_layer)
return netD
class NLayerDiscriminator(nn.Module):
def __init__(self, ndf, norm_layer=nn.BatchNorm2d):
super(NLayerDiscriminator, self).__init__()
kw = 4
padw = 1
self.ndf = ndf
sequence = [
nn.Conv2d(4, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
sequence += [
nn.Conv2d(ndf * 1, ndf * 2,
kernel_size=kw, stride=2, padding=padw),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True)
]
sequence += [
nn.Conv2d(ndf * 2, ndf * 4,
kernel_size=kw, stride=2, padding=padw),
norm_layer(ndf * 4),
nn.LeakyReLU(0.2, True)
]
sequence += [
nn.Conv2d(ndf * 4, ndf * 8,
kernel_size=kw, stride=1, padding=padw), # stride 1
norm_layer(ndf * 8),
nn.LeakyReLU(0.2, True)
]
self.model = nn.Sequential(*sequence)
self.linear = nn.Linear(4096, ndf * 8)
sequence = [
nn.Conv2d(ndf * 8, ndf * 8, kernel_size=kw, stride=1, padding=padw),
norm_layer(ndf * 8),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 8, 1, kernel_size=kw, stride=1, padding=padw),
]
self.final = nn.Sequential(*sequence)
LR_weight_init(self)
def forward(self, input, VGG):
x = self.model(input)
VGG = F.leaky_relu(self.linear(VGG), 0.2, True)
return self.final(x + VGG.view(-1, self.ndf * 8, 1, 1))
############################
# VGG feature
###########################
def def_netF():
vgg19 = M.vgg19()
vgg19.load_state_dict(torch.load('vgg19.pth'))
vgg19.classifier = nn.Sequential(
*list(vgg19.classifier.children())[:2]
)
for param in vgg19.parameters():
param.requires_grad = False
return vgg19
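# Minimal smoke test (not part of the original module): GANLoss above is the
# least-squares GAN criterion, comparing discriminator output against constant
# 1.0/0.0 targets via MSE. Shapes below are arbitrary placeholders.
if __name__ == "__main__":
    criterion = GANLoss()
    pred = Variable(torch.rand(4, 1, 30, 30))
    print("loss vs. real target:", criterion(pred, True))
    print("loss vs. fake target:", criterion(pred, False))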
| orashi/PaintsPytorch | models/base_model.py | Python | mit | 9,627 |
# coding: utf-8
"""
Routines for printing a report.
"""
from __future__ import print_function, division, absolute_import
import sys
from collections import namedtuple
from contextlib import contextmanager
import textwrap
from .adapters import BACK, FRONT, PREFIX, SUFFIX, ANYWHERE
from .modifiers import QualityTrimmer, AdapterCutter
from .filters import (NoFilter, PairedNoFilter, TooShortReadFilter, TooLongReadFilter,
DiscardTrimmedFilter, DiscardUntrimmedFilter, Demultiplexer, NContentFilter)
class Statistics:
def __init__(self, n, total_bp1, total_bp2):
"""
n -- total number of reads
total_bp1 -- number of bases in first reads
total_bp2 -- number of bases in second reads (set to None for single-end data)
"""
self.n = n
self.total_bp = total_bp1
self.total_bp1 = total_bp1
if total_bp2 is None:
self.paired = False
else:
self.paired = True
self.total_bp2 = total_bp2
self.total_bp += total_bp2
def collect(self, adapters_pair, time, modifiers, modifiers2, writers):
self.time = max(time, 0.01)
self.too_short = None
self.too_long = None
self.written = 0
self.written_bp = [0, 0]
self.too_many_n = None
# Collect statistics from writers/filters
for w in writers:
if isinstance(w, (NoFilter, PairedNoFilter, Demultiplexer)) or isinstance(w.filter, (DiscardTrimmedFilter, DiscardUntrimmedFilter)):
self.written += w.written
if self.n > 0:
self.written_fraction = self.written / self.n
self.written_bp = self.written_bp[0] + w.written_bp[0], self.written_bp[1] + w.written_bp[1]
elif isinstance(w.filter, TooShortReadFilter):
self.too_short = w.filtered
elif isinstance(w.filter, TooLongReadFilter):
self.too_long = w.filtered
elif isinstance(w.filter, NContentFilter):
self.too_many_n = w.filtered
assert self.written is not None
# Collect statistics from modifiers
self.with_adapters = [0, 0]
self.quality_trimmed_bp = [0, 0]
self.did_quality_trimming = False
for i, modifiers_list in [(0, modifiers), (1, modifiers2)]:
for modifier in modifiers_list:
if isinstance(modifier, QualityTrimmer):
self.quality_trimmed_bp[i] = modifier.trimmed_bases
self.did_quality_trimming = True
elif isinstance(modifier, AdapterCutter):
self.with_adapters[i] += modifier.with_adapters
self.with_adapters_fraction = [ (v / self.n if self.n > 0 else 0) for v in self.with_adapters ]
self.quality_trimmed = sum(self.quality_trimmed_bp)
self.quality_trimmed_fraction = self.quality_trimmed / self.total_bp if self.total_bp > 0 else 0.0
self.total_written_bp = sum(self.written_bp)
self.total_written_bp_fraction = self.total_written_bp / self.total_bp if self.total_bp > 0 else 0.0
if self.n > 0:
if self.too_short is not None:
self.too_short_fraction = self.too_short / self.n
if self.too_long is not None:
self.too_long_fraction = self.too_long / self.n
if self.too_many_n is not None:
self.too_many_n_fraction = self.too_many_n / self.n
ADAPTER_TYPES = {
BACK: "regular 3'",
FRONT: "regular 5'",
PREFIX: "anchored 5'",
SUFFIX: "anchored 3'",
ANYWHERE: "variable 5'/3'"
}
def print_error_ranges(adapter_length, error_rate):
print("No. of allowed errors:")
prev = 0
for errors in range(1, int(error_rate * adapter_length) + 1):
r = int(errors / error_rate)
print("{0}-{1} bp: {2};".format(prev, r - 1, errors - 1), end=' ')
prev = r
if prev == adapter_length:
print("{0} bp: {1}".format(adapter_length, int(error_rate * adapter_length)))
else:
print("{0}-{1} bp: {2}".format(prev, adapter_length, int(error_rate * adapter_length)))
print()
def print_histogram(d, adapter_length, n, error_rate, errors):
"""
Print a histogram. Also, print the no. of reads expected to be
trimmed by chance (assuming a uniform distribution of nucleotides in the reads).
d -- a dictionary mapping lengths of trimmed sequences to their respective frequency
adapter_length -- adapter length
n -- total no. of reads.
"""
h = []
for length in sorted(d):
# when length surpasses adapter_length, the
# probability does not increase anymore
estimated = n * 0.25 ** min(length, adapter_length)
h.append( (length, d[length], estimated) )
print("length", "count", "expect", "max.err", "error counts", sep="\t")
for length, count, estimate in h:
max_errors = max(errors[length].keys())
errs = ' '.join(str(errors[length][e]) for e in range(max_errors+1))
print(length, count, "{0:.1F}".format(estimate), int(error_rate*min(length, adapter_length)), errs, sep="\t")
print()
def print_adjacent_bases(bases, sequence):
"""
Print a summary of the bases preceding removed adapter sequences.
Print a warning if one of the bases is overrepresented and there are
at least 20 preceding bases available.
Return whether a warning was printed.
"""
total = sum(bases.values())
if total == 0:
return False
print('Bases preceding removed adapters:')
warnbase = None
for base in ['A', 'C', 'G', 'T', '']:
b = base if base != '' else 'none/other'
fraction = 1.0 * bases[base] / total
print(' {0}: {1:.1%}'.format(b, fraction))
if fraction > 0.8 and base != '':
warnbase = b
if total >= 20 and warnbase is not None:
print('WARNING:')
print(' The adapter is preceded by "{0}" extremely often.'.format(warnbase))
print(' The provided adapter sequence may be incomplete.')
print(' To fix the problem, add "{0}" to the beginning of the adapter sequence.'.format(warnbase))
print()
return True
print()
return False
@contextmanager
def redirect_standard_output(file):
if file is None:
yield
return
old_stdout = sys.stdout
sys.stdout = file
yield
sys.stdout = old_stdout
def print_report(stats, adapters_pair):
"""Print report to standard output."""
if stats.n == 0:
print("No reads processed! Either your input file is empty or you used the wrong -f/--format parameter.")
return
print("Finished in {0:.2F} s ({1:.0F} us/read; {2:.2F} M reads/minute).".format(
stats.time, 1E6 * stats.time / stats.n, stats.n / stats.time * 60 / 1E6))
report = "\n=== Summary ===\n\n"
if stats.paired:
report += textwrap.dedent("""\
Total read pairs processed: {n:13,d}
Read 1 with adapter: {with_adapters[0]:13,d} ({with_adapters_fraction[0]:.1%})
Read 2 with adapter: {with_adapters[1]:13,d} ({with_adapters_fraction[1]:.1%})
""")
else:
report += textwrap.dedent("""\
Total reads processed: {n:13,d}
Reads with adapters: {with_adapters[0]:13,d} ({with_adapters_fraction[0]:.1%})
""")
if stats.too_short is not None:
report += "{pairs_or_reads} that were too short: {too_short:13,d} ({too_short_fraction:.1%})\n"
if stats.too_long is not None:
report += "{pairs_or_reads} that were too long: {too_long:13,d} ({too_long_fraction:.1%})\n"
if stats.too_many_n is not None:
report += "{pairs_or_reads} with too many N: {too_many_n:13,d} ({too_many_n_fraction:.1%})\n"
report += textwrap.dedent("""\
{pairs_or_reads} written (passing filters): {written:13,d} ({written_fraction:.1%})
Total basepairs processed: {total_bp:13,d} bp
""")
if stats.paired:
report += " Read 1: {total_bp1:13,d} bp\n"
report += " Read 2: {total_bp2:13,d} bp\n"
if stats.did_quality_trimming:
report += "Quality-trimmed: {quality_trimmed:13,d} bp ({quality_trimmed_fraction:.1%})\n"
if stats.paired:
report += " Read 1: {quality_trimmed_bp[0]:13,d} bp\n"
report += " Read 2: {quality_trimmed_bp[1]:13,d} bp\n"
report += "Total written (filtered): {total_written_bp:13,d} bp ({total_written_bp_fraction:.1%})\n"
if stats.paired:
report += " Read 1: {written_bp[0]:13,d} bp\n"
report += " Read 2: {written_bp[1]:13,d} bp\n"
v = vars(stats)
v['pairs_or_reads'] = "Pairs" if stats.paired else "Reads"
try:
report = report.format(**v)
except ValueError:
# Python 2.6 does not support the comma format specifier (PEP 378)
report = report.replace(",d}", "d}").format(**v)
print(report)
warning = False
for which_in_pair in (0, 1):
for adapter in adapters_pair[which_in_pair]:
total_front = sum(adapter.lengths_front.values())
total_back = sum(adapter.lengths_back.values())
total = total_front + total_back
where = adapter.where
assert where == ANYWHERE or (where in (BACK, SUFFIX) and total_front == 0) or (where in (FRONT, PREFIX) and total_back == 0)
if stats.paired:
extra = 'First read: ' if which_in_pair == 0 else 'Second read: '
else:
extra = ''
print("=" * 3, extra + "Adapter", adapter.name, "=" * 3)
print()
print("Sequence: {0}; Type: {1}; Length: {2}; Trimmed: {3} times.".
format(adapter.sequence, ADAPTER_TYPES[adapter.where],
len(adapter.sequence), total))
if total == 0:
print()
continue
if where == ANYWHERE:
print(total_front, "times, it overlapped the 5' end of a read")
print(total_back, "times, it overlapped the 3' end or was within the read")
print()
print_error_ranges(len(adapter), adapter.max_error_rate)
print("Overview of removed sequences (5')")
print_histogram(adapter.lengths_front, len(adapter), stats.n, adapter.max_error_rate, adapter.errors_front)
print()
print("Overview of removed sequences (3' or within)")
print_histogram(adapter.lengths_back, len(adapter), stats.n, adapter.max_error_rate, adapter.errors_back)
elif where in (FRONT, PREFIX):
print()
print_error_ranges(len(adapter), adapter.max_error_rate)
print("Overview of removed sequences")
print_histogram(adapter.lengths_front, len(adapter), stats.n, adapter.max_error_rate, adapter.errors_front)
else:
assert where in (BACK, SUFFIX)
print()
print_error_ranges(len(adapter), adapter.max_error_rate)
warning = warning or print_adjacent_bases(adapter.adjacent_bases, adapter.sequence)
print("Overview of removed sequences")
print_histogram(adapter.lengths_back, len(adapter), stats.n, adapter.max_error_rate, adapter.errors_back)
if warning:
print('WARNING:')
print(' One or more of your adapter sequences may be incomplete.')
print(' Please see the detailed output above.')
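# Illustrative usage (not part of the original module; run via
# "python -m cutadapt.report" so the relative imports above resolve):
# print_error_ranges is self-contained, e.g. for a hypothetical 20 bp adapter
# at a 10% maximum error rate it prints the per-length allowed-error table.
if __name__ == '__main__':
    print_error_ranges(20, 0.1)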
| Chris7/cutadapt | cutadapt/report.py | Python | mit | 10,176 |
# Copyright (c) 2015-2016 Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import pytest
from molecule.verifier import trailing
@pytest.fixture()
def trailing_instance(molecule_instance):
return trailing.Trailing(molecule_instance)
def test_trailing_newline(trailing_instance):
line = ['line1', 'line2', '']
res = trailing_instance._trailing_newline(line)
assert res is None
def test_trailing_newline_matched(trailing_instance):
line = ['line1', 'line2', '\n']
res = trailing_instance._trailing_newline(line)
assert res
def test_trailing_whitespace_success(trailing_instance):
line = ['line1', 'line2', 'line3']
res = trailing_instance._trailing_whitespace(line)
assert res is None
def test_trailing_whitespace_matched(trailing_instance):
line = ['line1', 'line2', 'line3 ']
res = trailing_instance._trailing_whitespace(line)
assert res
def test_trailing_whitespace_matched_multiline(trailing_instance):
line = ['line1', 'line2 ', 'line3', 'line4 ']
res = trailing_instance._trailing_whitespace(line)
assert [2, 4] == res
| rjfellman/molecule | test/unit/verifier/test_trailing.py | Python | mit | 2,152 |
#!python3
# -*- coding:utf-8 -*-
import os
import sys
import time
import ctypes
import shutil
import subprocess
IsPy3 = sys.version_info[0] >= 3
if IsPy3:
import winreg
else:
import codecs
import _winreg as winreg
BuildType = 'Release'
IsRebuild = True
Build = 'Rebuild'
Update = False
Copy = False
CleanAll = False
BuildTimeout = 30*60
MSBuild = None
IncrediBuild = None
UseMSBuild = True  # build with MSBuild by default; set to False to build with IncrediBuild instead
# Only the 5 variables below need to change for a different project
SlnFile = '../storage.sln' # path relative to this script
UpdateDir = [] # paths relative to this script; leave empty to skip updating
ExecBatList = [] # scripts run before building, relative to this script; may be empty. Each .bat is executed after cd-ing into its directory
MSBuildFirstProjects = [r'storage'] # for MSBuild, give the project's path inside the solution (.sln)
# projects MSBuild builds first; leave empty for no particular order
IncrediBuildFirstProjects = ['storage'] # IncrediBuild only needs the project name
# projects IncrediBuild builds first; leave empty for no particular order
class ConsoleColor():
'''This class defines the values of color for printing on console window'''
Black = 0
DarkBlue = 1
DarkGreen = 2
DarkCyan = 3
DarkRed = 4
DarkMagenta = 5
DarkYellow = 6
Gray = 7
DarkGray = 8
Blue = 9
Green = 10
Cyan = 11
Red = 12
Magenta = 13
Yellow = 14
White = 15
class Coord(ctypes.Structure):
_fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class SmallRect(ctypes.Structure):
_fields_ = [('Left', ctypes.c_short),
('Top', ctypes.c_short),
('Right', ctypes.c_short),
('Bottom', ctypes.c_short),
]
class ConsoleScreenBufferInfo(ctypes.Structure):
_fields_ = [('dwSize', Coord),
('dwCursorPosition', Coord),
('wAttributes', ctypes.c_uint),
('srWindow', SmallRect),
('dwMaximumWindowSize', Coord),
]
class Win32API():
'''Some native methods for python calling'''
StdOutputHandle = -11
ConsoleOutputHandle = None
DefaultColor = None
@staticmethod
def SetConsoleColor(color):
'''Change the text color on console window'''
if not Win32API.DefaultColor:
if not Win32API.ConsoleOutputHandle:
Win32API.ConsoleOutputHandle = ctypes.windll.kernel32.GetStdHandle(Win32API.StdOutputHandle)
bufferInfo = ConsoleScreenBufferInfo()
ctypes.windll.kernel32.GetConsoleScreenBufferInfo(Win32API.ConsoleOutputHandle, ctypes.byref(bufferInfo))
Win32API.DefaultColor = int(bufferInfo.wAttributes & 0xFF)
if IsPy3:
sys.stdout.flush() # need flush stdout in python 3
ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, color)
@staticmethod
def ResetConsoleColor():
'''Reset the default text color on console window'''
if IsPy3:
sys.stdout.flush() # need flush stdout in python 3
ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, Win32API.DefaultColor)
class Logger():
LogFile = '@AutomationLog.txt'
LineSep = '\n'
@staticmethod
def Write(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
if printToStdout:
isValidColor = (consoleColor >= ConsoleColor.Black and consoleColor <= ConsoleColor.White)
if isValidColor:
Win32API.SetConsoleColor(consoleColor)
try:
sys.stdout.write(log)
except UnicodeError as e:
Win32API.SetConsoleColor(ConsoleColor.Red)
isValidColor = True
sys.stdout.write(str(type(e)) + ' can\'t print the log!\n')
if isValidColor:
Win32API.ResetConsoleColor()
if not writeToFile:
return
if IsPy3:
logFile = open(Logger.LogFile, 'a+', encoding = 'utf-8')
else:
logFile = codecs.open(Logger.LogFile, 'a+', 'utf-8')
try:
logFile.write(log)
# logFile.flush() # need flush in python 3, otherwise log won't be saved
except Exception as ex:
logFile.close()
sys.stdout.write('can not write log with exception: {0} {1}'.format(type(ex), ex))
@staticmethod
def WriteLine(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
Logger.Write(log + Logger.LineSep, consoleColor, writeToFile, printToStdout)
@staticmethod
def Log(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
t = time.localtime()
log = '{0}-{1:02}-{2:02} {3:02}:{4:02}:{5:02} - {6}{7}'.format(t.tm_year, t.tm_mon, t.tm_mday,
t.tm_hour, t.tm_min, t.tm_sec, log, Logger.LineSep)
Logger.Write(log, consoleColor, writeToFile, printToStdout)
@staticmethod
def DeleteLog():
if os.path.exists(Logger.LogFile):
os.remove(Logger.LogFile)
def GetMSBuildPath():
cmd = 'call "%VS120COMNTOOLS%..\\..\\VC\\vcvarsall.bat" x86\nwhere msbuild'
ftemp = open('GetMSBuildPath.bat', 'wt')
ftemp.write(cmd)
ftemp.close()
p = subprocess.Popen('GetMSBuildPath.bat', stdout = subprocess.PIPE)
p.wait()
lines = p.stdout.read().decode().splitlines()
os.remove('GetMSBuildPath.bat')
for line in lines:
if 'MSBuild.exe' in line:
return line
def GetIncrediBuildPath():
try:
key=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Classes\IncrediBuild.MonitorFile\shell\open\command')
value, typeId = winreg.QueryValueEx(key, '')
if value:
start = value.find('"')
end = value.find('"', start + 1)
path = value[start+1:end]
buildConsole = os.path.join(os.path.dirname(path), 'BuildConsole.exe')
return buildConsole
except FileNotFoundError as e:
Logger.WriteLine('can not find IncrediBuild', ConsoleColor.Red)
def UpdateCode():
# put git to path first
if not shutil.which('git.exe'):
        Logger.Log('can not find git.exe. Please make sure the git\\bin directory was added to the PATH environment variable when installing git!!! Skipping code update!!!', ConsoleColor.Yellow)
        return False
oldDir = os.getcwd()
for dir in UpdateDir:
os.chdir(dir)
ret = os.system('git pull')
os.chdir(oldDir)
if ret != 0:
            Logger.Log('update {0} failed'.format(dir), ConsoleColor.Yellow)
            return False
return True
def BuildProject(cmd):
for i in range(6):
Logger.WriteLine(cmd, ConsoleColor.Cyan)
buildFailed = True
startTime = time.time()
        p = subprocess.Popen(cmd)  # don't use stdout=subprocess.PIPE with IncrediBuild, otherwise p.wait() never returns; probably an IncrediBuild bug
if IsPy3:
try:
buildFailed = p.wait(BuildTimeout)
except subprocess.TimeoutExpired as e:
Logger.Log('{0}'.format(e), ConsoleColor.Yellow)
p.kill()
else:
buildFailed = p.wait()
if not UseMSBuild:
            # IncrediBuild's exit code does not indicate whether the build succeeded; parse its log output instead
fin = open('IncrediBuild.log')
for line in fin:
if line.startswith('=========='):
Logger.Write(line, ConsoleColor.Cyan, writeToFile = True if IsPy3 else False)
if IsPy3:
                        start = line.find('失败') + 3  # extract the count after '失败' ("failed") from the localized summary, e.g. "========== 生成: 成功 1 个,失败 0 个,最新 0 个,跳过 0 个 =========="
                    else:  # special handling for Python 2 compatibility; ugly but it works
start = 0
n2 = 0
while 1:
if line[start].isdigit():
n2 += 1
if n2 == 2:
break
start = line.find(' ', start)
start += 1
end = line.find(' ', start)
failCount = int(line[start:end])
buildFailed = failCount > 0
else:
Logger.Write(line, ConsoleColor.Red, writeToFile = True if IsPy3 else False, printToStdout = True if ' error ' in line else False)
fin.close()
costTime = time.time() - startTime
Logger.WriteLine('build cost time: {0:.1f}s\n'.format(costTime), ConsoleColor.Green)
if not buildFailed:
return True
return False
def BuildAllProjects():
buildSuccess = False
cmds = []
if UseMSBuild:
if IsRebuild:
if CleanAll:
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Debug'))
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Release'))
else:
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType))
for project in MSBuildFirstProjects:
cmds.append('{0} {1} /t:{2} /p:Configuration={3} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, project, BuildType))
cmds.append('{0} {1} /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType))
else: #IncrediBuild
if IsRebuild:
if CleanAll:
cmds.append('"{0}" {1} /clean /cfg="{2}|Win32" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Debug'))
cmds.append('"{0}" {1} /clean /cfg="{2}|Win32" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Release'))
else:
cmds.append('"{0}" {1} /clean /cfg="{2}|Win32" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType))
for project in IncrediBuildFirstProjects:
cmds.append('"{0}" {1} /build /prj={2} /cfg="{3}|Win32" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, project, BuildType))
cmds.append('"{0}" {1} /build /cfg="{2}|Win32" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType))
for cmd in cmds:
buildSuccess = BuildProject(cmd)
if not buildSuccess:
break
return buildSuccess
def main():
if UseMSBuild:
if not os.path.exists(MSBuild):
Logger.Log('can not find msbuild.exe', ConsoleColor.Red)
return 1
else:
if not os.path.exists(IncrediBuild):
            Logger.Log('can not find BuildConsole.exe', ConsoleColor.Red)
return 1
dir = os.path.dirname(__file__)
if dir:
oldDir = os.getcwd()
os.chdir(dir)
if Update:
if not UpdateCode():
return 1
Logger.Log('git update succeed', ConsoleColor.Green)
if Copy:
for bat in ExecBatList:
oldBatDir = os.getcwd()
batDir = os.path.dirname(bat)
batName = os.path.basename(bat)
if batDir:
os.chdir(batDir)
start = time.clock()
os.system(batName)
Logger.Log('run "{}" cost {:.1f} seconds'.format(batName, time.clock() - start), ConsoleColor.Green)
if batDir:
os.chdir(oldBatDir)
buildSuccess = BuildAllProjects()
if buildSuccess:
Logger.Log('build succeed', ConsoleColor.Green)
else:
Logger.Log('build failed', ConsoleColor.Red)
if dir:
os.chdir(oldDir)
return 0 if buildSuccess else 1
if __name__ == '__main__':
Logger.Log('run with argv ' + str(sys.argv), ConsoleColor.Green)
sys.argv = [x.lower() for x in sys.argv]
start_time = time.time()
if 'debug' in sys.argv:
BuildType = 'Debug'
if 'build' in sys.argv:
IsRebuild = False
Build = 'Build'
if 'update' in sys.argv:
Update = True
if 'copy' in sys.argv:
Copy = True
if 'clean' in sys.argv:
CleanAll = True
if 'incredibuild' in sys.argv:
UseMSBuild = False
if UseMSBuild:
MSBuild = GetMSBuildPath()
if not MSBuild:
Logger.Log('can not find MSBuild.exe', ConsoleColor.Red)
exit(1)
else:
IncrediBuild = GetIncrediBuildPath()
if not IncrediBuild:
Logger.Log('can not find BuildConsole.exe', ConsoleColor.Red)
exit(1)
cwd = os.getcwd()
Logger.WriteLine('current dir is: {0}, {1}: {2}'.format(cwd, Build, BuildType))
ret = main()
end_time = time.time()
cost_time = end_time-start_time
Logger.WriteLine('all build cost time: {0:.2f} seconds'.format(cost_time), ConsoleColor.Green)
exit(ret)
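    # Illustrative invocation (not part of the original script): for example
    #   python rebuild_storage.py debug build incredibuild
    # performs an incremental Debug build through IncrediBuild, while running
    # the script with no arguments does a Release rebuild with MSBuild.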
| xylsxyls/xueyelingshuang | src/storageMysql/scripts/rebuild_storage.py | Python | mit | 13,961 |
""" kNN digit classifier, converting images to binary before
training and classification. Should (or should allow for)
reduction in kNN object size.
"""
import cv2
from utils import classifier as cs
from utils import knn
from utils import mnist
class KnnBinary(knn.KnnDigitClassifier):
def train(self, images, labels):
super(KnnBinary, self).train(
self.preprocess_all(images), labels)
def classify(self, image):
return super(KnnBinary, self).classify(self.preprocess(image))
def preprocess(self, image):
        # cv2.threshold returns a (retval, dst) tuple; [1] selects the
        # thresholded image.
return cv2.threshold(image, 127, 1, cv2.THRESH_BINARY)[1]
def preprocess_all(self, images):
for image in images:
yield self.preprocess(image)
if __name__ == '__main__':
NUM_TRAINS = 100
NUM_TESTS = 100
runner = cs.ClassifierRunner(KnnBinary())
runner.train(mnist.training_images(NUM_TRAINS), mnist.training_labels(NUM_TRAINS))
runner.run(mnist.test_images(NUM_TESTS), mnist.test_labels(NUM_TESTS))
print(runner.get_report_str())
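    # Hypothetical sanity check (not in the original): preprocess() binarizes
    # with cv2.threshold, so every pixel the kNN sees is either 0 or 1, e.g.
    #   sample = np.arange(256, dtype=np.uint8).reshape(16, 16)
    #   cv2.threshold(sample, 127, 1, cv2.THRESH_BINARY)[1]   # values in {0, 1}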
| uozuAho/mnist-ocr | src/knn_binary.py | Python | mit | 1,150 |
"""
Support for Z-Wave.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zwave/
"""
import logging
import os.path
import time
from pprint import pprint
from homeassistant.const import (
ATTR_BATTERY_LEVEL, ATTR_ENTITY_ID, ATTR_LOCATION,
CONF_CUSTOMIZE, EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import discovery
from homeassistant.helpers.event import track_time_change
from homeassistant.util import convert, slugify
DOMAIN = "zwave"
CONF_USB_STICK_PATH = "usb_path"
DEFAULT_CONF_USB_STICK_PATH = "/zwaveusbstick"
CONF_DEBUG = "debug"
CONF_POLLING_INTERVAL = "polling_interval"
CONF_POLLING_INTENSITY = "polling_intensity"
CONF_AUTOHEAL = "autoheal"
DEFAULT_CONF_AUTOHEAL = True
# How long to wait for the zwave network to be ready.
NETWORK_READY_WAIT_SECS = 30
SERVICE_ADD_NODE = "add_node"
SERVICE_REMOVE_NODE = "remove_node"
SERVICE_HEAL_NETWORK = "heal_network"
SERVICE_SOFT_RESET = "soft_reset"
SERVICE_TEST_NETWORK = "test_network"
EVENT_SCENE_ACTIVATED = "zwave.scene_activated"
COMMAND_CLASS_WHATEVER = None
COMMAND_CLASS_SENSOR_MULTILEVEL = 49
COMMAND_CLASS_COLOR = 51
COMMAND_CLASS_METER = 50
COMMAND_CLASS_ALARM = 113
COMMAND_CLASS_SWITCH_BINARY = 37
COMMAND_CLASS_SENSOR_BINARY = 48
COMMAND_CLASS_SWITCH_MULTILEVEL = 38
COMMAND_CLASS_DOOR_LOCK = 98
COMMAND_CLASS_THERMOSTAT_SETPOINT = 67
COMMAND_CLASS_THERMOSTAT_FAN_MODE = 68
COMMAND_CLASS_BATTERY = 128
COMMAND_CLASS_SENSOR_ALARM = 156
GENERIC_COMMAND_CLASS_WHATEVER = None
GENERIC_COMMAND_CLASS_REMOTE_CONTROLLER = 1
GENERIC_COMMAND_CLASS_NOTIFICATION = 7
GENERIC_COMMAND_CLASS_REMOTE_SWITCH = 12
GENERIC_COMMAND_CLASS_REPEATER_SLAVE = 15
GENERIC_COMMAND_CLASS_MULTILEVEL_SWITCH = 17
GENERIC_COMMAND_CLASS_BINARY_SWITCH = 16
GENERIC_COMMAND_CLASS_WALL_CONTROLLER = 24
GENERIC_COMMAND_CLASS_ENTRY_CONTROL = 64
GENERIC_COMMAND_CLASS_BINARY_SENSOR = 32
GENERIC_COMMAND_CLASS_MULTILEVEL_SENSOR = 33
GENERIC_COMMAND_CLASS_METER = 49
GENERIC_COMMAND_CLASS_ALARM_SENSOR = 161
GENERIC_COMMAND_CLASS_THERMOSTAT = 8
SPECIFIC_DEVICE_CLASS_WHATEVER = None
SPECIFIC_DEVICE_CLASS_NOT_USED = 0
SPECIFIC_DEVICE_CLASS_MULTILEVEL_POWER_SWITCH = 1
SPECIFIC_DEVICE_CLASS_ADVANCED_DOOR_LOCK = 2
SPECIFIC_DEVICE_CLASS_MULTIPOSITION_MOTOR = 3
SPECIFIC_DEVICE_CLASS_SECURE_KEYPAD_DOOR_LOCK = 3
SPECIFIC_DEVICE_CLASS_MULTILEVEL_SCENE = 4
SPECIFIC_DEVICE_CLASS_SECURE_DOOR = 5
SPECIFIC_DEVICE_CLASS_MOTOR_CONTROL_CLASS_A = 5
SPECIFIC_DEVICE_CLASS_MOTOR_CONTROL_CLASS_B = 6
SPECIFIC_DEVICE_CLASS_SECURE_BARRIER_ADD_ON = 7
SPECIFIC_DEVICE_CLASS_MOTOR_CONTROL_CLASS_C = 7
GENRE_WHATEVER = None
GENRE_USER = "User"
TYPE_WHATEVER = None
TYPE_BYTE = "Byte"
TYPE_BOOL = "Bool"
TYPE_DECIMAL = "Decimal"
# List of tuples (component domain, generic device classes, specific device
# classes, command classes, value type, value genre) used for discovery.
DISCOVERY_COMPONENTS = [
('sensor',
[GENERIC_COMMAND_CLASS_WHATEVER],
[SPECIFIC_DEVICE_CLASS_WHATEVER],
[COMMAND_CLASS_SENSOR_MULTILEVEL,
COMMAND_CLASS_METER,
COMMAND_CLASS_ALARM,
COMMAND_CLASS_SENSOR_ALARM],
TYPE_WHATEVER,
GENRE_USER),
('light',
[GENERIC_COMMAND_CLASS_MULTILEVEL_SWITCH],
[SPECIFIC_DEVICE_CLASS_MULTILEVEL_POWER_SWITCH,
SPECIFIC_DEVICE_CLASS_MULTILEVEL_SCENE,
SPECIFIC_DEVICE_CLASS_NOT_USED],
[COMMAND_CLASS_SWITCH_MULTILEVEL],
TYPE_BYTE,
GENRE_USER),
('switch',
[GENERIC_COMMAND_CLASS_ALARM_SENSOR,
GENERIC_COMMAND_CLASS_BINARY_SENSOR,
GENERIC_COMMAND_CLASS_BINARY_SWITCH,
GENERIC_COMMAND_CLASS_ENTRY_CONTROL,
GENERIC_COMMAND_CLASS_MULTILEVEL_SENSOR,
GENERIC_COMMAND_CLASS_MULTILEVEL_SWITCH,
GENERIC_COMMAND_CLASS_NOTIFICATION,
GENERIC_COMMAND_CLASS_REMOTE_CONTROLLER,
GENERIC_COMMAND_CLASS_REMOTE_SWITCH,
GENERIC_COMMAND_CLASS_REPEATER_SLAVE,
GENERIC_COMMAND_CLASS_THERMOSTAT,
GENERIC_COMMAND_CLASS_WALL_CONTROLLER],
[SPECIFIC_DEVICE_CLASS_WHATEVER],
[COMMAND_CLASS_SWITCH_BINARY],
TYPE_BOOL,
GENRE_USER),
('binary_sensor',
[GENERIC_COMMAND_CLASS_ALARM_SENSOR,
GENERIC_COMMAND_CLASS_BINARY_SENSOR,
GENERIC_COMMAND_CLASS_BINARY_SWITCH,
GENERIC_COMMAND_CLASS_METER,
GENERIC_COMMAND_CLASS_MULTILEVEL_SENSOR,
GENERIC_COMMAND_CLASS_MULTILEVEL_SWITCH,
GENERIC_COMMAND_CLASS_NOTIFICATION,
GENERIC_COMMAND_CLASS_THERMOSTAT],
[SPECIFIC_DEVICE_CLASS_WHATEVER],
[COMMAND_CLASS_SENSOR_BINARY],
TYPE_BOOL,
GENRE_USER),
('thermostat',
[GENERIC_COMMAND_CLASS_THERMOSTAT],
[SPECIFIC_DEVICE_CLASS_WHATEVER],
[COMMAND_CLASS_THERMOSTAT_SETPOINT],
TYPE_WHATEVER,
GENRE_WHATEVER),
('hvac',
[GENERIC_COMMAND_CLASS_THERMOSTAT],
[SPECIFIC_DEVICE_CLASS_WHATEVER],
[COMMAND_CLASS_THERMOSTAT_FAN_MODE],
TYPE_WHATEVER,
GENRE_WHATEVER),
('lock',
[GENERIC_COMMAND_CLASS_ENTRY_CONTROL],
[SPECIFIC_DEVICE_CLASS_ADVANCED_DOOR_LOCK,
SPECIFIC_DEVICE_CLASS_SECURE_KEYPAD_DOOR_LOCK],
[COMMAND_CLASS_DOOR_LOCK],
TYPE_BOOL,
GENRE_USER),
('rollershutter',
[GENERIC_COMMAND_CLASS_MULTILEVEL_SWITCH],
[SPECIFIC_DEVICE_CLASS_MOTOR_CONTROL_CLASS_A,
SPECIFIC_DEVICE_CLASS_MOTOR_CONTROL_CLASS_B,
SPECIFIC_DEVICE_CLASS_MOTOR_CONTROL_CLASS_C,
SPECIFIC_DEVICE_CLASS_MULTIPOSITION_MOTOR],
[COMMAND_CLASS_WHATEVER],
TYPE_WHATEVER,
GENRE_USER),
('garage_door',
[GENERIC_COMMAND_CLASS_ENTRY_CONTROL],
[SPECIFIC_DEVICE_CLASS_SECURE_BARRIER_ADD_ON,
SPECIFIC_DEVICE_CLASS_SECURE_DOOR],
[COMMAND_CLASS_SWITCH_BINARY],
TYPE_BOOL,
GENRE_USER)
]
ATTR_NODE_ID = "node_id"
ATTR_VALUE_ID = "value_id"
ATTR_SCENE_ID = "scene_id"
NETWORK = None
_LOGGER = logging.getLogger(__name__)
def _obj_to_dict(obj):
"""Convert an object into a hash for debug."""
return {key: getattr(obj, key) for key
in dir(obj)
if key[0] != '_' and not hasattr(getattr(obj, key), '__call__')}
def _node_name(node):
"""Return the name of the node."""
return node.name or "{} {}".format(
node.manufacturer_name, node.product_name)
def _value_name(value):
"""Return the name of the value."""
return "{} {}".format(_node_name(value.node), value.label)
def _object_id(value):
"""Return the object_id of the device value.
The object_id contains node_id and value instance id
to not collide with other entity_ids.
"""
object_id = "{}_{}".format(slugify(_value_name(value)),
value.node.node_id)
# Add the instance id if there is more than one instance for the value
if value.instance > 1:
return "{}_{}".format(object_id, value.instance)
return object_id
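# For illustration only (hypothetical device, not from the original source):
# a value labelled "Luminance" on a node named "Aeon Multisensor" with
# node_id 5 becomes "aeon_multisensor_luminance_5", and
# "aeon_multisensor_luminance_5_2" for value instance 2.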
def nice_print_node(node):
"""Print a nice formatted node to the output (debug method)."""
node_dict = _obj_to_dict(node)
node_dict['values'] = {value_id: _obj_to_dict(value)
for value_id, value in node.values.items()}
print("\n\n\n")
print("FOUND NODE", node.product_name)
pprint(node_dict)
print("\n\n\n")
def get_config_value(node, value_index):
"""Return the current configuration value for a specific index."""
try:
for value in node.values.values():
# 112 == config command class
if value.command_class == 112 and value.index == value_index:
return value.data
except RuntimeError:
        # If we get a RuntimeError, the dict changed while we were
        # iterating over it; just look again.
return get_config_value(node, value_index)
# pylint: disable=R0914
def setup(hass, config):
"""Setup Z-Wave.
Will automatically load components to support devices found on the network.
"""
# pylint: disable=global-statement, import-error
global NETWORK
try:
import libopenzwave
except ImportError:
_LOGGER.error("You are missing required dependency Python Open "
"Z-Wave. Please follow instructions at: "
"https://home-assistant.io/components/zwave/")
return False
from pydispatch import dispatcher
from openzwave.option import ZWaveOption
from openzwave.network import ZWaveNetwork
default_zwave_config_path = os.path.join(os.path.dirname(
libopenzwave.__file__), 'config')
# Load configuration
use_debug = str(config[DOMAIN].get(CONF_DEBUG)) == '1'
customize = config[DOMAIN].get(CONF_CUSTOMIZE, {})
autoheal = config[DOMAIN].get(CONF_AUTOHEAL, DEFAULT_CONF_AUTOHEAL)
# Setup options
options = ZWaveOption(
config[DOMAIN].get(CONF_USB_STICK_PATH, DEFAULT_CONF_USB_STICK_PATH),
user_path=hass.config.config_dir,
config_path=config[DOMAIN].get('config_path',
default_zwave_config_path),)
options.set_console_output(use_debug)
options.lock()
NETWORK = ZWaveNetwork(options, autostart=False)
if use_debug:
def log_all(signal, value=None):
"""Log all the signals."""
print("")
print("SIGNAL *****", signal)
if value and signal in (ZWaveNetwork.SIGNAL_VALUE_CHANGED,
ZWaveNetwork.SIGNAL_VALUE_ADDED):
pprint(_obj_to_dict(value))
print("")
dispatcher.connect(log_all, weak=False)
def value_added(node, value):
"""Called when a value is added to a node on the network."""
for (component,
generic_device_class,
specific_device_class,
command_class,
value_type,
value_genre) in DISCOVERY_COMPONENTS:
_LOGGER.debug("Component=%s Node_id=%s query start",
component, node.node_id)
if node.generic not in generic_device_class and \
None not in generic_device_class:
_LOGGER.debug("node.generic %s not None and in \
generic_device_class %s",
node.generic, generic_device_class)
continue
if node.specific not in specific_device_class and \
None not in specific_device_class:
_LOGGER.debug("node.specific %s is not None and in \
specific_device_class %s", node.specific,
specific_device_class)
continue
if value.command_class not in command_class and \
None not in command_class:
_LOGGER.debug("value.command_class %s is not None \
and in command_class %s",
value.command_class, command_class)
continue
if value_type != value.type and value_type is not None:
_LOGGER.debug("value.type %s != value_type %s",
value.type, value_type)
continue
if value_genre != value.genre and value_genre is not None:
_LOGGER.debug("value.genre %s != value_genre %s",
value.genre, value_genre)
continue
# Configure node
_LOGGER.debug("Adding Node_id=%s Generic_command_class=%s, \
Specific_command_class=%s, \
Command_class=%s, Value type=%s, \
Genre=%s", node.node_id,
node.generic, node.specific,
value.command_class, value.type,
value.genre)
name = "{}.{}".format(component, _object_id(value))
node_config = customize.get(name, {})
polling_intensity = convert(
node_config.get(CONF_POLLING_INTENSITY), int)
if polling_intensity:
value.enable_poll(polling_intensity)
else:
value.disable_poll()
discovery.load_platform(hass, component, DOMAIN, {
ATTR_NODE_ID: node.node_id,
ATTR_VALUE_ID: value.value_id,
}, config)
def scene_activated(node, scene_id):
"""Called when a scene is activated on any node in the network."""
name = _node_name(node)
object_id = "{}_{}".format(slugify(name), node.node_id)
hass.bus.fire(EVENT_SCENE_ACTIVATED, {
ATTR_ENTITY_ID: object_id,
ATTR_SCENE_ID: scene_id
})
dispatcher.connect(
value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED, weak=False)
dispatcher.connect(
scene_activated, ZWaveNetwork.SIGNAL_SCENE_EVENT, weak=False)
def add_node(service):
"""Switch into inclusion mode."""
NETWORK.controller.add_node()
def remove_node(service):
"""Switch into exclusion mode."""
NETWORK.controller.remove_node()
def heal_network(service):
"""Heal the network."""
_LOGGER.info("ZWave heal running.")
NETWORK.heal()
def soft_reset(service):
"""Soft reset the controller."""
NETWORK.controller.soft_reset()
def test_network(service):
"""Test the network by sending commands to all the nodes."""
NETWORK.test()
def stop_zwave(event):
"""Stop Z-Wave."""
NETWORK.stop()
def start_zwave(event):
"""Startup Z-Wave."""
NETWORK.start()
# Need to be in STATE_AWAKED before talking to nodes.
# Wait up to NETWORK_READY_WAIT_SECS seconds for the zwave network
# to be ready.
for i in range(NETWORK_READY_WAIT_SECS):
_LOGGER.debug(
"network state: %d %s", NETWORK.state, NETWORK.state_str)
if NETWORK.state >= NETWORK.STATE_AWAKED:
_LOGGER.info("zwave ready after %d seconds", i)
break
time.sleep(1)
else:
_LOGGER.warning(
"zwave not ready after %d seconds, continuing anyway",
NETWORK_READY_WAIT_SECS)
_LOGGER.info(
"final network state: %d %s", NETWORK.state, NETWORK.state_str)
polling_interval = convert(
config[DOMAIN].get(CONF_POLLING_INTERVAL), int)
if polling_interval is not None:
NETWORK.set_poll_interval(polling_interval, False)
poll_interval = NETWORK.get_poll_interval()
_LOGGER.info("zwave polling interval set to %d ms", poll_interval)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_zwave)
# Register add / remove node services for Z-Wave sticks without
# hardware inclusion button
hass.services.register(DOMAIN, SERVICE_ADD_NODE, add_node)
hass.services.register(DOMAIN, SERVICE_REMOVE_NODE, remove_node)
hass.services.register(DOMAIN, SERVICE_HEAL_NETWORK, heal_network)
hass.services.register(DOMAIN, SERVICE_SOFT_RESET, soft_reset)
hass.services.register(DOMAIN, SERVICE_TEST_NETWORK, test_network)
# Setup autoheal
if autoheal:
_LOGGER.info("ZWave network autoheal is enabled.")
track_time_change(hass, heal_network, hour=0, minute=0, second=0)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_zwave)
return True
class ZWaveDeviceEntity:
"""Representation of a Z-Wave node entity."""
def __init__(self, value, domain):
"""Initialize the z-Wave device."""
self._value = value
self.entity_id = "{}.{}".format(domain, self._object_id())
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Return an unique ID."""
return "ZWAVE-{}-{}".format(self._value.node.node_id,
self._value.object_id)
@property
def name(self):
"""Return the name of the device."""
return _value_name(self._value)
def _object_id(self):
"""Return the object_id of the device value.
The object_id contains node_id and value instance id to not collide
with other entity_ids.
"""
return _object_id(self._value)
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
ATTR_NODE_ID: self._value.node.node_id,
}
battery_level = self._value.node.get_battery_level()
if battery_level is not None:
attrs[ATTR_BATTERY_LEVEL] = battery_level
location = self._value.node.location
if location:
attrs[ATTR_LOCATION] = location
return attrs
| Julian/home-assistant | homeassistant/components/zwave.py | Python | mit | 16,822 |
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
import jsonfield
from .signals import event_logged
class Log(models.Model):
user = models.ForeignKey(
getattr(settings, "AUTH_USER_MODEL", "auth.User"),
null=True,
on_delete=models.SET_NULL
)
timestamp = models.DateTimeField(default=timezone.now, db_index=True)
action = models.CharField(max_length=50, db_index=True)
content_type = models.ForeignKey(ContentType, null=True)
object_id = models.PositiveIntegerField(null=True)
obj = GenericForeignKey("content_type", "object_id")
extra = jsonfield.JSONField()
@property
def template_fragment_name(self):
return "eventlog/{}.html".format(self.action.lower())
class Meta:
ordering = ["-timestamp"]
def log(user, action, extra=None, obj=None):
if (user is not None and not user.is_authenticated()):
user = None
if extra is None:
extra = {}
content_type = None
object_id = None
if obj is not None:
content_type = ContentType.objects.get_for_model(obj)
object_id = obj.pk
event = Log.objects.create(
user=user,
action=action,
extra=extra,
content_type=content_type,
object_id=object_id
)
event_logged.send(sender=Log, event=event)
return event
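# Illustrative usage (not part of the original module; model and view names are
# hypothetical). Inside a Django view one might record an event with:
#
#     log(
#         user=request.user,
#         action="PROJECT_CREATED",
#         extra={"name": project.name},
#         obj=project,
#     )
#
# which stores the actor, timestamp, action, JSON extra data and a generic
# foreign key to the object, then emits the event_logged signal.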
| rosscdh/pinax-eventlog | pinax/eventlog/models.py | Python | mit | 1,510 |
from setuptools import setup, find_packages
with open('README.md') as fp:
long_description = fp.read()
setup(
name='typeform',
version='1.1.0',
description='Python Client wrapper for Typeform API',
long_description=long_description,
long_description_content_type='text/markdown',
keywords=[
'type',
'form',
'typeform',
'api',
],
author='Typeform',
author_email='[email protected]',
url='https://github.com/MichaelSolati/typeform-python-sdk',
packages=find_packages(),
install_requires=['requests'],
test_suite='typeform.test.suite.test_suite',
license='MIT',
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python',
]
)
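# Illustrative commands (not part of the original file): from the repository
# root, "pip install ." installs the package and "python setup.py test" runs
# the test_suite declared above.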
| underdogio/typeform | setup.py | Python | mit | 1,168 |
from epic.utils.helper_functions import lru_cache
from numpy import log
from scipy.stats import poisson
@lru_cache()
def compute_window_score(i, poisson_parameter):
# type: (int, float) -> float
# No enrichment; poisson param also average
if i < poisson_parameter:
return 0
p_value = poisson.pmf(i, poisson_parameter)
if p_value > 0:
window_score = -log(p_value)
else:
# log of zero not defined
window_score = 1000
return window_score
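# Illustrative usage (not part of the original module; assumes the epic package
# is importable): with a genome-wide average of 2.0 reads per window, a window
# holding 5 reads scores -log(poisson.pmf(5, 2)) ~= 3.32, while windows with
# fewer reads than the average score 0.
if __name__ == "__main__":
    print(compute_window_score(5, 2.0))
    print(compute_window_score(1, 2.0))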
| endrebak/epic | epic/statistics/compute_window_score.py | Python | mit | 501 |
#!/usr/bin/python
"""
Visualizing H fractal with tkinter.
=======
License
=======
Copyright (c) 2017 Thomas Lehmann
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
if __name__ == "__main__":
import math
import Tkinter as tk
from concept.math.point import Point2d
from concept.math.vector import Vector2d
from concept.math.hfractal import hfractal
class Application(tk.Frame):
"""Simple tk application displaying a H fractal."""
def __init__(self):
"""Init canvas and display fractal."""
tk.Frame.__init__(self, tk.Tk())
self.angle = 0.0
self.scale = 1.0
self.depth = 0
self.master.title("H Fractal")
self.master.geometry("640x480+50+50")
self.canvas = tk.Canvas(self)
self.canvas['bg'] = "#ffffff"
self.canvas.bind("<Configure>", self.on_configure)
self.canvas.pack(fill=tk.BOTH, expand=tk.YES)
self.pack(fill=tk.BOTH, expand=tk.YES)
self.bind("<Left>", self.on_key_left)
self.bind("<Right>", self.on_key_right)
self.bind("<Up>", self.on_key_up)
self.bind("<Down>", self.on_key_down)
self.bind("+", self.on_key_plus)
self.bind("-", self.on_key_minus)
self.focus_set()
def set_title(self, count):
"""Change the title."""
self.master.title("H Fractal (%d H's, angle=%.2f Degree, scale=%.2f)"
% (count, self.angle * 180.0 / math.pi, self.scale))
def on_key_left(self, event):
"""Rotate hfractal to the left."""
self.angle -= 0.05
self.repaint(self.canvas.winfo_width(), self.canvas.winfo_height())
def on_key_right(self, event):
"""Rotate hfractal to the right."""
self.angle += 0.05
self.repaint(self.canvas.winfo_width(), self.canvas.winfo_height())
def on_key_up(self, event):
"""Scale hfractal (increase)."""
self.scale += 0.05
self.repaint(self.canvas.winfo_width(), self.canvas.winfo_height())
def on_key_down(self, event):
"""Scale hfractal (decrease)."""
if self.scale >= (0.05 + 0.05):
self.scale -= 0.05
self.repaint(self.canvas.winfo_width(), self.canvas.winfo_height())
def on_key_plus(self, event):
"""Increase hfractal depth."""
if self.depth < 7:
self.depth += 1
self.repaint(self.canvas.winfo_width(), self.canvas.winfo_height())
def on_key_minus(self, event):
"""Decrease hfractal depth."""
if self.depth > 0:
self.depth -= 1
self.repaint(self.canvas.winfo_width(), self.canvas.winfo_height())
def on_configure(self, event):
"""Called to react on changes to width and height."""
self.repaint(event.width, event.height)
def repaint(self, width, height):
"""Repaint hfractal."""
# delete all previous lines
self.canvas.delete(tk.ALL)
center = Point2d(width / 2.0, height / 2.0)
direction = Vector2d(0.0, height / 2.0).scaled(self.scale).rotated(self.angle)
hdefs = hfractal(center, direction, 2.0, self.depth)
self.set_title(len(hdefs))
for hdef in hdefs:
for line in hdef.generate_lines():
self.canvas.create_line(
line[0].x,
line[0].y,
line[0].x + line[1].x,
line[0].y + line[1].y
)
def mainloop(self):
"""Application mainloop when called."""
self.master.mainloop()
def main():
"""Main function."""
app = Application()
app.mainloop()
main()
| Nachtfeuer/concept-py | examples/hfractal.py | Python | mit | 4,958 |
#!/usr/bin/python
import serial
ser = serial.Serial('COM9', 9600)
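# Send the two ASCII bytes '5' and '~' over the port opened above, then close
# it. The port name ('COM9'), baud rate and the use of '~' as an end-of-command
# marker are assumptions that must match the Arduino sketch on the other side.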
ser.write(b'5~')
ser.close()
| GeoffSpielman/Hackathin_Examples | Python_To_Arduino_Communication/Python_To_Arduino_Communication.py | Python | mit | 96 |
#!/usr/bin/python
# by: Mohammad Riftadi <[email protected]>
# Testing Database instance for CPE Manager
from pymongo import MongoClient
import hashlib
client = MongoClient('mongodb://localhost:27017/')
dbh = client.jawdat_internal
# drop the collection if it already exists
dbh.drop_collection("resetpass")
# drop the collection if it already exists
dbh.drop_collection("employees")
eh = dbh.employees
ne = [
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "001",
"roles" : ["manager", "director"],
"fullname" : "Tedhi Achdiana",
"position" : "Managing Director",
"division" : "bod",
"supervisor" : "[email protected]",
"profpic" : "tedhi.jpg",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "002",
"roles" : ["manager", "director"],
"fullname" : "Himawan Nugroho",
"position" : "CEO",
"division" : "bod",
"supervisor" : "[email protected]",
"profpic" : "himawan.jpg",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "004",
"roles" : ["accounting", "hrd"],
"fullname" : "Afilia Ratna",
"position" : "HRD Manager",
"division" : "hrd",
"supervisor" : "[email protected]",
"profpic" : "afilia.jpg",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "005",
"roles" : ["staff"],
"fullname" : "Handoko Baguswasito",
"position" : "Consulting Engineer",
"division" : "delivery",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "010",
"roles" : ["staff"],
"fullname" : "Ary Rahmadian Thala",
"position" : "Solutions Architect",
"division" : "delivery",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "012",
"roles" : ["staff", "admin"],
"fullname" : "Mohammad Riftadi",
"position" : "Solutions Manager",
"division" : "solutions",
"supervisor" : "[email protected]",
"profpic" : "riftadi.jpg",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "016",
"roles" : ["staff"],
"fullname" : "Ericson Ferdinand Pasaribu",
"position" : "Engineering Manager",
"division" : "engineering",
"supervisor" : "[email protected]",
"profpic" : "ericson.pasaribu.jpg",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "020",
"roles" : ["staff"],
"fullname" : "Nugroho Dwi Prasetyo",
"position" : "Business Analyst",
"division" : "external",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "023",
"roles" : ["staff"],
"fullname" : "Panji Harimurti",
"position" : "Tax and Accounting Staff",
"division" : "finance",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "031",
"roles" : ["staff"],
"fullname" : "Munandar Rahman",
"position" : "Office Assistant",
"division" : "ga",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "032",
"roles" : ["staff"],
"fullname" : "Danav Pratama",
"position" : "Office Assistant",
"division" : "ga",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "024",
"roles" : ["staff"],
"fullname" : "Tri Primandra Karamoy",
"position" : "Product Manager",
"division" : "solutions",
"supervisor" : "[email protected]",
"profpic" : "tri.karamoy.jpg",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "025",
"roles" : ["staff"],
"fullname" : "Firza Agusta Wiratama",
"position" : "SDN Engineer",
"division" : "engineering",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "026",
"roles" : ["staff"],
"fullname" : "Lisa Anggrainy",
"position" : "Business Analyst",
"division" : "external",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "027",
"roles" : ["staff"],
"fullname" : "Moh. Faisal Sonjaya",
"position" : "Asst. PM",
"division" : "external",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "028",
"roles" : ["staff"],
"fullname" : "Doni Marlon Siringoringo",
"position" : "Asst. PM",
"division" : "external",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "029",
"roles" : ["staff"],
"fullname" : "Dimas Pandu Nugroho",
"position" : "UI/UX Developer",
"division" : "engineering",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "030",
"roles" : ["staff"],
"fullname" : "M. Fikri Ali Rahman",
"position" : "UI/UX Developer",
"division" : "engineering",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "033",
"roles" : ["staff"],
"fullname" : "Febrian Rendak",
"position" : "SDN Engineer",
"division" : "engineering",
"supervisor" : "[email protected]",
},
{
"username" : "[email protected]",
"secret" : hashlib.md5("J@wdat12345").hexdigest(),
"first_login" : True,
"jawdat_id" : "034",
"roles" : ["staff"],
"fullname" : "Raisha Syifa Nizami",
"position" : "Asst. PM",
"division" : "external",
"supervisor" : "[email protected]",
},
]
print eh.insert(ne)
# drop the collection if it already exists
dbh.drop_collection("costcenters")
cch = dbh.costcenters
ncc = [
{
"costcenter_id" : "opex", # pre -> presales phase, pro->project phase, sup->support phase, should be unique
"costcenter_name" : "Operational Expense",
"costcenter_budget" : 500000000,
"costcenter_category" : "internal",
"costcenter_status" : "active"
},
{
"costcenter_id" : "presales", # pre -> presales phase, pro->project phase, sup->support phase, should be unique
"costcenter_name" : "Presales General",
"costcenter_budget" : 1000000000,
"costcenter_category" : "presales",
"costcenter_status" : "active"
},
{
"costcenter_id" : "pro-tsra-cpe", # pre -> presales phase, pro->project phase, sup->support phase, should be unique
"costcenter_name" : "Project Telkomtelstra CPE",
"costcenter_budget" : 500000000,
"costcenter_category" : "project",
"costcenter_status" : "active"
},
{
"costcenter_id" : "pro-tsel-eol", # pre -> presales phase, pro->project phase, sup->support phase, should be unique
"costcenter_name" : "Project Telkomsel EoL",
"costcenter_budget" : 500000000,
"costcenter_category" : "project",
"costcenter_status" : "active"
},
{
"costcenter_id" : "sup-lintas-sdh", # pre -> presales phase, pro->project phase, sup->support phase, should be unique
"costcenter_name" : "Support Lintasarta SDH",
"costcenter_budget" : 200000000,
"costcenter_category" : "support",
"costcenter_status" : "active"
},
]
print cch.insert(ncc)
# drop the collection if it already exists
dbh.drop_collection("settings")
sh = dbh.settings
ns = { "setting_name" : "mail", "email_notifications" : "off" }
print sh.insert(ns)
rch = dbh.reimburse_claims
nrc = [
{
"username" : "[email protected]",
"fullname" : "Mohammad Riftadi",
"period" : "0516", # may (05) 2016 (16)
# "date_submitted" : datetime.now(),
"approved_by" : "[email protected]",
"status" : "submitted", # presubmitted, submitted, approved, rejected
# "status_desc" : "OK",
# "approval_date" : datetime.now(),
"expense_list" : [
{
"date" : "02/05/2016",
"description" : "Beli Modem",
"category" : "logistic",
"costcenter" : "opex",
"cost" : 300000 # in IDR
},
{
"date" : "02/05/2016",
"description" : "Parkir",
"category" : "parking",
"costcenter" : "opex",
"cost" : 150000 # in IDR
},
{
"date" : "02/05/2016",
"description" : "Makan Siang dengan Sisindokom",
"category" : "meal",
"costcenter" : "opex",
"cost" : 200000 # in IDR
},
]
},
]
| riftadi/smallcorptools | sct_initdb.py | Python | mit | 11,668 |
import os
import traceback
from mantidqt.utils.asynchronous import AsyncTask
from addie.processing.mantid.master_table.master_table_exporter import TableFileExporter as MantidTableExporter
# Mantid Total Scattering integration
# (https://github.com/marshallmcdonnell/mantid_total_scattering)
try:
import total_scattering
print("Mantid Total Scattering Version: ", total_scattering.__version__)
from total_scattering.reduction import TotalScatteringReduction
MANTID_TS_ENABLED = True
except ImportError:
print('total_scattering module not found. Functionality disabled')
MANTID_TS_ENABLED = False
class JobPool(object):
    task_output = None
running = None
task_exc_type, task_exc, task_exc_stack = None, None, None
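    # Jobs run strictly one at a time: start() launches the first AsyncTask and
    # on_finished() (invoked after both success and failure) starts the next
    # one until the list is exhausted.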
def __init__(self, configurations):
self.jobs = []
for config in configurations:
print("CONFIG:", config)
self.jobs.append(AsyncTask(TotalScatteringReduction, args=(config,),
success_cb=self.on_success, error_cb=self.on_error,
finished_cb=self.on_finished))
def _start_next(self):
if self.jobs:
self.running = self.jobs.pop(0)
self.running.start()
else:
self.running = None
def start(self):
if not self.jobs:
raise RuntimeError('Cannot start empty job list')
self._start_next()
def on_success(self, task_result):
# TODO should emit a signal
self.task_output = task_result.output
print('SUCCESS!!! {}'.format(self.task_output))
def on_error(self, task_result):
# TODO should emit a signal
print('ERROR!!!')
self.task_exc_type = task_result.exc_type
self.task_exc = task_result.exc_value
self.task_exc_stack = traceback.extract_tb(task_result.stack)
traceback.print_tb(task_result.stack)
print(task_result)
def on_finished(self):
'''Both success and failure call this method afterwards'''
# TODO should emit a signal
self._start_next() # kick off the next one in the pool
def run_mantid(parent):
num_rows = parent.processing_ui.h3_table.rowCount()
if num_rows <= 0:
raise RuntimeError('Cannot export empty table')
exporter = MantidTableExporter(parent=parent)
# write out the full table to disk
# TODO make a class level name so it can be reused
full_reduction_filename = os.path.join(
os.path.expanduser('~'), '.mantid', 'addie.json')
print('writing out full table to "{}"'.format(full_reduction_filename))
exporter.export(full_reduction_filename)
# append the individual rows to input list (reduction_inputs)
reduction_inputs = []
for row in range(num_rows):
if not exporter.isActive(row):
print('skipping row {} - inactive'.format(row + 1)) # REMOVE?
continue
print('Will be running row {} for reduction'.format(
row + 1)) # TODO should be debug logging
json_input = exporter.retrieve_row_info(row)
reduction_input = exporter.convert_from_row_to_reduction(json_input)
reduction_inputs.append(reduction_input)
if len(reduction_inputs) == 0:
raise RuntimeError('None of the rows were activated')
# locate total scattering script
if MANTID_TS_ENABLED:
pool = JobPool(reduction_inputs)
pool.start()
else:
# TODO should be on the status bar
print('total_scattering module not found. Functionality disabled')
| neutrons/FastGR | addie/processing/mantid/launch_reduction.py | Python | mit | 3,590 |
from amqpstorm.management import ManagementApi
from amqpstorm.message import Message
from amqpstorm.tests import HTTP_URL
from amqpstorm.tests import PASSWORD
from amqpstorm.tests import USERNAME
from amqpstorm.tests.functional.utility import TestFunctionalFramework
from amqpstorm.tests.functional.utility import setup
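# These are functional tests: they assume a reachable RabbitMQ broker whose
# management plugin answers at HTTP_URL with the USERNAME/PASSWORD credentials
# imported above (typically a local default installation).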
class ApiBasicFunctionalTests(TestFunctionalFramework):
@setup(queue=True)
def test_api_basic_publish(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.queue.declare(self.queue_name)
try:
self.assertEqual(api.basic.publish(self.message, self.queue_name),
{'routed': True})
finally:
api.queue.delete(self.queue_name)
@setup(queue=True)
def test_api_basic_get_message(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.queue.declare(self.queue_name)
self.assertEqual(api.basic.publish(self.message, self.queue_name),
{'routed': True})
result = api.basic.get(self.queue_name, requeue=False)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], Message)
self.assertEqual(result[0].body, self.message)
# Make sure the message wasn't re-queued.
self.assertFalse(api.basic.get(self.queue_name, requeue=False))
@setup(queue=True)
def test_api_basic_get_message_requeue(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.queue.declare(self.queue_name)
self.assertEqual(api.basic.publish(self.message, self.queue_name),
{'routed': True})
result = api.basic.get(self.queue_name, requeue=True)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], Message)
self.assertEqual(result[0].body, self.message)
# Make sure the message was re-queued.
self.assertTrue(api.basic.get(self.queue_name, requeue=False))
@setup(queue=True)
def test_api_basic_get_message_to_dict(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.queue.declare(self.queue_name)
self.assertEqual(api.basic.publish(self.message, self.queue_name),
{'routed': True})
result = api.basic.get(self.queue_name, requeue=False, to_dict=True)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], dict)
self.assertEqual(result[0]['payload'], self.message)
| eandersson/amqpstorm | amqpstorm/tests/functional/management/test_basic.py | Python | mit | 2,514 |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import numpy as np
from scipy.constants import mu_0, pi, epsilon_0
from SimPEG import Utils
def E_field_from_SheetCurruent(XYZ, srcLoc, sig, t, E0=1., orientation='X', kappa=0., epsr=1.):
"""
Computing Analytic Electric fields from Plane wave in a Wholespace
TODO:
Add description of parameters
"""
XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
# Check
    if (XYZ.shape[0] > 1) and (t.shape[0] > 1):
        raise Exception("I/O type error: For multiple field locations only a single time can be specified.")
mu = mu_0*(1+kappa)
if orientation == "X":
z = XYZ[:, 2]
bunja = -E0*(mu*sig)**0.5 * z * np.exp(-(mu*sig*z**2) / (4*t))
bunmo = 2 * np.pi**0.5 * t**1.5
Ex = bunja / bunmo
Ey = np.zeros_like(z)
Ez = np.zeros_like(z)
return Ex, Ey, Ez
else:
raise NotImplementedError()
def H_field_from_SheetCurruent(XYZ, srcLoc, sig, t, E0=1., orientation='X', kappa=0., epsr=1.):
"""
Plane wave propagating downward (negative z (depth))
"""
XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
# Check
    if (XYZ.shape[0] > 1) and (t.shape[0] > 1):
        raise Exception("I/O type error: For multiple field locations only a single time can be specified.")
mu = mu_0*(1+kappa)
if orientation == "X":
z = XYZ[:, 2]
Hx = np.zeros_like(z)
Hy = E0 * np.sqrt(sig / (np.pi*mu*t))*np.exp(-(mu*sig*z**2) / (4*t))
Hz = np.zeros_like(z)
return Hx, Hy, Hz
else:
raise NotImplementedError()
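# Illustrative usage sketch (the values below are placeholders, not taken from
# the original module): evaluate the downgoing plane-wave fields at a few
# depths in a 0.01 S/m wholespace, 1 ms after the source step.
#
#     xyz = np.c_[np.zeros(3), np.zeros(3), np.r_[-10., -50., -100.]]
#     ex, ey, ez = E_field_from_SheetCurruent(xyz, None, sig=1e-2, t=np.r_[1e-3])
#     hx, hy, hz = H_field_from_SheetCurruent(xyz, None, sig=1e-2, t=np.r_[1e-3])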
if __name__ == '__main__':
pass
| geoscixyz/em_examples | em_examples/TDEMPlanewave.py | Python | mit | 1,761 |
"""backtest.py, backunttest.py and coveragetest.py are all taken from coverage.py version 3.7.1""" | public/testmon | test/coveragepy/__init__.py | Python | mit | 98 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tanium SOUL package builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import codecs
import datetime
import imp
import io
import logging
import os
import shutil
import sys
__version__ = "1.0.2"
THIS_FILE = os.path.abspath(__file__)
"""The absolute path to this file, ala ``/path/to/package/script.py``"""
THIS_SCRIPT = os.path.basename(THIS_FILE)
"""This scripts basename from THIS_FILE, ala ``script.py``"""
THIS_PATH = os.path.dirname(THIS_FILE)
"""The path of the directory containing THIS_FILE, ala ``/path/to/package/``"""
LOG = logging.getLogger(THIS_SCRIPT)
class TemplateProcessor(object):
"""Templatize the schema parsed from a WSDL file into an api.py."""
INDENTS = {"i4": " " * 4, "i8": " " * 8, "i12": " " * 12}
"""Indentation variables used by self.tmpl."""
SIMPLE_TYPES = ["int", "STR"]
"""Object types that represent 'simple' objects from the schema."""
CR = "\n"
"""Carriage return constant."""
CR1 = (CR * 1).join
"""Joiner method for joining lists with a single carriage return."""
CR2 = (CR * 2).join
"""Joiner method for joining lists with two carriage returns."""
DTF = "%Y-%m-%d at %H:%M:%S UTC"
"""Date time format."""
def __init__(self, tmpl_path, tmpl_vars):
"""Constructor."""
self.TMPL_PATH = tmpl_path
self.TMPL_VARS = tmpl_vars
self.TMPL_VARS.update(now=self.now)
self.TMPLS = {}
for k in os.listdir(tmpl_path):
if (k.endswith("tmpl") or k.endswith("py")):
self.TMPLS[k] = self.read_tmpl(name=k)
def __str__(self):
"""Method for str() on self."""
ret = "{c} using template path: {t}"
ret = ret.format(c=self.__class__.__name__, t=self.TMPL_PATH)
return ret
def __repr__(self):
"""Method for repr() on self."""
ret = self.__str__()
return ret
def api(self, schema, manual_wsdl):
"""Generate the text of all object classes."""
self.USED_TMPLS = getattr(self, "USED_TMPLS", [])
list_types = [x for x in schema if x["attrs"]["list_prop"]]
item_types = [x for x in schema if not x["attrs"]["list_prop"]]
manual_types = [x for x in manual_wsdl.MANUAL_WSDL]
all_types = list_types + item_types + manual_types
list_objs = [self.gen_obj(obj_dict=x, tmpl="wsdl_object") for x in list_types]
item_objs = [self.gen_obj(obj_dict=x, tmpl="wsdl_object") for x in item_types]
man_objs = [self.gen_obj(obj_dict=x, tmpl="manual_object") for x in manual_types]
wsdl_maps = [{"prefix": k, "uri": v} for k, v in self.TMPL_VARS["wsdl_ns"].items()]
wsdl_map = self.CR1([self.tmpl(tmpl="wsdl_map", subs=x) for x in wsdl_maps])
obj_map = self.CR1([self.tmpl(tmpl="api_map", subs=x) for x in all_types])
all_map = self.CR1([self.tmpl(tmpl="all_map", subs=x) for x in all_types])
self.TMPL_VARS["api_map"] = obj_map
self.TMPL_VARS["all_map"] = all_map
self.TMPL_VARS["wsdl_map"] = wsdl_map
statics = self.TMPLS["statics.py"]
statics = "\n".join(statics.splitlines()[1:])
self.USED_TMPLS.append("statics.py")
header = self.tmpl(tmpl="header", subs=self.TMPL_VARS, strip=False)
header += statics + "\n"
footer = self.tmpl(tmpl="footer", subs=self.TMPL_VARS, strip=False)
objs = self.CR2(man_objs + list_objs + item_objs)
ret = self.CR2([header, objs, footer])
for x in self.TMPLS:
if x not in self.USED_TMPLS:
m = "UNUSED TEMPLATE FILE: {x}"
m = m.format(x=x)
LOG.warning(m)
return ret
def gen_obj(self, obj_dict, tmpl):
"""Generate an object string from an obj_dict.
obj_dict::
{
'attrs': {
'constants': [
{
'name': 'ADMIN',
'value': 'admin',
'value_type': u'STR'
},
...
],
'list_prop': {
'attr': { 'maxOccurs': 'unbounded', 'minOccurs': '0', 'name': 'query', 'type': 'sensor_query'},
'obj_name': u'Query',
'obj_type': u'SensorQuery',
'soap_name': 'query'
},
'properties': [
{
'attr': {'name': 'columns', 'type': 'plugin_sql_column_list'},
'obj_name': u'Columns',
'obj_type': u'PluginSqlColumnList',
'soap_name': 'columns',
},
...
]
},
'obj_name': u'SensorQueryList',
'soap_name': 'sensor_query_list',
'wsdl_doc': u''
}
"""
m = "Generating '{obj_name}' WSDL object for soap_name '{soap_name}'"
m = m.format(**obj_dict)
LOG.debug(m)
attrs = obj_dict["attrs"]
props = attrs["properties"]
[self._update_prop(x) for x in props]
lprop = attrs["list_prop"]
self._update_prop(lprop) if lprop else None
constants = attrs["constants"]
cprops = self._sort_props([x for x in props if x["obj_type"] not in self.SIMPLE_TYPES])
sprops = self._sort_props([x for x in props if x["obj_type"] in self.SIMPLE_TYPES])
ignores = attrs.get("ignores", [])
tv = {}
tv.update(self.TMPL_VARS)
tv.update(self.INDENTS)
tv.update(obj_dict)
tv["wsdl_doc"] = self.wsdl_doc(obj_dict=obj_dict)
tv["attrs_doc"] = self.CR1(self.attrs_doc(sprops=sprops, cprops=cprops, lprop=lprop))
tv["attrs"] = self.CR1(self.attrs_class(sprops=sprops, cprops=cprops, lprop=lprop))
tv["api_dict"] = self.CR1(self.api_dict(sprops=sprops, cprops=cprops, lprop=lprop))
tv["attrs_dict"] = self.CR1(self.attr_dict(sprops=sprops, cprops=cprops, lprop=lprop))
if "class_type" not in tv:
if lprop:
tv["class_type"] = "ApiList"
else:
tv["class_type"] = "ApiObject"
if "class_doc" not in tv:
if lprop:
tv["class_doc"] = self.tmpl(subs=tv, tmpl="list_doc")
else:
tv["class_doc"] = self.tmpl(subs=tv, tmpl="item_doc")
if ignores:
xx = self.CR1([self.tmpl(subs={"class_attr": x}, tmpl="simplex") for x in ignores])
tv["api_ignore"] = xx
else:
tv["api_ignore"] = self.none_tmpl(subs=tv, t="ignored attributes", i=12)
if constants:
tv["constants"] = self.CR1(self.const_tmpl(constants=constants))
else:
tv["constants"] = self.none_tmpl(subs=tv, t="constants", i=8)
if sprops:
tv["simple"] = self.CR1([self.tmpl(subs=x, tmpl="simplex") for x in sprops])
else:
tv["simple"] = self.none_tmpl(subs=tv, t="simple attributes", i=12)
if cprops:
tv["complex"] = self.CR1([self.tmpl(subs=x, tmpl="simplex") for x in cprops])
else:
tv["complex"] = self.none_tmpl(subs=tv, t="complex attributes", i=12)
ret = self.tmpl(subs=tv, tmpl=tmpl, strip=False)
return ret
def wsdl_doc(self, obj_dict):
"""Generate the text for the WSDL documentation."""
lines = self.indent(obj_dict["wsdl_doc"], 8).replace("\\", "/").splitlines()
ret = "\n".join(x.rstrip() for x in lines if x.strip())
return ret
def attr_dict(self, sprops=None, cprops=None, lprop=None):
"""Generate the text for the class ATTRS dict."""
ret = []
if lprop:
ret.append(self.tmpl(subs=lprop, tmpl="attr_dict"))
if sprops:
ret += [self.tmpl(subs=x, tmpl="attr_dict") for x in sprops]
if cprops:
ret += [self.tmpl(subs=x, tmpl="attr_dict") for x in cprops]
return ret
def api_dict(self, sprops=None, cprops=None, lprop=None):
"""Generate the text for the class API dict."""
ret = []
"""
{
"tag": "{soap_name}",
"attr": "{class_attr}",
"is_list": True,
"is_api": {is_api},
"type": {obj_type},
"required": {required},
"is_cdata": True/False,
}
"""
if lprop:
ret.append(self._api_dict(lprop, True))
if sprops:
ret += [self._api_dict(x) for x in sprops]
if cprops:
ret += [self._api_dict(x) for x in cprops]
return ret
def _api_dict(self, x, is_list=False):
items = [
['"tag"', '"{t}"'.format(t=x["soap_name"])],
['"attr"', '"{t}"'.format(t=x["class_attr"])],
['"type"', '{t}'.format(t=x["obj_type"])],
]
if x["obj_type"] in self.SIMPLE_TYPES:
items.append(['"is_api"', 'False'])
else:
items.append(['"is_api"', 'True'])
if is_list:
items.append(['"is_list"', 'True'])
if x.get("is_cdata", False):
items.append(['"is_cdata"', 'True'])
if x.get("from_text", False):
items.append(['"from_text"', 'True'])
if x.get("from_attr", False):
items.append(['"from_attr"', 'True'])
if x.get("required", False):
items.append(['"required"', 'True'])
t_items = ["{i[0]}: {i[1]}".format(i=i) for i in items]
x["api_dict"] = "{{{i}}}".format(i=", ".join(t_items))
ret = self.tmpl(subs=x, tmpl="api_dict")
return ret
def _update_prop(self, x):
"""Add is_api, class_attr, and required to a property type."""
# x["is_api"] = "False" if x["obj_type"] in self.SIMPLE_TYPES else "True"
x["class_attr"] = x.get("class_attr", x["soap_name"])
def _sort_props(self, props):
"""Sort properties in alpha sort, with id and name going first."""
firsts = ["id", "name"]
ret = sorted(props, key=lambda x: x["soap_name"])
for x in firsts:
for idx, i in enumerate(list(ret)):
if i["soap_name"] == x:
ret.insert(0, ret.pop(idx))
return ret
def attrs_class(self, sprops=None, cprops=None, lprop=None):
"""Generate the text for the class attributes."""
ret = []
if lprop:
ret.append(self.tmpl(subs=lprop, tmpl="attr_list"))
if sprops:
ret += [self.tmpl(subs=x, tmpl="attr_simplex") for x in sprops]
if cprops:
ret += [self.tmpl(subs=x, tmpl="attr_simplex") for x in cprops]
return ret
def attrs_doc(self, sprops=None, cprops=None, lprop=None):
"""Generate the text for the class attribute documentation."""
ret = []
if lprop:
ret.append(self.tmpl(subs=lprop, tmpl="attr_doc_list"))
if sprops:
ret += [self.tmpl(subs=x, tmpl="attr_doc_simplex") for x in sprops]
if cprops:
ret += [self.tmpl(subs=x, tmpl="attr_doc_simplex") for x in cprops]
return ret
def const_tmpl(self, constants):
"""Generate the text for a list of constants."""
ret = []
for x in constants:
if x["value_type"] == "int":
x["value"] = int(x["value"])
ret.append(self.tmpl(subs=x, tmpl="class_constant"))
return ret
def none_tmpl(self, t, subs, i):
"""Generate the text for an undefined type t with an indent of i."""
ret = self.tmpl(subs=subs, tmpl="none_def", t=t, i=" " * i)
return ret
def tmpl(self, subs, tmpl, strip=True, **kwargs):
"""Generate the text for tmpl using subs and kwargs as substitutions."""
tv = {}
tv.update(self.INDENTS)
tv.update(subs)
tv.update(kwargs)
real_tmpl_name = "{n}.tmpl".format(n=tmpl)
tmpl = self.TMPLS[real_tmpl_name]
tmpl = tmpl.strip() if strip else tmpl
ret = tmpl.format(**tv)
self.USED_TMPLS = getattr(self, "USED_TMPLS", [])
if real_tmpl_name not in self.USED_TMPLS:
self.USED_TMPLS.append(real_tmpl_name)
return ret
def indent(self, text, lvl):
"""Indent text with space * lvl."""
s = " " * lvl
joiner = "\n{}".format(s).join
ret = s + joiner(text.splitlines())
return ret
@property
def now(self):
"""Get the time right now."""
ret = datetime.datetime.utcnow().strftime(self.DTF)
return ret
def read_file(self, path, encoding="utf-8"):
"""Read path in as binary format, then convert to encoding using automatic BOM detector."""
def bom_enc(raw, default="utf-8"):
"""Determine encoding of raw by checking for a BOM, defaulting to default if none found."""
boms = (
("utf-8-sig", (codecs.BOM_UTF8,)),
("utf-16", (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
("utf-32", (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),
)
for enc, enc_boms in boms:
if any(raw.startswith(bom) for bom in enc_boms):
return enc
return default
with io.open(path, "rb") as fh:
raw = fh.read()
enc = bom_enc(raw=raw, default=encoding)
ret = codecs.decode(raw, enc)
m = "Read '{p}' with {l} bytes as encoding '{e}'"
m = m.format(p=path, l=len(ret), e=enc)
LOG.debug(m)
return ret
def read_tmpl(self, name):
"""Read template name under path in as a unicode string."""
tmpl_path = os.path.join(self.TMPL_PATH, name)
ret = self.read_file(tmpl_path)
return ret
def abspath(path):
"""Prepend path with THIS_PATH if it is not absolute."""
if not os.path.isabs(path):
path = os.path.join(THIS_PATH, path)
return path
class ShellParser(argparse.ArgumentParser):
"""Wrapper for argparse."""
def __init__(self, *args, **kwargs):
"""Constructor."""
self.script = kwargs.pop('script', __name__)
argparse.ArgumentParser.__init__(self, *args, **kwargs)
def _print_message(self, message, file=None):
if message:
if file is None:
file = sys.stdout
file.write(message)
def exit(self, status=0, message=None):
"""Exit handler."""
if message:
self._print_message(message, sys.stdout)
os._exit(status)
def error(self, message):
"""Error handler."""
self.print_help()
message = '\n!! Argument Parsing Error in "{}": {}\n'.format(self.script, message)
self.exit(2, message)
def print_help(self, **kwargs):
"""Help handler."""
super(ShellParser, self).print_help(**kwargs)
subs = [a for a in self._actions if isinstance(a, argparse._SubParsersAction)]
for sub in subs:
print("")
for _, s in sub.choices.items():
print(s.format_help())
def str2bool(value):
"""Argparse bool helper."""
true_vals = ("yes", "y", "true", "t", "1")
fail_vals = ("no", "n", "false", "f", "0")
if value.lower() in true_vals:
result = True
elif value.lower() in fail_vals:
result = False
else:
m = "{!r} not a valid choice, must be one of '{}' for true, or one of '{}' for false"
m = m.format(value, ", ".join(true_vals), ", ".join(fail_vals))
raise argparse.ArgumentTypeError(m)
return result
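# For example, str2bool("Yes") returns True and str2bool("0") returns False;
# any other string raises argparse.ArgumentTypeError so argparse reports the
# bad value back to the user.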
def add_arg(obj, arg_name, arg_config):
"""Add argument named arg_name using arg_config to argparse obj."""
default = arg_config.get("default", "")
suppress = arg_config.get("suppress", False)
req = arg_config.get("required", False)
dest = arg_config.get("dest", arg_name)
short = arg_config.get("short", True)
helps = arg_config.get("help", "No help provided!")
choices = arg_config.get("choices", None)
long_opt = "--{}".format(arg_name)
if short is True:
short_opt = "-{}".format(arg_name[0])
elif short:
short_opt = "-{}".format(short)
else:
short_opt = ""
if isinstance(default, bool):
type_val = str2bool
elif isinstance(default, int):
type_val = int
elif isinstance(default, float):
type_val = float
else:
type_val = None
defval = argparse.SUPPRESS if suppress else default
ctxt = " [valid choices: {c}]".format(c=", ".join(choices)) if choices else ""
rtxt = "[REQUIRED] " if req else ""
helpstr = "{r}{h} [default: '{d}']{c}".format(h=helps, d=default, c=ctxt, r=rtxt)
helpstr = helpstr.replace("%", "%%")
add_args = [x for x in [long_opt, short_opt] if x]
add_kwargs = dict(default=defval, required=req, dest=dest, type=type_val)
add_kwargs.update(choices=choices, help=helpstr)
obj.add_argument(*add_args, **add_kwargs)
def make_parser(script, doc, version, args):
"""Construct a ShellParser with args."""
base = ShellParser(script=script, description=doc, add_help=False)
for gname, gargs in args.items():
grp = base.add_argument_group(gname)
gargs_sort = sorted(gargs.items(), key=lambda x: x[1].get("priority", 1))
for arg_name, arg_config in [x for x in gargs_sort if x[1].get("required", False)]:
add_arg(obj=grp, arg_name=arg_name, arg_config=arg_config)
for arg_name, arg_config in [x for x in gargs_sort if not x[1].get("required", False)]:
add_arg(obj=grp, arg_name=arg_name, arg_config=arg_config)
parser = ShellParser(script=script, description=doc, parents=[base], add_help=False)
parser.add_argument('--version', action='version', version=version)
parser.add_argument('--help', action='help')
return parser
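# Sketch of how an args mapping turns into CLI flags (hypothetical names, just
# mirroring the structure that __main__ builds below):
#
#     args = {"My Options": {"clean": {"default": False, "help": "Remove output"}}}
#     parser = make_parser(script="x.py", doc="demo", version="1.0", args=args)
#     # -> exposes --clean/-c parsed through str2bool, plus --version and --help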
def run(args):
"""Run the script."""
verbose = args.loud
clean = args.clean
platform_version = args.platform_version
static_path = abspath(args.static_path)
wsdl_path = abspath(args.wsdl_path)
tmpl_path = abspath(args.tmpl_path)
output_path = abspath(args.output_path)
manual_wsdl_path = abspath(args.manual_wsdl)
wsdl_file = os.path.basename(wsdl_path)
api_file = "__init__.py"
llvl = logging.DEBUG if verbose else logging.INFO
lfmt = "[%(name)-12s] [%(funcName)-15s] %(levelname)-8s %(message)s"
logging.basicConfig(level=llvl, format=lfmt)
# by default, we try to parse platform_version from the filename
if platform_version == "from_filename":
try:
platform_version = os.path.splitext(os.path.basename(args.wsdl_path))[0].split("-", 1)[1].strip()
except Exception:
m = "Unable to parse platform version from {p}, expected 'name-$version.wsdl'"
m = m.format(p=args.wsdl_path)
raise Exception(m)
else:
m = "Parsed platform version from {p} as '{v}'"
m = m.format(p=args.wsdl_path, v=platform_version)
LOG.info(m)
# we use imp.load_source here to bootstrap parsers.py and version.py from static_path
# because we don't have a "built" package of tanium_soul yet
manual_wsdl = imp.load_source("wsdl_manual", manual_wsdl_path)
parser_path = os.path.join(static_path, "parsers.py")
parser = imp.load_source("tanium_soul.parsers", parser_path)
pkg_version_path = os.path.join(static_path, "version.py")
pkg_version = imp.load_source("pkg_version", pkg_version_path)
# instantiate our wsdl parser
wp = parser.WsdlParser()
LOG.info(wp)
# read wsdl_path in as bytes
with io.open(wsdl_path, "rb") as fh:
xml = fh.read()
m = "Read wsdl_path '{p}' as bytes with {l} bytes"
m = m.format(p=wsdl_path, l=len(xml))
LOG.info(m)
# parse xml for namespaces
wsdl_ns = wp.xml2nsmap(xml=xml, register=True)
m = "Parsed wsdl_path '{p}' for namespaces: {n}"
m = m.format(p=wsdl_path, n=wsdl_ns)
LOG.debug(m)
# parse xml with comments into wrapped root
root_el = wp.xml2el_comments(xml=xml, wrap_root=True)
m = "Parsed wsdl_path '{p}' with comments into wrapped root {e}"
m = m.format(p=wsdl_path, e=root_el)
LOG.debug(m)
# get version from comments in wrapped root
wsdl_version = wp.get_wsdl_version(el=root_el)
m = "Found wsdl_version '{v}' from comments under {e}"
m = m.format(v=wsdl_version, e=root_el)
LOG.debug(m)
# get the schema element under the wrapped root using XPATH_TYPES from wp
schema_el = wp.find(el=root_el, xpath=wp.XPATH_TYPES, nsmap=wsdl_ns)
m = "Found schema with {l} items under root element using xpath '{x}'"
m = m.format(l=len(schema_el), x=wp.XPATH_TYPES)
LOG.info(m)
# create a list of dicts using our schema
schema = wp.schema(schema_el=schema_el, nsmap=wsdl_ns, logger=LOG)
# create our baseline template variables
tmpl_vars = {}
tmpl_vars.update(pkg_version.VERSION)
tmpl_vars.update(script_name=THIS_SCRIPT, script_ver=__version__, wsdl_ns=wsdl_ns)
tmpl_vars.update(platform_version=platform_version, wsdl_file=wsdl_file, wsdl_version=wsdl_version)
# instantiate our template processor
tp = TemplateProcessor(tmpl_path=tmpl_path, tmpl_vars=tmpl_vars)
LOG.info(tp)
# parse schema into api
api = tp.api(schema=schema, manual_wsdl=manual_wsdl)
m = "Generated '{p}' with {l} bytes"
m = m.format(p=api_file, l=len(api))
LOG.info(m)
# build $output_path/$pkg_dir based on pkg_title from VERSION in version.py
pkg_dir = pkg_version.VERSION["pkg_title"]
pkg_path = os.path.join(output_path, pkg_dir)
if os.path.isdir(pkg_path):
if not clean:
m = "'{p}' already exists and clean is not True"
m = m.format(p=pkg_path)
raise Exception(m)
else:
m = "'{p}' already exists and clean is True, removing"
m = m.format(p=pkg_path)
LOG.info(m)
shutil.rmtree(pkg_path, True)
m = "Copying contents of '{s}' to '{p}'"
m = m.format(s=static_path, p=pkg_path)
LOG.info(m)
shutil.copytree(static_path, pkg_path)
shutil.copy(wsdl_path, pkg_path)
api_path = os.path.join(pkg_path, api_file)
with io.open(api_path, "w", encoding="utf-8") as fh:
fh.write(api)
m = "Wrote {l} bytes to {p}"
m = m.format(l=len(api), p=api_path)
LOG.info(m)
m = "Finished building {p}"
m = m.format(p=pkg_path)
LOG.info(m)
if __name__ == "__main__":
MANUAL_WSDL_FILE = "manual_wsdl.py"
MANUAL_WSDL_PATH = os.path.join(THIS_PATH, MANUAL_WSDL_FILE)
grp1 = {}
grp1["clean"] = {"default": False, "help": "Remove pre-existing 'tanium_soul' in output_path"}
grp1["output_path"] = {"default": THIS_PATH, "help": "Path to create 'tanium_soul' in"}
grp1["static_path"] = {"default": "build_statics", "help": "Path containing static sources"}
grp1["tmpl_path"] = {"default": "build_tmpls", "help": "Path containing templates"}
grp1["loud"] = {"default": False, "help": "Increase logging verbosity"}
grp1["wsdl_path"] = {"required": True, "help": "WSDL file to parse the schema from"}
grp1["manual_wsdl"] = {"default": MANUAL_WSDL_PATH, "help": "Manual WSDL definitions file"}
grp1["platform_version"] = {"default": "from_filename", "help": "Platform Version of wsdl_path"}
args = {"Build tanium_soul Options": grp1}
parser_args = {"script": THIS_SCRIPT, "doc": __doc__, "args": args, "version": __version__}
parser = make_parser(**parser_args)
args = parser.parse_args()
run(args)
| tanium/tanium_soul_py | build_soul.py | Python | mit | 24,066 |
__author__ = 'mslabicki'
import pygmo as pg
#
from pyltes.powerOptimizationProblemsDef import maximalThroughputProblemRR
from pyltes.powerOptimizationProblemsDef import local_maximalThroughputProblemRR
from pyltes.powerOptimizationProblemsDef import maximalMedianThrProblemRR
from pyltes.powerOptimizationProblemsDef import local_maximalMedianThrProblemRR
from pyltes.powerOptimizationProblemsDef import minInterQuartileRangeroblemRR
from pyltes.powerOptimizationProblemsDef import local_minInterQuartileRangeroblemRR
import copy
import math
import numpy as np
class pygmoPowerConfigurator:
def __init__(self,parent):
self.parent = parent
def findPowersRR(self, objectiveFunction="averageThr", sgaGenerations = 100, numberOfThreads = 11, numOfIndividuals = 10, evolveTimes = 10, method="global", x_arg=None, y_arg=None, expectedSignalLoss_arg=None):
if method == "local":
if x_arg == None:
x = self.parent.constraintAreaMaxX/2
else:
x = x_arg
if y_arg == None:
y = self.parent.constraintAreaMaxY/2
else:
y = y_arg
if expectedSignalLoss_arg == None:
maxDistance = min(self.parent.constraintAreaMaxX/2, self.parent.constraintAreaMaxY/2)
else:
maxDistance = returnDistanceFromSNR(expectedSignalLoss_arg)
localBsVector = []
for bs in self.parent.bs:
if math.sqrt((bs.x - x)**2 + (bs.y - y)**2) < maxDistance:
row = []
row.append(int(bs.ID))
row.append(math.sqrt((bs.x - x)**2 + (bs.y - y)**2))
localBsVector.append(row)
localBsVector = np.asarray(localBsVector)
if objectiveFunction == "averageThr":
if method == "local":
localListBS = []
for i in range(len(localBsVector)):
localListBS.append(localBsVector[i,0])
prob = pg.problem(local_maximalThroughputProblemRR(dim=len(localBsVector), networkInstance=self.parent, lowerTxLimit=self.parent.minTxPower, upperTxLimit=self.parent.maxTxPower, localListBS=localListBS))
if method == "global":
prob = pg.problem(maximalThroughputProblemRR(dim=len(self.parent.bs), networkInstance=self.parent, lowerTxLimit=self.parent.minTxPower, upperTxLimit=self.parent.maxTxPower))
if objectiveFunction == "medianThr":
if method == "local":
localListBS = []
for i in range(len(localBsVector)):
localListBS.append(localBsVector[i,0])
prob = pg.problem(local_maximalMedianThrProblemRR(dim=len(localBsVector), networkInstance=self.parent, lowerTxLimit=self.parent.minTxPower, upperTxLimit=self.parent.maxTxPower, localListBS=localListBS))
if method == "global":
prob = pg.problem(maximalMedianThrProblemRR(dim=len(self.parent.bs), networkInstance=self.parent, lowerTxLimit=self.parent.minTxPower, upperTxLimit=self.parent.maxTxPower))
if objectiveFunction == "minIQRthr":
if method == "local":
localListBS = []
for i in range(len(localBsVector)):
localListBS.append(localBsVector[i,0])
prob = pg.problem(local_minInterQuartileRangeroblemRR(dim=len(localBsVector), networkInstance=self.parent, lowerTxLimit=self.parent.minTxPower, upperTxLimit=self.parent.maxTxPower, localListBS=localListBS))
if method == "global":
prob = pg.problem(minInterQuartileRangeroblemRR(dim=len(self.parent.bs), networkInstance=self.parent, lowerTxLimit=self.parent.minTxPower, upperTxLimit=self.parent.maxTxPower))
prob.siec = copy.deepcopy(self.parent)
# algo = algorithm.sga(gen=sgaGenerations)
algo = pg.algorithm(pg.sga(gen=sgaGenerations))
# archi = archipelago(algo, prob, numberOfThreads, numOfIndividuals, topology = topology.barabasi_albert())
# archi.evolve(evolveTimes)
# archi.join()
population = pg.population(prob, numOfIndividuals)
population = algo.evolve(population)
theBestCostF = 0
islandNumber = -1
islandCounter = 0
# for island in archi:
# if theBestCostF > island.population.champion.f[0]:
# theBestCostF = island.population.champion.f[0]
# islandNumber = islandCounter
# islandCounter = islandCounter + 1
if method == "global":
for i in range(len(self.parent.bs)):
self.parent.bs[i].outsidePower = population.champion_x[i]
if method == "local":
for i in range(len(localListBS)):
# self.parent.bs[int(prob.bsList[i])].outsidePower = archi[islandNumber].population.champion.x[i]
self.parent.bs[int(localListBS[i])].outsidePower = population.champion_x[i]
return len(localBsVector)
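    # Illustrative usage sketch (assumes an already-built pyltes network object
    # named "network"; the generation and population sizes are placeholders):
    #
    #     configurator = pygmoPowerConfigurator(network)
    #     configurator.findPowersRR(objectiveFunction="averageThr",
    #                               sgaGenerations=50, numOfIndividuals=20)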
def returnDistanceFromSNR(expectedSignalLoss):
lambda_val = 0.142758313333
a = 4.0
b = 0.0065
c = 17.1
d = 10.8
s = 15.8
ht = 40
hr = 1.5
f = 2.1
gamma = a - b*ht + c/ht
Xf = 6 * math.log10( f/2 )
Xh = -d * math.log10( hr/2 )
R0 = 100.0
R0p = R0 * pow(10.0,-( (Xf+Xh) / (10*gamma) ))
bandwidth=20
k = 1.3806488 * math.pow(10, -23)
T = 293.0
BW = bandwidth * 1000 * 1000
N = 10*math.log10(k*T) + 10*math.log10(BW)
alpha = 20 * math.log10( (4*math.pi*R0p) / lambda_val )
R = R0 * math.pow(10, (expectedSignalLoss - alpha-Xf-Xh-s - N)/(10*gamma))
return R
| iitis/PyLTEs | pyltes/powerConfigurator.py | Python | mit | 5,818 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Invoice.date_of_issue'
db.add_column('books_invoice', 'date_of_issue',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Invoice.date_of_issue'
db.delete_column('books_invoice', 'date_of_issue')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'books.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'books.client': {
'Meta': {'object_name': 'Client'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'street_adress': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'books.expense': {
'Meta': {'object_name': 'Expense'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['books.Category']", 'null': 'True', 'blank': 'True'}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['books.Client']", 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'receipt': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'taxes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['books.Tax']", 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['books.Vendor']", 'null': 'True', 'blank': 'True'})
},
'books.invoice': {
'Meta': {'object_name': 'Invoice'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Client']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_issue': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_number': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'paid_notes': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Dr'", 'max_length': '2', 'null': 'True'}),
'sub_description': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'sub_notes': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'terms': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'books.item': {
'Meta': {'object_name': 'Item'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Client']", 'null': 'True'}),
'cost': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'})
},
'books.project': {
'Meta': {'object_name': 'Project'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Client']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'rate_per_hour': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'})
},
'books.report': {
'Meta': {'object_name': 'Report'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'expense': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'taxes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'timesheet': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'books.task': {
'Meta': {'object_name': 'Task'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Project']"}),
'rate_per_hour': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'})
},
'books.tax': {
'Meta': {'object_name': 'Tax'},
'compound_tax': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gouv_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {}),
'rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2', 'blank': 'True'})
},
'books.time': {
'Meta': {'object_name': 'Time'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Invoice']", 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '1000', 'blank': 'True'}),
'rate_per_hour': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Task']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2', 'blank': 'True'})
},
'books.vendor': {
'Meta': {'object_name': 'Vendor'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['books'] | carquois/blobon | blobon/books/migrations/0030_auto__add_field_invoice_date_of_issue.py | Python | mit | 12,495 |
"""
Revision ID: 0356_add_webautn_auth_type
Revises: 0355_add_webauthn_table
Create Date: 2021-05-13 12:42:45.190269
"""
from alembic import op
revision = '0356_add_webautn_auth_type'
down_revision = '0355_add_webauthn_table'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("INSERT INTO auth_type VALUES ('webauthn_auth')")
op.drop_constraint('ck_users_mobile_or_email_auth', 'users', type_=None, schema=None)
op.execute("""
ALTER TABLE users ADD CONSTRAINT "ck_user_has_mobile_or_other_auth"
CHECK (auth_type in ('email_auth', 'webauthn_auth') or mobile_number is not null)
NOT VALID
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("UPDATE users SET auth_type = 'sms_auth' WHERE auth_type = 'webauthn_auth'")
op.execute("UPDATE invited_users SET auth_type = 'sms_auth' WHERE auth_type = 'webauthn_auth'")
op.drop_constraint('ck_user_has_mobile_or_other_auth', 'users', type_=None, schema=None)
op.execute("""
ALTER TABLE users ADD CONSTRAINT "ck_users_mobile_or_email_auth"
CHECK (auth_type = 'email_auth' or mobile_number is not null)
NOT VALID
""")
op.execute("DELETE FROM auth_type WHERE name = 'webauthn_auth'")
# ### end Alembic commands ###
| alphagov/notifications-api | migrations/versions/0356_add_webautn_auth_type.py | Python | mit | 1,380 |
from human_bot import HumanBot
class AdaptivePlayBot(HumanBot):
    def __init__(self, *args, **kwargs):
        # No extra state yet; defer everything to HumanBot.
        super(AdaptivePlayBot, self).__init__(*args, **kwargs) | crainiarc/poker-ai-planner | agents/adaptive_play_bot.py | Python | mit | 99 |
# pylint: disable=preferred-module # FIXME: remove once migrated per GH-725
import unittest
from ansiblelint.rules import RulesCollection
from ansiblelint.rules.MetaChangeFromDefaultRule import MetaChangeFromDefaultRule
from ansiblelint.testing import RunFromText
DEFAULT_GALAXY_INFO = '''
galaxy_info:
author: your name
description: your description
company: your company (optional)
license: license (GPLv2, CC-BY, etc)
'''
class TestMetaChangeFromDefault(unittest.TestCase):
collection = RulesCollection()
collection.register(MetaChangeFromDefaultRule())
def setUp(self):
self.runner = RunFromText(self.collection)
def test_default_galaxy_info(self):
results = self.runner.run_role_meta_main(DEFAULT_GALAXY_INFO)
self.assertIn("Should change default metadata: author",
str(results))
self.assertIn("Should change default metadata: description",
str(results))
self.assertIn("Should change default metadata: company",
str(results))
self.assertIn("Should change default metadata: license",
str(results))
| willthames/ansible-lint | test/TestMetaChangeFromDefault.py | Python | mit | 1,169 |
import glob
import numpy as np
import pandas as pd
from numpy import nan
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/RRBS_anno_clean")
repeats = pd.read_csv("repeats_hg19.csv")
annofiles = glob.glob("RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.G*")
def between_range(row):
subset = repeats.loc[(row["chr"] == repeats.chr) & (row.start >= repeats.start) & (row.start <= repeats.end), :]
if subset.empty:
return np.nan
return subset.repeat_class
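# between_range is applied row by row below: for each methylation call it
# returns the repeat_class value(s) of any hg19 repeat interval on the same
# chromosome that contains the call's start coordinate, or NaN if none does.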
#newdf1 = pd.DataFrame()
for filename in annofiles:
df = pd.read_table(filename)
df["hg19_repeats"] = df.apply(between_range, axis = 1)
df.to_csv(str("repeatregions_") + filename + ".csv", index=False)
| evanbiederstedt/RRBSfun | scripts/repeat_finder_scripts/repeat_finder_RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.G.py | Python | mit | 706 |
# -*- coding: utf-8 -*-
from libqtile.manager import Key, Click, Drag, Screen, Group
from libqtile.command import lazy
from libqtile import layout, bar, widget, hook
from libqtile import xcbq
xcbq.keysyms["XF86AudioRaiseVolume"] = 0x1008ff13
xcbq.keysyms["XF86AudioLowerVolume"] = 0x1008ff11
xcbq.keysyms["XF86AudioMute"] = 0x1008ff12
def window_sorter(win):
patterns = (
('Яндекс.Почта', 'E-mail'),
('Gmail', 'E-mail'),
('SquirrelMail', 'E-mail'),
('zeromq', 'Docs'),
('PyYAML', 'Docs'),
('documentation', 'Docs'),
('-ietf-', 'Docs'),
('GNOME Live!', 'Docs'),
('Guide', 'Docs'),
)
for k, v in patterns:
if k in win.name:
return v
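# Used with the TreeTab layout below (via lazy.layout.sort_windows): windows
# whose titles match one of the patterns are filed into the named section
# ('E-mail' or 'Docs'); anything else returns None and is left untouched.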
mod = "mod4"
keys = [
Key([mod], "j",
lazy.layout.down()),
Key([mod], "k",
lazy.layout.up()),
Key([mod, "shift"], "j",
lazy.layout.move_down()),
Key([mod, "shift"], "k",
lazy.layout.move_up()),
Key([mod, "control"], "j",
lazy.layout.section_down()),
Key([mod, "control"], "k",
lazy.layout.section_up()),
Key([mod], "h",
lazy.layout.collapse_branch()), # for tree layout
Key([mod], "l",
lazy.layout.expand_branch()), # for tree layout
Key([mod], "r",
lazy.layout.sort_windows(window_sorter)), # for tree layout
Key([mod, "shift"], "h",
lazy.layout.move_left()),
Key([mod, "shift"], "l",
lazy.layout.move_right()),
Key([mod, "control"], "l",
lazy.layout.increase_ratio()),
Key([mod, "control"], "h",
lazy.layout.decrease_ratio()),
Key([mod], "comma",
lazy.layout.increase_nmaster()),
Key([mod], "period",
lazy.layout.decrease_nmaster()),
Key([mod], "Tab",
lazy.group.next_window()),
Key([mod, "shift"], "Tab",
lazy.group.prev_window()),
Key([mod, "shift"], "Return",
lazy.layout.rotate()),
Key([mod, "shift"], "space",
lazy.layout.toggle_split()),
Key([mod], "w",
lazy.to_screen(0)),
Key([mod], "e",
lazy.to_screen(1)),
Key([mod], "space",
lazy.nextlayout()),
Key([mod], "c",
lazy.window.kill()),
Key([mod], "t",
lazy.window.disable_floating()),
Key([mod, "shift"], "t",
lazy.window.enable_floating()),
Key([mod], "p",
lazy.spawn("exec dmenu_run "
"-fn 'Consolas:size=13' -nb '#000000' -nf '#ffffff' -b")),
Key([mod], "b",
lazy.spawn("~/note/conf/uzbl/open_history")),
Key([mod, "shift"], "b",
lazy.spawn("~/note/conf/uzbl/open_bookmark")),
Key([mod], "s",
lazy.spawn("~/note/conf/uzbl/open_ddg")),
Key([mod, "shift"], "s",
lazy.spawn("~/note/conf/uzbl/open_goog")),
Key([mod], "q",
lazy.spawn('xtrlock')),
Key([mod], "y",
lazy.spawn('xclip -o -selection primary | xclip -selection clipboard')),
Key([mod], "u",
lazy.spawn('xclip -o -selection clipboard | xclip -selection primary')),
Key([], "XF86AudioRaiseVolume",
lazy.spawn("amixer sset Master 5%+")),
Key([], "XF86AudioLowerVolume",
lazy.spawn("amixer sset Master 5%-")),
Key([], "XF86AudioMute",
lazy.spawn("amixer sset Master toggle")),
Key(["shift"], "XF86AudioRaiseVolume",
lazy.spawn("mpc volume +5")),
Key(["shift"], "XF86AudioLowerVolume",
lazy.spawn("mpc volume -5")),
Key(["shift"], "XF86AudioMute",
lazy.spawn("mpc toggle")),
Key([mod], "Left",
lazy.prevgroup()),
Key([mod], "Right",
lazy.nextgroup()),
]
mouse = [
Drag([mod], "Button1", lazy.window.set_position_floating(),
start=lazy.window.get_position()),
Drag([mod], "Button3", lazy.window.set_size_floating(),
start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front())
]
border = dict(
border_normal='#808080',
border_width=2,
)
layouts = [
layout.Tile(**border),
layout.Max(),
layout.Stack(**border),
layout.TreeTab(sections=['Surfing', 'E-mail', 'Docs', 'Incognito']),
layout.Slice('left', 320, wmclass='pino',
fallback=layout.Slice('right', 320, role='roster',
fallback=layout.Stack(1, **border))),
layout.Slice('left', 192, role='gimp-toolbox',
fallback=layout.Slice('right', 256, role='gimp-dock',
fallback=layout.Stack(1, **border))),
]
floating_layout = layout.Floating(**border)
groups = [
Group('1'),
Group('2', layout='max'),
Group('3'),
Group('4', layout='treetab'),
Group('5'),
Group('6'),
Group('7'),
Group('8'),
Group('9'),
]
for i in groups:
keys.append(
Key([mod], i.name, lazy.group[i.name].toscreen())
)
keys.append(
Key([mod, "shift"], i.name, lazy.window.togroup(i.name))
)
screens = [
Screen(
top = bar.Bar(
[
widget.GroupBox(borderwidth=2,
font='Consolas',fontsize=18,
padding=1, margin_x=1, margin_y=1),
widget.Sep(),
widget.WindowName(
font='Consolas',fontsize=18, margin_x=6),
widget.Sep(),
widget.Battery(
font='Consolas',fontsize=18, margin_x=6),
widget.Sep(),
widget.CPUGraph(),
widget.MemoryGraph(),
widget.SwapGraph(foreground='C02020'),
widget.Sep(),
widget.Systray(),
widget.Sep(),
widget.Clock('%H:%M:%S %d.%m.%Y',
font='Consolas', fontsize=18, padding=6),
],
24,
),
),
]
@hook.subscribe.client_new
def dialogs(window):
if(window.window.get_wm_type() == 'dialog'
or window.window.get_wm_transient_for()):
window.floating = True
| andrelaszlo/qtile | examples/config/tailhook-config.py | Python | mit | 6,113 |
import os
import glob
from setuptools import setup
from setuptools.command.install import install
def post_install(install_path):
"""
Post install script for pyCUDA applications to warm the cubin cache
"""
import pycuda.autoinit
from pycuda.compiler import SourceModule
CACHE_DIR = os.path.join(install_path, 'cache')
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
for kernel in glob.glob(os.path.join(install_path, 'kernel', '*.cu')):
SourceModule(open(kernel).read(), cache_dir=CACHE_DIR)
class CudaInstall(install):
def run(self):
install.run(self)
post_install(os.path.join(self.install_lib, 'sciguppy'))
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
all_requirements = read('requirements.txt').split()
setup_requirements = filter(lambda r: 'pycuda' in r or 'pytools' in r, all_requirements)
install_requirements = filter(lambda r: not r.startswith('git'), all_requirements)
dependency_links = filter(lambda r: r.startswith('git'), all_requirements)
setup(
name = "sciguppy",
version = "0.0.8",
author="Captricity",
author_email="[email protected]",
description="SciGuppy is a library that accelerates scipy functions using the GPU",
packages=["sciguppy"],
package_data={'': ['**/*.cu']},
cmdclass={
'install': CudaInstall
},
scripts=['scripts/sciguppy_benchmark'],
setup_requires=setup_requirements,
install_requires=install_requirements,
dependency_links=dependency_links
)
| Captricity/sciguppy | setup.py | Python | mit | 1,576 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUIs\CoMPlEx_hwConfig_Dialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_hwConfig_dialog(object):
def setupUi(self, hwConfig_dialog):
hwConfig_dialog.setObjectName(_fromUtf8("hwConfig_dialog"))
hwConfig_dialog.resize(531, 816)
self.verticalLayout = QtGui.QVBoxLayout(hwConfig_dialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = QtGui.QGroupBox(hwConfig_dialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.formLayout = QtGui.QFormLayout(self.groupBox)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setObjectName(_fromUtf8("label"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
self.afmIpLine = QtGui.QLineEdit(self.groupBox)
self.afmIpLine.setObjectName(_fromUtf8("afmIpLine"))
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.afmIpLine)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
self.afmSubPortNum = QtGui.QSpinBox(self.groupBox)
self.afmSubPortNum.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
self.afmSubPortNum.setMaximum(100000000)
self.afmSubPortNum.setObjectName(_fromUtf8("afmSubPortNum"))
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.afmSubPortNum)
self.label_14 = QtGui.QLabel(self.groupBox)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_14)
self.afmPubPortNum = QtGui.QSpinBox(self.groupBox)
self.afmPubPortNum.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
self.afmPubPortNum.setMaximum(100000000)
self.afmPubPortNum.setObjectName(_fromUtf8("afmPubPortNum"))
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.afmPubPortNum)
self.label_15 = QtGui.QLabel(self.groupBox)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_15)
self.curveNameLine = QtGui.QLineEdit(self.groupBox)
self.curveNameLine.setObjectName(_fromUtf8("curveNameLine"))
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.curveNameLine)
self.label_16 = QtGui.QLabel(self.groupBox)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_16)
self.monitNameLine = QtGui.QLineEdit(self.groupBox)
self.monitNameLine.setObjectName(_fromUtf8("monitNameLine"))
self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.monitNameLine)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_3)
self.xyCmdTagLine = QtGui.QLineEdit(self.groupBox)
self.xyCmdTagLine.setObjectName(_fromUtf8("xyCmdTagLine"))
self.formLayout.setWidget(5, QtGui.QFormLayout.FieldRole, self.xyCmdTagLine)
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_4)
self.xyResTagLine = QtGui.QLineEdit(self.groupBox)
self.xyResTagLine.setObjectName(_fromUtf8("xyResTagLine"))
self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.xyResTagLine)
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(hwConfig_dialog)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.formLayout_2 = QtGui.QFormLayout(self.groupBox_2)
self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.label_5 = QtGui.QLabel(self.groupBox_2)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_5)
self.maxPiezoVoltNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.maxPiezoVoltNumDbl.setMinimum(-99.0)
self.maxPiezoVoltNumDbl.setObjectName(_fromUtf8("maxPiezoVoltNumDbl"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.maxPiezoVoltNumDbl)
self.label_6 = QtGui.QLabel(self.groupBox_2)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_6)
self.minPiezoVoltNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.minPiezoVoltNumDbl.setMinimum(-99.0)
self.minPiezoVoltNumDbl.setObjectName(_fromUtf8("minPiezoVoltNumDbl"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.minPiezoVoltNumDbl)
self.label_7 = QtGui.QLabel(self.groupBox_2)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_7)
self.maxPiezoExtNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.maxPiezoExtNumDbl.setMinimum(-99.0)
self.maxPiezoExtNumDbl.setObjectName(_fromUtf8("maxPiezoExtNumDbl"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.maxPiezoExtNumDbl)
self.label_8 = QtGui.QLabel(self.groupBox_2)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_8)
self.minPiezoExtNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.minPiezoExtNumDbl.setMinimum(-99.0)
self.minPiezoExtNumDbl.setObjectName(_fromUtf8("minPiezoExtNumDbl"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.minPiezoExtNumDbl)
self.label_9 = QtGui.QLabel(self.groupBox_2)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_9)
self.farNearCmbBox = QtGui.QComboBox(self.groupBox_2)
self.farNearCmbBox.setObjectName(_fromUtf8("farNearCmbBox"))
self.farNearCmbBox.addItem(_fromUtf8(""))
self.farNearCmbBox.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.FieldRole, self.farNearCmbBox)
self.label_19 = QtGui.QLabel(self.groupBox_2)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.formLayout_2.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_19)
self.toStartSpeedNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.toStartSpeedNumDbl.setMinimum(1.0)
self.toStartSpeedNumDbl.setMaximum(20000.0)
self.toStartSpeedNumDbl.setObjectName(_fromUtf8("toStartSpeedNumDbl"))
self.formLayout_2.setWidget(7, QtGui.QFormLayout.FieldRole, self.toStartSpeedNumDbl)
self.maxSpeedNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.maxSpeedNumDbl.setKeyboardTracking(False)
self.maxSpeedNumDbl.setMinimum(1.0)
self.maxSpeedNumDbl.setMaximum(20000.0)
self.maxSpeedNumDbl.setObjectName(_fromUtf8("maxSpeedNumDbl"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.FieldRole, self.maxSpeedNumDbl)
self.label_22 = QtGui.QLabel(self.groupBox_2)
self.label_22.setObjectName(_fromUtf8("label_22"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_22)
self.label_23 = QtGui.QLabel(self.groupBox_2)
self.label_23.setObjectName(_fromUtf8("label_23"))
self.formLayout_2.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_23)
self.movingObjCmbBox = QtGui.QComboBox(self.groupBox_2)
self.movingObjCmbBox.setObjectName(_fromUtf8("movingObjCmbBox"))
self.movingObjCmbBox.addItem(_fromUtf8(""))
self.movingObjCmbBox.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(5, QtGui.QFormLayout.FieldRole, self.movingObjCmbBox)
self.label_5.raise_()
self.maxPiezoVoltNumDbl.raise_()
self.label_6.raise_()
self.minPiezoVoltNumDbl.raise_()
self.label_7.raise_()
self.label_8.raise_()
self.maxPiezoExtNumDbl.raise_()
self.minPiezoExtNumDbl.raise_()
self.label_9.raise_()
self.farNearCmbBox.raise_()
self.toStartSpeedNumDbl.raise_()
self.label_19.raise_()
self.maxSpeedNumDbl.raise_()
self.label_22.raise_()
self.label_23.raise_()
self.movingObjCmbBox.raise_()
self.verticalLayout.addWidget(self.groupBox_2)
self.groupBox_3 = QtGui.QGroupBox(hwConfig_dialog)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.formLayout_3 = QtGui.QFormLayout(self.groupBox_3)
self.formLayout_3.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_3.setObjectName(_fromUtf8("formLayout_3"))
self.label_10 = QtGui.QLabel(self.groupBox_3)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_10)
self.deflSignCmbBox = QtGui.QComboBox(self.groupBox_3)
self.deflSignCmbBox.setObjectName(_fromUtf8("deflSignCmbBox"))
self.deflSignCmbBox.addItem(_fromUtf8(""))
self.deflSignCmbBox.addItem(_fromUtf8(""))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.FieldRole, self.deflSignCmbBox)
self.label_11 = QtGui.QLabel(self.groupBox_3)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_11)
self.sumThrNumDbl = QtGui.QDoubleSpinBox(self.groupBox_3)
self.sumThrNumDbl.setObjectName(_fromUtf8("sumThrNumDbl"))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.FieldRole, self.sumThrNumDbl)
self.label_12 = QtGui.QLabel(self.groupBox_3)
self.label_12.setObjectName(_fromUtf8("label_12"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_12)
self.iGainMaxNumDbl = QtGui.QDoubleSpinBox(self.groupBox_3)
self.iGainMaxNumDbl.setMaximum(100000.0)
self.iGainMaxNumDbl.setObjectName(_fromUtf8("iGainMaxNumDbl"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.FieldRole, self.iGainMaxNumDbl)
self.label_13 = QtGui.QLabel(self.groupBox_3)
self.label_13.setObjectName(_fromUtf8("label_13"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_13)
self.pGainMaxNumDbl = QtGui.QDoubleSpinBox(self.groupBox_3)
self.pGainMaxNumDbl.setMaximum(100000.0)
self.pGainMaxNumDbl.setObjectName(_fromUtf8("pGainMaxNumDbl"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.FieldRole, self.pGainMaxNumDbl)
self.defBaseNameLine = QtGui.QLineEdit(self.groupBox_3)
self.defBaseNameLine.setObjectName(_fromUtf8("defBaseNameLine"))
self.formLayout_3.setWidget(7, QtGui.QFormLayout.FieldRole, self.defBaseNameLine)
self.defDirLine = QtGui.QLineEdit(self.groupBox_3)
self.defDirLine.setObjectName(_fromUtf8("defDirLine"))
self.formLayout_3.setWidget(6, QtGui.QFormLayout.FieldRole, self.defDirLine)
self.label_17 = QtGui.QLabel(self.groupBox_3)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.formLayout_3.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_17)
self.label_18 = QtGui.QLabel(self.groupBox_3)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.formLayout_3.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_18)
self.maxDeflVoltNumDbl = QtGui.QDoubleSpinBox(self.groupBox_3)
self.maxDeflVoltNumDbl.setMinimum(-99.0)
self.maxDeflVoltNumDbl.setObjectName(_fromUtf8("maxDeflVoltNumDbl"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.FieldRole, self.maxDeflVoltNumDbl)
self.minDeflVoltNumDbl = QtGui.QDoubleSpinBox(self.groupBox_3)
self.minDeflVoltNumDbl.setMinimum(-99.0)
self.minDeflVoltNumDbl.setObjectName(_fromUtf8("minDeflVoltNumDbl"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.FieldRole, self.minDeflVoltNumDbl)
self.label_20 = QtGui.QLabel(self.groupBox_3)
self.label_20.setObjectName(_fromUtf8("label_20"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_20)
self.label_21 = QtGui.QLabel(self.groupBox_3)
self.label_21.setObjectName(_fromUtf8("label_21"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_21)
self.verticalLayout.addWidget(self.groupBox_3)
self.saveNcancBtnBox = QtGui.QDialogButtonBox(hwConfig_dialog)
self.saveNcancBtnBox.setOrientation(QtCore.Qt.Horizontal)
self.saveNcancBtnBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Save)
self.saveNcancBtnBox.setObjectName(_fromUtf8("saveNcancBtnBox"))
self.verticalLayout.addWidget(self.saveNcancBtnBox)
self.verticalLayout.setStretch(0, 3)
self.verticalLayout.setStretch(1, 3)
self.verticalLayout.setStretch(2, 3)
self.verticalLayout.setStretch(3, 1)
self.retranslateUi(hwConfig_dialog)
QtCore.QObject.connect(self.saveNcancBtnBox, QtCore.SIGNAL(_fromUtf8("accepted()")), hwConfig_dialog.accept)
QtCore.QObject.connect(self.saveNcancBtnBox, QtCore.SIGNAL(_fromUtf8("rejected()")), hwConfig_dialog.reject)
QtCore.QMetaObject.connectSlotsByName(hwConfig_dialog)
def retranslateUi(self, hwConfig_dialog):
hwConfig_dialog.setWindowTitle(_translate("hwConfig_dialog", "Hardware Config", None))
self.groupBox.setTitle(_translate("hwConfig_dialog", "Connections", None))
self.label.setText(_translate("hwConfig_dialog", "AFM IP", None))
self.label_2.setText(_translate("hwConfig_dialog", "AFM Sub Port", None))
self.label_14.setText(_translate("hwConfig_dialog", "AFM Pub Port", None))
self.label_15.setText(_translate("hwConfig_dialog", "High res. device name", None))
self.label_16.setText(_translate("hwConfig_dialog", "Monitor device name", None))
self.label_3.setText(_translate("hwConfig_dialog", "XY CMD Tag", None))
self.label_4.setText(_translate("hwConfig_dialog", "XY RES Tag", None))
self.groupBox_2.setTitle(_translate("hwConfig_dialog", "Piezo settings", None))
self.label_5.setText(_translate("hwConfig_dialog", "Max Piezo voltage [V]", None))
self.label_6.setText(_translate("hwConfig_dialog", "Min Piezo voltage [V]", None))
self.label_7.setText(_translate("hwConfig_dialog", "Max Piezo ext [um]", None))
self.label_8.setText(_translate("hwConfig_dialog", "Min Piezo ext [um]", None))
self.label_9.setText(_translate("hwConfig_dialog", "Min piezo ext =", None))
self.farNearCmbBox.setItemText(0, _translate("hwConfig_dialog", "Far", None))
self.farNearCmbBox.setItemText(1, _translate("hwConfig_dialog", "Near", None))
self.label_19.setText(_translate("hwConfig_dialog", "Start speed [nm/s]", None))
self.label_22.setText(_translate("hwConfig_dialog", "Max Speed [nm/s]", None))
self.label_23.setText(_translate("hwConfig_dialog", "Moving object =", None))
self.movingObjCmbBox.setItemText(0, _translate("hwConfig_dialog", "Tip", None))
self.movingObjCmbBox.setItemText(1, _translate("hwConfig_dialog", "Sample", None))
self.groupBox_3.setTitle(_translate("hwConfig_dialog", "Other", None))
self.label_10.setText(_translate("hwConfig_dialog", "Deflection sign", None))
self.deflSignCmbBox.setItemText(0, _translate("hwConfig_dialog", "Signal increase = force increase", None))
self.deflSignCmbBox.setItemText(1, _translate("hwConfig_dialog", "Signal increase = force decrease", None))
self.label_11.setText(_translate("hwConfig_dialog", "Sum Threshold [V]", None))
self.label_12.setText(_translate("hwConfig_dialog", "Integral Gain Max", None))
self.label_13.setText(_translate("hwConfig_dialog", "Proportional Gain Max", None))
self.label_17.setText(_translate("hwConfig_dialog", "Default data directory", None))
self.label_18.setText(_translate("hwConfig_dialog", "Default file base name", None))
self.label_20.setText(_translate("hwConfig_dialog", "Hi-res Defl max [V]", None))
self.label_21.setText(_translate("hwConfig_dialog", "Hi-res Defl min [V]", None))
| elandini/CoMPlEx | GUIs/CoMPlEx_hwConfig_Dialog.py | Python | mit | 17,425 |
from abc import ABC, abstractmethod
class Selector(ABC):
    """Abstract base class for selection operators.

    Subclasses implement make() to choose individuals from a population.
    """
def __init__(self):
pass
@abstractmethod
def make(self, population, selectSize, tSize):
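        # Select individuals from `population`; `selectSize` is how many to return
        # and `tSize` is presumably a tournament size (an assumption -- the original
        # code does not document these parameters).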
pass
| akkenoth/TSPGen | Operators/Selection/Selector.py | Python | mit | 201 |
from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwnerOrReadOnly(BasePermission):
    def has_object_permission(self, request, view, obj):
        # Read-only requests are allowed for any user; writes require ownership.
        if request.method in SAFE_METHODS:
            return True
return obj.user == request.user | videetssinghai/Blog-Rest-Api | posts/api/permissions.py | Python | mit | 196 |
#!/usr/bin/env python
import json
import time
import sys
import os
from collections import OrderedDict as dict
content = u"""\n
type cmdConf struct {
name string
argDesc string
group string
readonly bool
}
"""
def json_to_js(json_path, js_path):
"""Convert `commands.json` to `commands.js`"""
keys = []
with open(json_path) as fp:
_json = json.load(fp)
for k in _json.keys():
keys.append(k.encode('utf-8'))
with open(js_path, "w") as fp:
generate_time(fp)
fp.write("module.exports = [\n")
for k in sorted(keys):
fp.write('\t"%s",\n' % k.lower())
fp.write("]")
def json_to_go_array(json_path, go_path):
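    # Generate cmd/ledis_cli/const.go: a Go [][]string of (name, arguments, group)
    # entries, one per command, sorted by command name.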
g_fp = open(go_path, "w")
with open(json_path) as fp:
_json = json.load(fp)
generate_time(g_fp)
g_fp.write("package main\n\nvar helpCommands = [][]string{\n")
_json_sorted = dict(sorted(_json.items(), key=lambda x: x[0]))
for k, v in _json_sorted.iteritems():
g_fp.write('\t{"%s", "%s", "%s"},\n' % (k, v["arguments"], v["group"]))
g_fp.write("}\n")
g_fp.close()
def json_to_command_cnf(json_path, go_path):
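    # Generate server/command_cnf.go: a Go slice of cmdConf structs
    # (name, argDesc, group, readonly), one per command.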
g_fp = open(go_path, "w")
with open(json_path) as fp:
_json = json.load(fp)
generate_time(g_fp)
g_fp.write("package server")
print >> g_fp, content
g_fp.write("var cnfCmds = []cmdConf{\n")
for k, v in _json.iteritems():
g_fp.write('\t{\n\t\t"%s",\n\t\t"%s",\n\t\t"%s", \n\t\t%s,\n\t},\n' %
(k, v["arguments"], v["group"], "true" if v["readonly"] else "false" ))
g_fp.write("}\n")
g_fp.close()
def generate_time(fp):
fp.write("//This file was generated by ./generate.py on %s \n" %
time.strftime('%a %b %d %Y %H:%M:%S %z'))
if __name__ == "__main__":
usage = """
    Usage: python %s src_path dst_path
1. for Node.js client:
python generate.py /path/to/commands.json /path/to/commands.js
2. for cmd/ledis_cli/const.go
python generate.py /path/to/commands.json /path/to/const.go
3. for server/command_cnf.go
python generate.py /path/to/commands.json /path/to/command_cnf.go
"""
if len(sys.argv) != 3:
sys.exit(usage % os.path.basename(sys.argv[0]))
src_path, dst_path = sys.argv[1:]
dst_path_base = os.path.basename(dst_path)
if dst_path_base.endswith(".js"):
json_to_js(src_path, dst_path)
elif dst_path_base.startswith("const.go"):
json_to_go_array(src_path, dst_path)
elif dst_path_base.startswith("command"):
json_to_command_cnf(src_path, dst_path)
else:
        print "Unsupported arguments"
| holys-archive/ledisdb | generate.py | Python | mit | 2,737 |
def getSpeciesValue(species):
"""
Return the initial amount of a species.
If species.isSetInitialAmount() == True, return the initial amount.
Otherwise, return the initial concentration.
***** args *****
species: a libsbml.Species object
"""
if species.isSetInitialAmount():
return species.getInitialAmount()
else:
return species.getInitialConcentration()
def generateTemplate(source, filename, sumname, dataname=None):
"""
Generate a model summary file (model_summary.txt) and a template file (filename) from one or more SBML source files.
***** args *****
source: a list of strings.
Each entry describes a SBML file.
***** kwargs *****
filename: a string.
The name of the template to be generated.
    sumname: a string.
The name of the summary to be generated.
dataname: a string.
The name of a datafile.
"""
out_file=open(filename,"w")
sum_file=open(sumname,"w")
have_data = False
times = []
vars = []
nvar = 0
first = True
if dataname != None:
have_data = True
df = open(dataname,'r')
for line in df:
strs = str(line).split(' ')
vals = [float(i) for i in strs]
if first==True:
for j in range(1,len(vals)):
vars.append([])
first=False
nvar = len(vals)-1
times.append(vals[0])
for j in range(1,len(vals)):
vars[j-1].append(vals[j])
#print times
#print vars
out_file.write("<input>\n\n")
out_file.write("######################## number of models\n\n")
out_file.write("# Number of models for which details are described in this input file\n")
out_file.write("<modelnumber> "+repr(len(source))+ " </modelnumber>\n\n")
out_file.write("######################## restart\n\n")
out_file.write("# Restart from previous (pickled) population?\n")
out_file.write("<restart> False </restart>\n\n")
out_file.write("######################## epsilon schedule\n\n")
out_file.write("# Automatic epsilon schedule. Provide a vector of final epsilons and the alpha (defaults to 0.9)\n")
out_file.write("<autoepsilon>\n")
out_file.write("<finalepsilon> 1.0 </finalepsilon>\n")
out_file.write("<alpha> 0.9 </alpha>\n")
out_file.write("</autoepsilon>\n\n")
out_file.write("# OR\n")
out_file.write("# Series of epsilons. (Whitespace delimited list)\n")
out_file.write("# Multiple epsilon schedules can be specified by giving additional vectors enclosed in <e2> </e2>, <e3> </e3> etc\n")
out_file.write("# NOTE: the parser always reads them in order and ignores the tag value\n")
out_file.write("<!-- <epsilon> -->\n")
out_file.write("<!-- <e1> 5.0 3.0 2.0 1.0 </e1> -->\n")
out_file.write("<!--</epsilon> -->\n")
out_file.write("\n")
out_file.write("######################## particles\n\n")
out_file.write("<particles> 100 </particles>\n\n")
out_file.write("######################## beta\n\n")
out_file.write("# Beta is the number of times to simulate each sampled parameter set.\n# This is only applicable for models simulated using Gillespie and SDE\n")
out_file.write("<beta> 1 </beta>\n\n")
out_file.write("######################## dt\n\n")
out_file.write("# Internal timestep for solver.\n# Make this small for a stiff model.\n")
out_file.write("<dt> 0.01 </dt>\n\n")
out_file.write("######################## perturbation kernels : OPTIONAL (default uniform)\n\n")
    out_file.write("# The perturbation kernels are computed with respect to the previous parameter distribution\n")
out_file.write("# Currently uniform and normal are implemented\n")
out_file.write("<kernel> uniform </kernel>\n\n")
out_file.write("######################## model kernel : OPTIONAL (default 0.7)\n\n")
out_file.write("# Probability of perturbing the sampled model (ignored when modelnumber = 1)\n")
out_file.write("<modelkernel> 0.7 </modelkernel>\n\n")
out_file.write("######################## ODE solver control : OPTIONAL \n\n")
    out_file.write("# rtol and atol can be specified here. If the model is stiff then setting these to small values might help the simulation to run\n")
out_file.write("#<rtol> </rtol> \n#<atol> </atol>\n\n")
out_file.write("######################## User-supplied data\n\n")
out_file.write("<data>\n")
out_file.write("# times: For ABC SMC, times must be a whitespace delimited list\n")
out_file.write("# In simulation mode these are the timepoints for which the simulations will be output\n")
if have_data == False:
out_file.write("<times> 0 1 2 3 4 5 6 7 8 9 10 </times>\n\n")
else:
out_file.write("<times>");
for i in times:
out_file.write(" "+repr(i) )
out_file.write(" </times>\n\n");
out_file.write("# variables: For ABC SMC, whitespace delimited lists of concentrations (ODE or SDE) or molecule numbers (Gillespie)\n")
out_file.write("# Denote your data via tags <v1> </v1> or <var1> </var1> or <v2> </v2> etc. The tags are ignored and the data read in order\n")
out_file.write("# For simulation these data are ignored\n")
out_file.write("# See fitting instruction below if the dimensionality of your data sets differ from the dimensionality of your model\n")
out_file.write("<variables>\n")
if have_data == False:
out_file.write(" <var1> </var1>\n")
else:
for k in range(nvar):
out_file.write("<var"+repr(k+1)+"> ");
for i in vars[k]:
out_file.write(" "+repr(i) )
out_file.write(" </var"+repr(k+1)+">\n");
out_file.write("</variables>\n")
out_file.write("</data>\n\n")
out_file.write("######################## Models\n\n")
out_file.write("<models>\n")
import libsbml
reader=libsbml.SBMLReader()
for i in range(0,len(source)):
sum_file.write("Model "+repr(i+1)+"\n")
sum_file.write("name: model"+repr(i+1)+"\nsource: "+source[i]+"\n\n")
out_file.write("<model"+repr(i+1)+">\n")
out_file.write("<name> model"+repr(i+1)+" </name>\n<source> "+source[i]+" </source>\n\n")
out_file.write("# type: the method used to simulate your model. ODE, SDE or Gillespie.\n")
out_file.write("<type> SDE </type>\n\n")
out_file.write("# Fitting information. If fit is None, all species in the model are fitted to the data in the order they are listed in the model.\n")
    out_file.write("# Otherwise, give a whitespace delimited list of fitting instructions the same length as the dimensions of your data.\n")
out_file.write("# Use speciesN to denote the Nth species in your model. Simple arithmetic operations can be performed on the species from your model.\n")
out_file.write("# For example, to fit the sum of the first two species in your model to your first variable, write fit: species1+species2\n")
out_file.write("<fit> None </fit>\n\n")
document=reader.readSBML(source[i])
model=document.getModel()
numSpecies=model.getNumSpecies()
numGlobalParameters=model.getNumParameters()
parameter=[]
parameterId=[]
parameterId2=[]
listOfParameter=[]
r1=0
r2=0
r3=0
listOfRules=model.getListOfRules()
for k in range(0, len(listOfRules)):
if model.getRule(k).isAlgebraic(): r1=r1+1
if model.getRule(k).isAssignment(): r2=r2+1
if model.getRule(k).isRate(): r3=r3+1
comp=0
NumCompartments=model.getNumCompartments()
for k in range(0,NumCompartments):
if model.getCompartment(k).isSetVolume():
comp=comp+1
numGlobalParameters=numGlobalParameters+1
parameter.append(model.getListOfCompartments()[k].getVolume())
parameterId.append(model.getListOfCompartments()[k].getId())
parameterId2.append('compartment'+repr(k+1))
listOfParameter.append(model.getListOfCompartments()[k])
for k in range(0,numGlobalParameters-comp):
param=model.getParameter(k)
parameter.append(param.getValue())
parameterId.append(param.getId())
parameterId2.append('parameter'+repr(k+1))
listOfParameter.append(param)
numLocalParameters=0
NumReactions=model.getNumReactions()
for k in range(0,NumReactions):
local=model.getReaction(k).getKineticLaw().getNumParameters()
numLocalParameters=numLocalParameters+local
for j in range(0,local):
parameter.append(model.getListOfReactions()[k].getKineticLaw().getParameter(j).getValue())
parameterId.append(model.getListOfReactions()[k].getKineticLaw().getParameter(j).getId())
x=len(parameterId)-comp
parameterId2.append('parameter'+repr(x))
listOfParameter.append(model.getListOfReactions()[k].getKineticLaw().getParameter(j))
numParameters=numLocalParameters+numGlobalParameters
species = model.getListOfSpecies()
##for k in range(0, len(species)):
##if (species[k].getConstant() == True):
##numParameters=numParameters+1
##parameter.append(getSpeciesValue(species[k]))
##parameterId.append(species[k].getId())
##parameterId2.append('species'+repr(k+1))
##numSpecies=numSpecies-1
sum_file.write("number of compartments: "+repr(NumCompartments)+"\n")
sum_file.write("number of reactions: "+repr(NumReactions)+"\n")
sum_file.write("number of rules: "+repr(model.getNumRules())+"\n")
if model.getNumRules()>0:
sum_file.write("\t Algebraic rules: "+repr(r1)+"\n")
sum_file.write("\t Assignment rules: "+repr(r2)+"\n")
sum_file.write("\t Rate rules: "+repr(r3)+"\n\n")
sum_file.write("number of functions: "+repr(model.getNumFunctionDefinitions())+"\n")
sum_file.write("number of events: "+repr(model.getNumEvents())+"\n\n")
paramAsSpecies=0
sum_file.write("Species with initial values: "+repr(numSpecies)+"\n")
out_file.write("# Priors on initial conditions and parameters:\n")
out_file.write("# one of \n")
out_file.write("# constant, value \n")
out_file.write("# normal, mean, variance \n")
out_file.write("# uniform, lower, upper \n")
out_file.write("# lognormal, mean, variance \n\n")
out_file.write("<initial>\n")
x=0
for k in range(0,len(species)):
##if (species[k].getConstant() == False):
x=x+1
#out_file.write(repr(getSpeciesValue(species[k]))+", ")
out_file.write(" <ic"+repr(x)+"> constant "+repr(getSpeciesValue(species[k]))+" </ic"+repr(x)+">\n")
sum_file.write("S"+repr(x)+":\t"+species[k].getId()+"\tspecies"+repr(k+1)+"\t("+repr(getSpeciesValue(species[k]))+")\n")
for k in range(0,len(listOfParameter)):
if listOfParameter[k].getConstant()==False:
for j in range(0, len(listOfRules)):
if listOfRules[j].isRate():
if parameterId[k]==listOfRules[j].getVariable():
x=x+1
paramAsSpecies=paramAsSpecies+1
#out_file.write(repr(listOfParameter[k].getValue())+", ")
out_file.write(" <ic"+repr(x)+"> constant "+repr(listOfParameter[k].getValue())+" </ic"+repr(x)+">\n")
sum_file.write("S"+repr(x)+":\t"+listOfParameter[k].getId()+"\tparameter"+repr(k+1-comp)+"\t("+repr(listOfParameter[k].getValue())+") (parameter included in a rate rule and therefore treated as species)\n")
out_file.write("</initial>\n\n")
sum_file.write("\n")
if(numGlobalParameters==0): string=" (all of them are local parameters)\n"
elif(numGlobalParameters==1): string=" (the first parameter is a global parameter)\n"
elif(numLocalParameters==0): string=" (all of them are global parameters)\n"
    else: string=" (the first "+repr(numGlobalParameters)+" are global parameters)\n"
sum_file.write("Parameter: "+repr(numParameters)+string)
sum_file.write("("+repr(paramAsSpecies)+" parameter is treated as species)\n")
out_file.write("<parameters>\n")
counter=0
for k in range(0,numParameters-paramAsSpecies):
Print = True
if k<len(listOfParameter):
if listOfParameter[k].getConstant()==False:
for j in range(0, len(listOfRules)):
if listOfRules[j].isRate():
if parameterId[k]==listOfRules[j].getVariable(): Print = False
if Print ==True:
counter=counter+1
sum_file.write("P"+repr(counter)+":\t"+parameterId[k]+"\t"+parameterId2[k]+"\t("+repr(parameter[k])+")\n")
out_file.write("<parameter"+repr(counter)+">")
out_file.write(" constant ")
out_file.write(repr(parameter[k])+" </parameter"+repr(counter)+">\n")
sum_file.write("\n############################################################\n\n")
out_file.write("</parameters>\n")
out_file.write("</model"+repr(i+1)+">\n\n")
out_file.write("</models>\n\n")
out_file.write("</input>\n\n")
out_file.close()
sum_file.close()
| MichaelPHStumpf/Peitho | peitho/errors_and_parsers/abc_sysbio/abcsysbio/generateTemplate.py | Python | mit | 13,980 |
from distutils.core import setup
import py2exe
import os
import sys
sys.argv.append('py2exe')
# The filename of the script you use to start your program.
target_file = 'main.py'
# The root directory containing your assets, libraries, etc.
assets_dir = '.\\'
# Filetypes not to be included in the above.
excluded_file_types = ['py','pyc','project','pydevproject']
def get_data_files(base_dir, target_dir, list=[]):
"""
" * get_data_files
" * base_dir: The full path to the current working directory.
" * target_dir: The directory of assets to include.
" * list: Current list of assets. Used for recursion.
" *
" * returns: A list of relative and full path pairs. This is
" * specified by distutils.
"""
for file in os.listdir(base_dir + target_dir):
full_path = base_dir + target_dir + file
if os.path.isdir(full_path):
get_data_files(base_dir, target_dir + file + '\\', list)
elif os.path.isfile(full_path):
if (len(file.split('.')) == 2 and file.split('.')[1] not in excluded_file_types):
list.append((target_dir, [full_path]))
return list
# The directory of assets to include.
my_files = get_data_files(sys.path[0] + '\\', assets_dir)
# Build a dictionary of the options we want.
opts = { 'py2exe': {
'ascii':'True',
'excludes':['_ssl','_hashlib'],
'includes' : ['anydbm', 'dbhash'],
'bundle_files':'1',
'compressed':'True'}}
# Run the setup utility.
setup(console=[target_file],
data_files=my_files,
zipfile=None,
options=opts) | lantra/vugamedev | src/setup.py | Python | mit | 1,710 |
# Import time (for delay) library (for SmartHome api) and GPIO (for raspberry pi gpio)
from library import SmartHomeApi
import RPi.GPIO as GPIO
import time
from datetime import datetime
# 7 -> LED
# Create the client with pre-existing credentials
api = SmartHomeApi("http://localhost:5000/api/0.1", id=10, api_key="api_eMxSb7n6G10Svojn3PlU5P6srMaDrFxmKAnWvnW6UyzmBG")
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT)
last_status = "UNKNOWN"
while True:
preferences = api.GetUserPrefences(2)['results']
print(preferences)
    # next() with a default avoids a StopIteration crash when no 'bedtime' preference exists yet
    preference = next((item for item in preferences if item["key"] == "bedtime"), None)
if not preference:
        print("Could not find 'bedtime' preference!")
api.AddPreference(2, "bedtime", "00:00")
print("Created bedtime preference! Please set it to the correct value in your dashboard")
else:
bedtime = preference['value']
if not bedtime:
            print("Unexpected error occurred!")
else:
print(bedtime)
time_str = datetime.now().strftime('%H:%M')
print("time: {}".format(time_str))
bedtime_dt = datetime.strptime(bedtime, "%H:%M")
time_hm = datetime.strptime(time_str, "%H:%M")
if time_hm >= bedtime_dt:
print("Going to bed! Currently: {}, going to bed at {}".format(time_str, bedtime))
GPIO.output(7, GPIO.LOW)
else:
print("Not yet time for bed. Currently: {}, going to bed at {}".format(time_str, bedtime))
GPIO.output(7, GPIO.HIGH)
time.sleep(1)
| How2Compute/SmartHome | cli/demo2.py | Python | mit | 1,643 |
# File: etl.py
# Purpose: To do the `Transform` step of an Extract-Transform-Load.
# Programmer: Amal Shehu
# Course: Exercism
# Date: Thursday 22 September 2016, 03:40 PM
def transform(words):
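    # Invert legacy {point: [letters]} scoring data into {lowercase letter: point}.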
new_words = dict()
for point, letters in words.items():
for letter in letters:
new_words[letter.lower()] = point
return new_words
| amalshehu/exercism-python | etl/etl.py | Python | mit | 382 |
from importlib import import_module
from django.apps import AppConfig as BaseAppConfig
class AppConfig(BaseAppConfig):
name = "portal"
def ready(self):
import_module("portal.receivers")
| acarl123/acuity | portal/apps.py | Python | mit | 207 |
class Solution(object):
def maxEnvelopes(self, envelopes):
"""
:type envelopes: List[List[int]]
        :rtype: int
        """
        # Sketch of a standard O(n log n) approach (the author's intended solution
        # is not shown in this stub): sort by width ascending and, for equal widths,
        # by height descending, then take the longest strictly increasing
        # subsequence of heights.
        import bisect
        envelopes.sort(key=lambda e: (e[0], -e[1]))
        tails = []
        for _, h in envelopes:
            i = bisect.bisect_left(tails, h)
            if i == len(tails):
                tails.append(h)
            else:
                tails[i] = h
        return len(tails)
| xingjian-f/Leetcode-solution | 354. Russian Doll Envelopes.py | Python | mit | 123 |
import wordtools
import random
from forms.form import Form
class MarkovForm(Form):
def __init__(self):
self.data={}
self.data[""]={}
self.limiter=0
def validate(self,tweet):
cleaned = wordtools.clean(tweet)
if wordtools.validate(cleaned) and len(cleaned)>=2:
return cleaned
else:
return None
def save(self,a):
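        # Record bigram transition counts; the empty string "" acts as the
        # start/end-of-text sentinel for the Markov chain.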
a.insert(0,"")
a.append("")
for i in range(0,len(a)-1):
if not a[i] in self.data:
self.data[a[i]]={}
if a[i+1] in self.data[a[i]]:
self.data[a[i]][a[i+1]]+=1
else:
self.data[a[i]][a[i+1]]=1
def build(self):
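        # Return None until build() has been called at least 1000 times, and then
        # only generate on every 300th call; otherwise walk the chain, picking each
        # next word with probability proportional to its observed transition count.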
self.limiter+=1
if self.limiter < 1000 or not self.limiter%300==0:
return None
s = ""
lastWord = ""
while True:
total = 0
for word in self.data[lastWord]:
total+=self.data[lastWord][word]
choice = random.randint(0,total-1)
total = 0
for word in self.data[lastWord]:
total+=self.data[lastWord][word]
if total>choice:
lastWord=word
s+=word+" "
break
if lastWord=="":
break
return s.lower()
| mlesicko/automaticpoetry | forms/markov.py | Python | mit | 1,023 |
from sqlalchemy import Column, Integer, String, Sequence, ForeignKey, Enum, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from . import Base
from .utils import ModelMixin
class Source(Base, ModelMixin):
__tablename__ = 'source'
__repr_props__ = ['id', 'name']
# internal id
id = Column(Integer, Sequence('source_id_seq'), primary_key=True, unique=True, nullable=False)
# describe source
name = Column(String(50), unique=True, nullable=False)
description = Column(String(250))
class Species(Base, ModelMixin):
__tablename__ = 'species'
__repr_props__ = ['id', 'external_id', 'name']
# internal id
id = Column(Integer, Sequence('species_id_seq'), primary_key=True, unique=True, nullable=False)
# record origin
external_id = Column(Integer, unique=True, nullable=False, index=True)
source_id = Column(Integer, ForeignKey('source.id'), nullable=False)
source = relationship('Source')
name = Column(String(150), unique=False, nullable=False)
class Compound(Base, ModelMixin):
__tablename__ = 'compound'
__repr_props__ = ['id', 'external_id']
# internal id
id = Column(Integer, Sequence('compound_id_seq'), primary_key=True, unique=True, nullable=False)
# record origin
external_id = Column(String, unique=True, nullable=False, index=True)
source_id = Column(Integer, ForeignKey('source.id'), nullable=False)
source = relationship('Source')
smiles = Column(String(750), nullable=False)
class Target(Base, ModelMixin):
__tablename__ = 'target'
__repr_props__ = ['id', 'external_id']
# internal id
id = Column(Integer, Sequence('target_id_seq'), primary_key=True, unique=True, nullable=False)
# record origin
external_id = Column(String, unique=True, nullable=False, index=True)
source_id = Column(Integer, ForeignKey('source.id'), nullable=False)
source = relationship('Source')
# define species
species_id = Column(Integer, ForeignKey('species.id'), nullable=False)
species = relationship('Species', backref='targets')
# define target sequence
sequence = Column(String)
ASSAYS = Enum('ADMET', 'Binding', 'Functional', 'Property', 'Unassigned',
name='assay_type')
ACTIVITIES = Enum('Kd', 'AC50', 'Potency', 'XC50', 'IC50', 'Ki', 'EC50',
name='activity_type')
RELATIONS = Enum('=', '>', '<', '<=', '>=', name='relation')
class Activity(Base, ModelMixin):
__tablename__ = 'activity'
__repr_props__ = ['id', 'compound', 'relation', 'value']
# internal id
id = Column(Integer, Sequence('activity_id_seq'), primary_key=True, unique=True, nullable=False)
# record origin
external_id = Column(String, nullable=False)
source_id = Column(Integer, ForeignKey('source.id'), nullable=False)
source = relationship('Source') # many to one, no map back
# define the activity
relation = Column(RELATIONS, nullable=False)
value = Column(Float, nullable=False)
assay_type = Column(ASSAYS, nullable=False)
activity_type = Column(ACTIVITIES, nullable=False)
confidence_score = Column(Integer, index=True)
#Link to target
target_id = Column(Integer, ForeignKey('target.id'), nullable=False)
target = relationship('Target', backref='activities')
#Link to compound
compound_id = Column(Integer, ForeignKey('compound.id'), nullable=False)
compound = relationship('Compound', backref='activities')
def __repr__(self):
return '<Activity(id=\'{id}\' compound=\'{compound}\' '\
'target=\'{target}\' relation=\'{relation}{value}\')>'\
.format(id=self.id,
relation=self.relation,
target=self.target.external_id,
compound=self.compound.external_id,
value=self.value)
| richlewis42/qsardb | qsardb/models/models.py | Python | mit | 3,946 |
from investor_lifespan_model.investor import Investor
from investor_lifespan_model.market import Market
from investor_lifespan_model.insurer import Insurer
from investor_lifespan_model.lifespan_model import LifespanModel
from investor_lifespan_model.mortality_data import π, G, tf
| moehle/investor_lifespan_model | investor_lifespan_model/__init__.py | Python | mit | 282 |
# -*- coding: utf-8 -*-
import pytest
from tests.models.test_etl_record import etl_records # noqa
from tests.models.test_abstract_records import dynamodb_connection # noqa
from mycroft.backend.worker.etl_status_helper import ETLStatusHelper
import mock
RECORDS = [
{'status': 'error', 'date': '2014-09-01', 'start_time': 4, 'end_time': 10,
'error_info': {'crash_a': 'error_a', 'crash_b': 'error_b'}},
{'status': 'success', 'date': '2014-09-02', 'start_time': 6, 'end_time': 12,
'error_info': {}},
]
MSG = {
'uuid': 'some-uuid',
'redshift_id': 'some-rs-id',
}
KWARGS = {
'hash_key': None,
'etl_status': None,
'et_starttime': None,
'load_starttime': None,
'data_date': None,
'run_by': None,
'redshift_id': None,
'job_id': None,
'etl_error': None,
}
class TestETLStatusHelper(object):
@pytest.yield_fixture # noqa
def get_etl_helper(self, etl_records):
with mock.patch(
'mycroft.models.aws_connections.TableConnection.get_connection'
) as mocked_etl:
mocked_etl.return_value = etl_records
yield ETLStatusHelper()
def test_etl_step_started(self, get_etl_helper):
etl = get_etl_helper
for r in RECORDS:
date = r['date']
step = 'et'
# run twice to hit new and update record cases
etl.etl_step_started(MSG, date, step)
etl.etl_step_started(MSG, date, step)
entry = etl.etl_db.get(hash_key='some-uuid', data_date=date)
entry_dict = entry.get(**KWARGS)
assert entry_dict['hash_key'] == 'some-uuid'
assert entry_dict['data_date'] == date
with pytest.raises(ValueError):
etl.etl_step_started(MSG, None, 'et')
def test_etl_step_complete(self, get_etl_helper):
etl = get_etl_helper
for r in RECORDS:
date = r['date']
step = 'et'
# test case: no previous record
etl.etl_step_complete(MSG, date, step, r)
# test case: existing record
etl.etl_step_started(MSG, date, step)
etl.etl_step_complete(MSG, date, step, r)
entry = etl.etl_db.get(hash_key='some-uuid', data_date=date)
entry_dict = entry.get(**KWARGS)
assert entry_dict['hash_key'] == 'some-uuid'
assert entry_dict['data_date'] == date
if entry_dict['etl_status'] == 'load_success':
assert entry_dict.get('etl_error') is None
elif entry_dict['etl_status'] == 'load_error':
assert entry_dict['etl_error'] == str(r['error_info'])
with pytest.raises(ValueError):
etl.etl_step_complete(MSG, None, 'et', RECORDS[0])
| Yelp/mycroft | mycroft/tests/backend/test_etl_helper.py | Python | mit | 2,789 |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.tcm.v20210413 import models
class TcmClient(AbstractClient):
_apiVersion = '2021-04-13'
_endpoint = 'tcm.tencentcloudapi.com'
_service = 'tcm'
def DescribeMesh(self, request):
"""查询网格详情
:param request: Request instance for DescribeMesh.
:type request: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshRequest`
:rtype: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeMesh", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeMeshResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeMeshList(self, request):
"""查询网格列表
:param request: Request instance for DescribeMeshList.
:type request: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshListRequest`
:rtype: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeMeshList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeMeshListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message) | tzpBingo/github-trending | codespace/python/tencentcloud/tcm/v20210413/tcm_client.py | Python | mit | 3,255 |
"""
WSGI config for veterinario project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "veterinario.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| taopypy/taopypy-django-tests | veterinario/veterinario/wsgi.py | Python | mit | 397 |
from MuellerBrown import getPotentialAndForces
from PlotUtils import PlotUtils
import numpy as np
import matplotlib.pyplot as plt
import MuellerBrown as mbpot
m=1.0
def getKineticEnergy(velocity):
return 0.5*m*(velocity[0]**2+velocity[1]**2)
dt = 0.01
num_steps = 1000
#initial_position = np.array( [ 0.0 , 0.0 ] )
initial_position = mbpot.saddlePoints[0]
initial_velocity = np.array( [ 1.0 , -1.0 ] )
position = np.zeros([num_steps+1,2])
velocity = np.zeros([num_steps+1,2])
potential_energy = np.zeros(num_steps+1)
kinetic_energy = np.zeros(num_steps+1)
total_energy = np.zeros(num_steps+1)
times = np.arange(num_steps+1)*dt
time = 0.0
position[0,:] = initial_position
velocity[0,:] = initial_velocity
kinetic_energy[0] = getKineticEnergy(initial_velocity)
(pot, force) = getPotentialAndForces(initial_position)
potential_energy[0] = pot
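# Velocity Verlet integration:
#   x(t+dt) = x(t) + v(t)*dt + F(t)/(2m)*dt^2
#   v(t+dt) = v(t) + (F(t) + F(t+dt))/(2m)*dt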
for i in range(0,num_steps):
# get position at t+dt
position[i+1] = position[i] + velocity[i]*dt+0.5*(force/m)*dt**2
# get velocity at t+dt
(new_pot, new_force) = getPotentialAndForces(position[i+1])
velocity[i+1] = velocity[i] + (0.5/m)*(new_force+force)*dt
# add stuff
kinetic_energy[i+1] = getKineticEnergy(velocity[i+1])
potential_energy[i+1] = new_pot
force = new_force
total_energy = potential_energy + kinetic_energy
pu = PlotUtils(mbpot,[200,200])
pu.plotPotential(trajectory=position)
plt.figure(1)
plt.plot(times,position[:,0])
plt.figure(2)
plt.plot(times,position[:,1])
plt.figure(3)
plt.plot(times,potential_energy)
plt.figure(4)
plt.plot(times,kinetic_energy)
plt.figure(5)
plt.ylim(0, np.max(total_energy)+1.0)
plt.plot(times,total_energy)
plt.show()
| valsson/MD-MC-Codes-2016 | MuellerBrown-MD/MD-MuellerBrown.py | Python | mit | 1,665 |
#-*- coding: utf-8 -*-
import os
import pandas as pd
import config
import pandas
import re
import math
from modules.valuations.valuation import Valuation
# Fair value: current EPS multiplied by the average PER over the past five years
class PER(Valuation):
def __init__(self, valuation):
data = valuation.get_data()
json = valuation.get_json()
Valuation.__init__(self, data, json)
self.set_json('PER', self.valuate())
def valuate(self):
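        # Fair value estimate: current EPS times the 5-year average PER; returns
        # None if either figure is missing or the product cannot be computed.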
try:
json = self.get_json()
return int(json['EPS'] * json['PER_5'])
except:
return None | jongha/stock-ai | modules/valuations/per.py | Python | mit | 540 |
class HighScores(object):
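    """Track a list of game scores: latest entry, personal best, and top three."""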
def __init__(self, scores):
self.scores = scores
def latest(self):
return self.scores[-1]
def personal_best(self):
return max(self.scores)
def personal_top_three(self):
return sorted(self.scores, reverse=True)[:3]
| N-Parsons/exercism-python | exercises/high-scores/example.py | Python | mit | 291 |
# -*- coding: utf-8 -*-
#
# GitComponentVersion documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 11 10:51:23 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GitComponentVersion'
copyright = u'2017, Kevin Johnson'
author = u'Kevin Johnson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'GitComponentVersiondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GitComponentVersion.tex', u'GitComponentVersion Documentation',
u'Kevin Johnson', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gitcomponentversion', u'GitComponentVersion Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GitComponentVersion', u'GitComponentVersion Documentation',
author, 'GitComponentVersion', 'One line description of project.',
'Miscellaneous'),
]
| kjjuno/GitComponentVersion | docs/source/conf.py | Python | mit | 4,802 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018 TinEye. All rights reserved worldwide.
from .matchengine_request import MatchEngineRequest
class MobileEngineRequest(MatchEngineRequest):
"""
Class to send requests to a MobileEngine API.
Adding an image using data:
>>> from tineyeservices import MobileEngineRequest, Image
>>> api = MobileEngineRequest(api_url='http://localhost/rest/')
>>> image = Image(filepath='/path/to/image.jpg')
>>> api.add_image(images=[image])
{u'error': [], u'method': u'add', u'result': [], u'status': u'ok'}
Searching for an image using an image URL:
>>> api.search_url(url='https://tineye.com/images/meloncat.jpg')
{'error': [],
'method': 'search',
'result': [{'filepath': 'match1.png',
'score': '97.2',
'overlay': 'overlay/query.png/match1.png[...]'}],
'status': 'ok'}
"""
def __repr__(self):
return "MobileEngineRequest(api_url=%r, username=%r, password=%r)" %\
(self.api_url, self.username, self.password)
| TinEye/tineyeservices_python | tineyeservices/mobileengine_request.py | Python | mit | 1,115 |
#!/home/elsa/Ureka/variants/common/bin/python
#Code to generate an image with perlin noise as background.
#Used in UO scientific computing course Spring 2016
#Perlin code and noise package is from Casey Duncan
#https://github.com/caseman/noise/examples/2dtexture.py
#Remaining code by Elsa M. Johnson
from noise import pnoise2, snoise2
import numpy as np
from matplotlib import pyplot as plt
import random as random
from PIL import Image
import matplotlib.image as mpimg
#Create noise -
#size is image size
#octaves creates different sized noise regions
def nbackground(sx=1000,sy=1000,octaves=50):
# size=1000
# array = np.zeros((size, size), np.float)
array = np.zeros((sx, sy), np.float)
# octaves = 50
freq = 16.0 * octaves
for y in xrange(sy):
for x in xrange(sx):
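			# snoise2 returns simplex noise in [-1, 1]; scaling by 127 and adding 128
			# maps it into roughly [1, 255], like an 8-bit grayscale background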
data=snoise2(x / freq, y / freq, octaves) * 127.0 + 128.0
array[x, y] += data
	plt.imshow(array, cmap=plt.cm.Greys_r)
	return array
#To get the pasting right, use cpaste, paste isn't working.
#Now creating a small image; note that the image must be smaller than sx and sy
#make sure scspiral.png is in the directory
#Will later make the size more variable
def mkimg(infile='scspiral.png',sz=1000):
data =[]
# infile = 'scspiral.png'
im=Image.open(infile).convert('L')
plt.imshow(im)
imlist = list(im.getdata())
x=int(im.size[1])
y=im.size[0]
im.format
data=np.array(imlist).reshape(x,y)
cdata=data[50:150,50:150] #cropping image a bit
#pad with zeros to fit noise image:
xx,yy=cdata.shape #size of object
#randomly pick a beginning number for
#location of image and pad with zeros
xlim = sz-xx-10
ylim = sz-yy-10
# These were specific numbers based on the image
# Which in the end might be better than random placement
# begx=436
# begy=596
begx=random.randint(1,xlim)
begy=random.randint(1,ylim)
print 'center of embedded image',begx+50,begy+50
# Create limits to pad with zeros
zx1 = begx-1
zx2 = sz-zx1-xx
zy1 = begy-1
zy2 = sz-zy1-yy
bimg=np.lib.pad(cdata,((zx1,zx2),(zy1,zy2)),'constant',constant_values=(0,0))
return bimg
# This combines both images and scales the added image based on the S/N ratio
# imarr is the image from mkimg and noisearr is from nbackground
# sz = is the size of box for doing S/N calculations and s2n is the desired
# S/N ratio
def combineimg(imarr,noisearr,s2n=5.0,thresh=100,sz=10,):
b=imarr
b[b<thresh]=0
x,y=np.where(b==b.max())
sig=b[x[0]-5:x[0]+5, y[0]-5:y[0]+5].sum()
nse=noisearr[x[0]-5:x[0]+5, y[0]-5:y[0]+5].sum()
#quadratic formula to find fct so that S/N is correct
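	# Derivation implied by the formula below: with fs = sig/fct the scaled signal,
	# requiring fs / sqrt(nse + fs) = s2n gives fs**2 - s2n**2*fs - s2n**2*nse = 0,
	# whose positive root is fs; the image is then divided by fct = sig/fs.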
fs = (s2n*s2n +np.sqrt(s2n**4.+4*nse*s2n*s2n))/2
fct=sig/fs
b=b/fct
#note to find location of b max: where(b==b.max())
totimg = b+noisearr
plt.figure()
plt.imshow(totimg,cmap=plt.cm.Greys_r)
return totimg
#Next routine calculates the mean and standard deviation of a randomly placed box of pixels
def imgstats(arr,sz=100):
# imm=np.mean(arr[x-sz/2:x+sz/2,y-sz/2:y+sz/2])
# ims=np.std(arr[x-sz/2:x+sz/2,y-sz/2:y+sz/2])
ax,ay=arr.shape
begx=np.random.randint(1,ax-sz)
begy=np.random.randint(1,ay-sz)
rm = np.mean(arr[begx:begx+sz,begy:begy+sz])
rs = np.std(arr[begx:begx+sz,begy:begy+sz])
# print 'mean,std about image', imm,ims
print 'random center, mean, std',begx,begy,rm,rs
#previous values
#np.mean(stuff[436:536,596:696])
#np.std(stuff[436:536,596:696])
#np.mean(stuff[461:511,621:671])
#np.std(stuff[461:511,621:671])
def svimg(totarr):
#print it out:
x,y=totarr.shape
vl = np.around(totarr.flatten(),5)#round to 5 digits
	xx = np.repeat(np.arange(x),y)+1 #row indices, each repeated once per column
	yy = np.tile(np.arange(y),x)+1 #column indices, tiled once per row
big =np.column_stack((xx,yy,vl))
np.savetxt("noisyimage.txt",big,fmt=('%4.1f','%4.1f','%10.5f'))
##Add this if you want to
##read it out to make sure it works
##Otherwise slows down routine.
#row,col,data=np.loadtxt("noisyimage.txt",unpack=True)
#rsize = int(max(row))
#csize = int(max(col))
#data=np.array(data).reshape(rsize,csize)
# plt.imshow(data, interpolation='None',cmap=plt.cm.Greys_r)
def main():
noiseimg = nbackground()
hiddenimg = mkimg()
timg = combineimg(hiddenimg,noiseimg)
imgstats(timg)
svimg(timg)
main()
plt.show()
| ElsaMJohnson/pythonprograms | galaxyperlin.py | Python | mit | 4,116 |
from .workout import Workout
from .duration import Time
from .duration import Distance | claha/suunto | suunto/__init__.py | Python | mit | 86 |
from sqlalchemy import and_
from DBtransfer import *
from zlib import *
#returns a compressed run list
def generateFromDB(DBSession, InternData, tmp_name) :
run_list=[]
user_data = DBSession.query(InternData).filter(InternData.timestamp == tmp_name)
for data in user_data :
if not data.run in run_list :
run_list.append(data.run)
return compressList(run_list)
def getknown_runsAndrun_list(DBSession, Mass_specData, InternData, tmp_name) : #CR: rename to splitKnownAndTodo
#~ knownRuns = [] # devide runs from upload into known runs (in DB) ...
#~ runList = [] #...and the usual run_list, to get data from these runs
#CR:
runs_in_upload = decompressList(generateFromDB(DBSession, InternData, tmp_name))
#~ known_runs = [x for x in DBSession.query(Mass_specData.filename).all() if x in runs_in_upload]
known_runs = [x.filename for x in DBSession.query(Mass_specData).filter(Mass_specData.filename.in_(runs_in_upload))]
run_list = [x for x in runs_in_upload if x not in known_runs]
#~ allRuns = getAllRuns_Filename(DBSession, Mass_specData)# in DB saved runs
#~ decomruns_in_upload = decompressList(runs_in_upload)
#~ for run in decomruns_in_upload :
#~ if run in allRuns :
#~ knownRuns.append(run)
#~ else :
#~ runList.append(run)
return (known_runs, run_list)
#input compressed
#output not compressed
def usedRuns(run_list, params) :
list_of_used_runs = []
runs = decompressList(run_list)
for i in range(0, len(runs)) :
if runs[i] in params :
list_of_used_runs.append(runs[i])
return list_of_used_runs
# input not compressed
# output InternData objects
def rowsToFill(DBSession, InternData, tmp_name, used_runs) :
users_rows = getUserRows(DBSession, InternData, tmp_name)
rows = []
for row in users_rows :
if row.run in used_runs :
rows.append(row)
return rows
#input compressed, not compressed
def throughOutUsedRuns(run_list, used_runs) : # not compressed
rl = decompressList(run_list)
for run in used_runs :
rl.pop(rl.index(run))
if len(rl) > 0 :
return compressList(rl)
else :
return []
# run lists are stored as zlib-compressed strings joined with the '$$' separator
def compressList(list) :
return compress('$$'.join(list))
#input compressed
def decompressList(run_list) :
return decompress(run_list).split('$$')
| mwalzer/Ligandomat | ligandomat/run_list_handling.py | Python | mit | 2,262 |
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout,Submit
from .models import Details, Feedback
from crispy_forms.bootstrap import TabHolder, Tab
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
class AddmeForm(forms.ModelForm):
class Meta:
model = Details
exclude = ['']
"""Forms for the ``feedback_form`` app."""
class FeedbackForm(forms.ModelForm):
class Meta:
model = Feedback
fields = ('email', 'message')
| Thuruv/pilgrim | blood/forms.py | Python | mit | 537 |
#!/usr/bin/env python3
"""
make_confidence_report_bundle_examples.py
Usage:
make_confidence_report_bundle_examples.py model.joblib a.npy
make_confidence_report_bundle_examples.py model.joblib a.npy b.npy c.npy
where model.joblib is a file created by cleverhans.serial.save containing
a picklable cleverhans.model.Model instance and each examples_i.npy is
a saved numpy array containing adversarial examples for a whole dataset.
Usually examples_i.npy is the output of make_confidence_report.py or
make_confidence_report_bundled.py.
This script uses max-confidence attack bundling
( https://openreview.net/forum?id=H1g0piA9tQ )
to combine adversarial example datasets that were created earlier.
It will save a ConfidenceReport to model_bundled_examples_report.joblib.
The report can be later loaded by another
script using cleverhans.serial.load.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import warnings
import numpy as np
import tensorflow as tf
from cleverhans.utils_tf import silence
# We need to disable pylint's complaints about import order because `silence`
# works only if it is called before the other imports.
# pylint: disable=C0413
silence()
from cleverhans.attack_bundling import bundle_examples_with_goal, MaxConfidence
from cleverhans import serial
from cleverhans.compat import flags
from cleverhans.confidence_report import BATCH_SIZE
from cleverhans.confidence_report import TRAIN_START, TRAIN_END
from cleverhans.confidence_report import TEST_START, TEST_END
from cleverhans.confidence_report import WHICH_SET
FLAGS = flags.FLAGS
def main(argv=None):
"""
Make a confidence report and save it to disk.
"""
assert len(argv) >= 3
_name_of_script = argv[0]
model_filepath = argv[1]
adv_x_filepaths = argv[2:]
sess = tf.Session()
with sess.as_default():
model = serial.load(model_filepath)
factory = model.dataset_factory
factory.kwargs['train_start'] = FLAGS.train_start
factory.kwargs['train_end'] = FLAGS.train_end
factory.kwargs['test_start'] = FLAGS.test_start
factory.kwargs['test_end'] = FLAGS.test_end
dataset = factory()
adv_x_list = [np.load(filepath) for filepath in adv_x_filepaths]
x, y = dataset.get_set(FLAGS.which_set)
for adv_x in adv_x_list:
assert adv_x.shape == x.shape, (adv_x.shape, x.shape)
# Make sure these were made for the right dataset with right scaling
# arguments, etc.
assert adv_x.min() >= 0. - dataset.kwargs['center'] * dataset.max_val
assert adv_x.max() <= dataset.max_val
data_range = dataset.max_val * (1. + dataset.kwargs['center'])
if adv_x.max() - adv_x.min() <= .8 * data_range:
warnings.warn("Something is weird. Your adversarial examples use "
"less than 80% of the data range."
"This might mean you generated them for a model with "
"inputs in [0, 1] and are now using them for a model "
"with inputs in [0, 255] or something like that. "
"Or it could be OK if you're evaluating on a very small "
"batch.")
report_path = FLAGS.report_path
if report_path is None:
suffix = "_bundled_examples_report.joblib"
assert model_filepath.endswith('.joblib')
report_path = model_filepath[:-len('.joblib')] + suffix
goal = MaxConfidence()
bundle_examples_with_goal(sess, model, adv_x_list, y, goal,
report_path, batch_size=FLAGS.batch_size)
if __name__ == '__main__':
flags.DEFINE_string('report_path', None, 'Report path')
flags.DEFINE_integer('train_start', TRAIN_START, 'Starting point (inclusive)'
'of range of train examples to use')
flags.DEFINE_integer('train_end', TRAIN_END, 'Ending point (non-inclusive) '
'of range of train examples to use')
flags.DEFINE_integer('test_start', TEST_START, 'Starting point '
'(inclusive) of range of test examples to use')
flags.DEFINE_integer('test_end', TEST_END, 'End point (non-inclusive) of '
'range of test examples to use')
flags.DEFINE_string('which_set', WHICH_SET, '"train" or "test"')
flags.DEFINE_integer('batch_size', BATCH_SIZE, 'batch size')
tf.app.run()
| openai/cleverhans | scripts/make_confidence_report_bundle_examples.py | Python | mit | 4,354 |
bookprefix = {
'Genesis' : '1',
'genesis' : '1',
'Gen' : '1',
'gen' : '1',
'Exodus' : '2',
'exodus' : '2',
'Exo' : '2',
'exo' : '2',
'Ex' : '2',
'ex' : '2',
'Leviticus' : '3',
'leviticus' : '3',
'Lev' : '3',
'lev' : '3',
'Numbers' : '4',
'numbers' : '4',
'Numb' : '4',
'numb' : '4',
'Num' : '4',
'num' : '4',
'Deuteronomy' : '5',
'deuteronomy' : '5',
'Deut' : '5',
'deut' : '5',
'Joshua' : '6',
'joshua' : '6',
'Josh' : '6',
'josh' : '6',
'Judges' : '7' ,
'judges' : '7' ,
'Judg' : '7',
'judg' : '7',
'Ruth' : '8',
'ruth' : '8',
'1Samuel' : '9',
'1samuel' : '9',
'1Sam' : '9',
'1sam' : '9',
'2Samuel' : '10',
'2samuel' : '10',
'2Sam' : '10',
'2sam' : '10',
'1Kings' : '11',
'1kings' : '11',
'1King' : '11',
'1king' : '11',
'1Ki' : '11',
'1ki' : '11',
'2Kings' : '12',
'2kings' : '12',
'2King' : '12',
'2king' : '12',
'2Ki' : '12',
'2ki' : '12',
'1Chronicles' : '13',
'1chronicles' : '13',
'1Chron' : '13',
'1chron' : '13',
'2Chronicles' : '14',
'2chronicles' : '14',
'2Chron' : '14',
'2chron' : '14',
'Ezra' : '15',
'ezra' : '15',
'Nehemiah' : '16',
'nehemiah' : '16',
'Neh' : '16',
'neh' : '16',
'Esther' : '17',
'esther' : '17',
'Job' : '18',
'job' : '18',
'Psalms' : '19',
'psalms' : '19',
'Psalm' : '19',
'psalm' : '19',
'Ps' : '19',
'ps' : '19',
'Proverbs' : '20',
'proverbs' : '20',
'Proverb' : '20',
'proverb' : '20',
'Prov' : '20',
'prov' : '20',
'Ecclesiastes' : '21',
'ecclesiastes' : '21',
'Eccl' : '21',
'eccl' : '21',
'SongofSolomon' : '22',
'songofSolomon' : '22',
'songofsolomon' : '22',
'SongofSol' : '22',
'songofSol' : '22',
'songofsol' : '22',
'Isaiah' : '23',
'isaiah' : '23',
'Isa' : '23',
'isa' : '23',
'Jeremiah' : '24',
'jeremiah' : '24',
'Jer' : '24',
'jer' : '24',
'Lamentations' : '25',
'lamentations' : '25',
'Lam' : '25',
'lam' : '25',
'Ezekiel' : '26',
'ezekiel' : '26',
'Ez' : '26',
'ez' : '26',
'Daniel' : '27',
'daniel' : '27',
'Dan' : '27',
'dan' : '27',
'Hosea' : '28',
'hosea' : '28',
'Hos' : '28',
'hos' : '28',
'Joel' : '29',
'joel' : '29',
'Amos' : '30',
'amos' : '30',
'Obadiah' : '31',
'obadiah' : '31',
'Obad' : '31',
'obad' : '31',
'Jonah' : '32',
'jonah' : '32',
'Micah' : '33',
'micah' : '33',
'Mic' : '33',
'mic' : '33',
'Nahum' : '34' ,
'nahum' : '34' ,
'Nah' : '34',
'nah' : '34',
'Habakkuk' : '35',
'habakkuk' : '35',
'Hab' : '35',
'hab' : '35',
'Zephaniah' : '36',
'zephaniah' : '36',
'Zeph' : '36',
'zeph' : '36',
'Haggai' : '37',
'haggai' : '37',
'Hag' : '37',
'hag' : '37',
'Zechariah' : '38',
'zechariah' : '38',
'Zech' : '38',
'zech' : '38',
'Malachi' : '39',
'malachi' : '39',
'Mal' : '39',
'mal' : '39',
'Matthew' : '40',
'matthew' : '40',
'Matt' : '40',
'matt' : '40',
'Mark' : '41',
'mark' : '41',
'Luke' : '42',
'luke' : '42',
'John' : '43',
'john' : '43',
'Acts' : '44',
'acts' : '44',
'Act' : '44',
'act' : '44',
'Romans' : '45',
'romans' : '45',
'Rom' : '45',
'rom' : '45',
'1Corinthians' : '46',
'1corinthians' : '46',
'1Cor' : '46',
'1cor' : '46',
'2Corinthians' : '47',
'2corinthians' : '47',
'2Cor' : '47',
'2cor' : '47',
'Galatians' : '48',
'galatians' : '48',
'Gal' : '48',
'gal' : '48',
'Ephesians' : '49',
'ephesians' : '49',
'Eph' : '49',
'eph' : '49',
'Philippians' : '50',
'philippians' : '50',
'Phil' : '50',
'phil' : '50',
'Colossians' : '51',
'colossians' : '51',
'Col' : '51',
'col' : '51',
'1Thessalonians' : '52',
'1thessalonians' : '52',
'1Thess' : '52',
'1thess' : '52',
'2Thessalonians' : '53',
'2thessalonians' : '53',
'2Thess' : '53',
'2thess' : '53',
'1Timothy' : '54',
'1timothy' : '54',
'1Tim' : '54',
'1tim' : '54',
'2Timothy' : '55',
'2timothy' : '55',
'2Tim' : '55',
'2tim' : '55',
'Titus' : '56',
'titus' : '56',
'Philemon' : '57',
'philemon' : '57',
'Philem' : '57',
'philem' : '57',
'Hebrews' : '58',
'hebrews' : '58',
'Heb' : '58',
'heb' : '58',
'James' : '59',
'james' : '59',
'Jas' : '59',
'jas' : '59',
'1Peter' : '60',
'1peter' : '60',
'1Pet' : '60',
'1pet' : '60',
'2Peter' : '61',
'2peter' : '61',
'2Pet' : '61',
'2pet' : '61',
'1John' : '62',
'1john' : '62',
'2John' : '63',
'2john' : '63',
'3John' : '64',
'3john' : '64',
'Jude' : '65',
'jude' : '65',
'Revelation' : '66',
'revelation' : '66',
'Rev' : '66',
'rev' : '66'
}
| AugustG98/NWT-Bot | NWT-Bot/books.py | Python | mit | 5,132 |
import os
import six
from aleph.util import checksum
class Archive(object):
def _get_file_path(self, meta):
ch = meta.content_hash
if ch is None:
raise ValueError("No content hash available.")
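        # files are nested under directories named by the leading bytes of the
        # content hash, presumably to keep any single directory from growing too large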
path = os.path.join(ch[:2], ch[2:4], ch[4:6], ch)
file_name = 'data'
if meta.file_name is not None:
file_name = meta.file_name
else:
if meta.extension is not None:
file_name = '%s.%s' % (file_name, meta.extension)
return os.path.join(six.text_type(path), six.text_type(file_name))
def _update_metadata(self, filename, meta):
meta.content_hash = checksum(filename)
return meta
def upgrade(self):
"""Run maintenance on the store."""
pass
def archive_file(self, filename, meta, move=False):
"""Import the given file into the archive.
Return an updated metadata object. If ``move`` is given, the
original file will not exist afterwards.
"""
pass
def load_file(self, meta):
pass
def cleanup_file(self, meta):
pass
def generate_url(self, meta):
return
| smmbllsm/aleph | aleph/archive/archive.py | Python | mit | 1,182 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-16 06:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('voximplant', '0008_auto_20160514_0800'),
]
operations = [
migrations.RemoveField(
model_name='calllist',
name='completed',
),
migrations.AddField(
model_name='calllist',
name='downloaded',
field=models.DateTimeField(blank=True, help_text='Last datetime of checking state from VoxImplant', null=True),
),
migrations.AlterField(
model_name='calllistphone',
name='completed',
field=models.DateTimeField(blank=True, null=True),
),
]
| telminov/django-voximplant | voximplant/migrations/0009_auto_20160516_0649.py | Python | mit | 813 |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 15 20:55:19 2016
@author: ajaver
"""
import json
import os
from collections import OrderedDict
import zipfile
import numpy as np
import pandas as pd
import tables
from tierpsy.helper.misc import print_flush
from tierpsy.analysis.feat_create.obtainFeaturesHelper import WormStats
from tierpsy.helper.params import read_unit_conversions, read_ventral_side, read_fps
def getWCONMetaData(fname, READ_FEATURES=False, provenance_step='FEAT_CREATE'):
def _order_metadata(metadata_dict):
ordered_fields = ['strain', 'timestamp', 'gene', 'chromosome', 'allele',
'strain_description', 'sex', 'stage', 'ventral_side', 'media', 'arena', 'food',
'habituation', 'who', 'protocol', 'lab', 'software']
extra_fields = metadata_dict.keys() - set(ordered_fields)
ordered_fields += sorted(extra_fields)
ordered_metadata = OrderedDict()
for field in ordered_fields:
if field in metadata_dict:
ordered_metadata[field] = metadata_dict[field]
return ordered_metadata
with tables.File(fname, 'r') as fid:
if not '/experiment_info' in fid:
experiment_info = {}
else:
experiment_info = fid.get_node('/experiment_info').read()
experiment_info = json.loads(experiment_info.decode('utf-8'))
provenance_tracking = fid.get_node('/provenance_tracking/' + provenance_step).read()
provenance_tracking = json.loads(provenance_tracking.decode('utf-8'))
commit_hash = provenance_tracking['commit_hash']
if 'tierpsy' in commit_hash:
tierpsy_version = commit_hash['tierpsy']
else:
tierpsy_version = commit_hash['MWTracker']
MWTracker_ver = {"name":"tierpsy (https://github.com/ver228/tierpsy-tracker)",
"version": tierpsy_version,
"featureID":"@OMG"}
if not READ_FEATURES:
experiment_info["software"] = MWTracker_ver
else:
#add open_worm_analysis_toolbox info and save as a list of "softwares"
open_worm_ver = {"name":"open_worm_analysis_toolbox (https://github.com/openworm/open-worm-analysis-toolbox)",
"version":commit_hash['open_worm_analysis_toolbox'],
"featureID":""}
experiment_info["software"] = [MWTracker_ver, open_worm_ver]
return _order_metadata(experiment_info)
def __reformatForJson(A):
if isinstance(A, (int, float)):
return A
good = ~np.isnan(A) & (A != 0)
dd = A[good]
if dd.size > 0:
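        # pick the rounding precision from the order of magnitude of the values
        # (never fewer than 2 decimals), presumably to keep the serialized JSON compact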
dd = np.abs(np.floor(np.log10(np.abs(dd)))-2)
precision = max(2, int(np.min(dd)))
A = np.round(A.astype(np.float64), precision)
A = np.where(np.isnan(A), None, A)
    #the WCON specification requires a bare number rather than a one-element list
if A.size == 1:
return A[0]
else:
return A.tolist()
def __addOMGFeat(fid, worm_feat_time, worm_id):
worm_features = OrderedDict()
#add time series features
for col_name, col_dat in worm_feat_time.iteritems():
if not col_name in ['worm_index', 'timestamp']:
worm_features[col_name] = col_dat.values
worm_path = '/features_events/worm_%i' % worm_id
worm_node = fid.get_node(worm_path)
#add event features
for feature_name in worm_node._v_children:
feature_path = worm_path + '/' + feature_name
worm_features[feature_name] = fid.get_node(feature_path)[:]
return worm_features
def _get_ventral_side(features_file):
ventral_side = read_ventral_side(features_file)
if not ventral_side or ventral_side == 'unknown':
ventral_type = '?'
else:
#we will merge the ventral and dorsal contours so the ventral contour is clockwise
ventral_type='CW'
return ventral_type
def _getData(features_file, READ_FEATURES=False, IS_FOR_WCON=True):
if IS_FOR_WCON:
lab_prefix = '@OMG '
else:
lab_prefix = ''
with pd.HDFStore(features_file, 'r') as fid:
if not '/features_timeseries' in fid:
return {} #empty file nothing to do here
features_timeseries = fid['/features_timeseries']
feat_time_group_by_worm = features_timeseries.groupby('worm_index');
ventral_side = _get_ventral_side(features_file)
with tables.File(features_file, 'r') as fid:
#fps used to adjust timestamp to real time
fps = read_fps(features_file)
#get pointers to some useful data
skeletons = fid.get_node('/coordinates/skeletons')
dorsal_contours = fid.get_node('/coordinates/dorsal_contours')
ventral_contours = fid.get_node('/coordinates/ventral_contours')
#let's append the data of each individual worm as a element in a list
all_worms_feats = []
#group by iterator will return sorted worm indexes
for worm_id, worm_feat_time in feat_time_group_by_worm:
worm_id = int(worm_id)
#read worm skeletons data
worm_skel = skeletons[worm_feat_time.index]
worm_dor_cnt = dorsal_contours[worm_feat_time.index]
worm_ven_cnt = ventral_contours[worm_feat_time.index]
#start ordered dictionary with the basic features
worm_basic = OrderedDict()
worm_basic['id'] = str(worm_id)
worm_basic['head'] = 'L'
worm_basic['ventral'] = ventral_side
worm_basic['ptail'] = worm_ven_cnt.shape[1]-1 #index starting with 0
worm_basic['t'] = worm_feat_time['timestamp'].values/fps #convert from frames to seconds
worm_basic['x'] = worm_skel[:, :, 0]
worm_basic['y'] = worm_skel[:, :, 1]
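            # concatenate the ventral contour with the reversed dorsal contour so the
            # two halves form one closed outline (ventral side first, as noted above)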
contour = np.hstack((worm_ven_cnt, worm_dor_cnt[:, ::-1, :]))
worm_basic['px'] = contour[:, :, 0]
worm_basic['py'] = contour[:, :, 1]
if READ_FEATURES:
worm_features = __addOMGFeat(fid, worm_feat_time, worm_id)
for feat in worm_features:
worm_basic[lab_prefix + feat] = worm_features[feat]
if IS_FOR_WCON:
for x in worm_basic:
if not x in ['id', 'head', 'ventral', 'ptail']:
worm_basic[x] = __reformatForJson(worm_basic[x])
#append features
all_worms_feats.append(worm_basic)
return all_worms_feats
def _getUnits(features_file, READ_FEATURES=False):
fps_out, microns_per_pixel_out, _ = read_unit_conversions(features_file)
xy_units = microns_per_pixel_out[1]
time_units = fps_out[2]
units = OrderedDict()
units["size"] = "mm" #size of the plate
units['t'] = time_units #frames or seconds
for field in ['x', 'y', 'px', 'py']:
units[field] = xy_units #(pixels or micrometers)
if READ_FEATURES:
#TODO how to change microns to pixels when required
ws = WormStats()
for field, unit in ws.features_info['units'].iteritems():
units['@OMG ' + field] = unit
return units
def exportWCONdict(features_file, READ_FEATURES=False):
metadata = getWCONMetaData(features_file, READ_FEATURES)
data = _getData(features_file, READ_FEATURES)
units = _getUnits(features_file, READ_FEATURES)
#units = {x:units[x].replace('degrees', '1') for x in units}
#units = {x:units[x].replace('radians', '1') for x in units}
wcon_dict = OrderedDict()
wcon_dict['metadata'] = metadata
wcon_dict['units'] = units
wcon_dict['data'] = data
return wcon_dict
def getWCOName(features_file):
return features_file.replace('_features.hdf5', '.wcon.zip')
def exportWCON(features_file, READ_FEATURES=False):
base_name = os.path.basename(features_file).replace('_features.hdf5', '')
print_flush("{} Exporting data to WCON...".format(base_name))
wcon_dict = exportWCONdict(features_file, READ_FEATURES)
wcon_file = getWCOName(features_file)
#with gzip.open(wcon_file, 'wt') as fid:
# json.dump(wcon_dict, fid, allow_nan=False)
with zipfile.ZipFile(wcon_file, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
zip_name = os.path.basename(wcon_file).replace('.zip', '')
wcon_txt = json.dumps(wcon_dict, allow_nan=False, separators=(',', ':'))
zf.writestr(zip_name, wcon_txt)
print_flush("{} Finised to export to WCON.".format(base_name))
if __name__ == '__main__':
features_file = '/Users/ajaver/OneDrive - Imperial College London/Local_Videos/single_worm/global_sample_v3/883 RC301 on food R_2011_03_07__11_10_27___8___1_features.hdf5'
#exportWCON(features_file)
wcon_file = getWCOName(features_file)
wcon_dict = exportWCONdict(features_file)
wcon_txt = json.dumps(wcon_dict, allow_nan=False, indent=4)
#%%
with zipfile.ZipFile(wcon_file, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
zip_name = os.path.basename(wcon_file).replace('.zip', '')
zf.writestr(zip_name, wcon_txt)
#%%
# import wcon
# wc = wcon.WCONWorms()
# wc = wc.load_from_file(JSON_path, validate_against_schema = False)
| ljschumacher/tierpsy-tracker | tierpsy/analysis/wcon_export/exportWCON.py | Python | mit | 9,522 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations:
"""RouteFilterRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs: Any
) -> "_models.RouteFilterRule":
"""Gets the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilterRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_06_01.models.RouteFilterRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs: Any
) -> "_models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteFilterRule"]:
"""Creates or updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the create or update route filter
rule operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2018_06_01.models.RouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.PatchRouteFilterRule",
**kwargs: Any
) -> "_models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.PatchRouteFilterRule",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteFilterRule"]:
"""Updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the update route filter rule
operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2018_06_01.models.PatchRouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def list_by_route_filter(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteFilterRuleListResult"]:
"""Gets all RouteFilterRules in a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_06_01.models.RouteFilterRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_route_filter.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_06_01/aio/operations/_route_filter_rules_operations.py | Python | mit | 28,535 |
'''
To make your code as fast as possible while staying compatible with older versions of
Python, you can use a small trick that falls back to the StringIO module whenever
cStringIO is unavailable, as the example below shows.
'''
try:
import cStringIO
StringIO = cStringIO
except ImportError:
import StringIO
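
# Quick illustrative check of the fallback (hypothetical usage): both cStringIO and
# StringIO expose a StringIO() factory on the imported module object.
buf = StringIO.StringIO()
buf.write("hello")
print buf.getvalue()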
print StringIO | iamweilee/pylearn | cstringio-example-2.py | Python | mit | 249 |
from __future__ import absolute_import
from django.conf.urls import url
from oauth2_provider import views
from .views import CoffeestatsApplicationRegistration, \
CoffeestatsApplicationDetail, \
CoffeestatsApplicationApproval, \
CoffeestatsApplicationRejection, \
CoffeestatsApplicationFullList
urlpatterns = (
url(r'^authorize/$', views.AuthorizationView.as_view(), name="authorize"),
url(r'^token/$', views.TokenView.as_view(), name="token"),
url(r'^revoke_token/$', views.RevokeTokenView.as_view(),
name="revoke-token"),
)
# Application management views
urlpatterns += (
url(r'^applications/$', views.ApplicationList.as_view(), name="list"),
url(r'^applications/register/$',
CoffeestatsApplicationRegistration.as_view(), name="register"),
url(r'^applications/(?P<pk>\d+)/$', CoffeestatsApplicationDetail.as_view(),
name="detail"),
url(r'^applications/(?P<pk>\d+)/delete/$',
views.ApplicationDelete.as_view(), name="delete"),
url(r'^applications/(?P<pk>\d+)/update/$',
views.ApplicationUpdate.as_view(), name="update"),
url(r'^applications/(?P<pk>\d+)/approve/$',
CoffeestatsApplicationApproval.as_view(), name="approve"),
url(r'^applications/(?P<pk>\d+)/reject/$',
CoffeestatsApplicationRejection.as_view(), name="reject"),
url(r'^all-applications/$',
CoffeestatsApplicationFullList.as_view(), name="list_all"),
)
urlpatterns += (
url(r'^authorized_tokens/$', views.AuthorizedTokensListView.as_view(),
name="authorized-token-list"),
url(r'^authorized_tokens/(?P<pk>\d+)/delete/$',
views.AuthorizedTokenDeleteView.as_view(),
name="authorized-token-delete"),
)
| coffeestats/coffeestats-django | coffeestats/caffeine_oauth2/urls.py | Python | mit | 1,723 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import the msg_stream_connector connector modules
from ComssServiceDevelopment.connectors.tcp.msg_stream_connector import InputMessageConnector, OutputMessageConnector
# import the Service base class and the service controller
from ComssServiceDevelopment.service import Service, ServiceController
import cv2 # OpenCV library
import numpy as np # NumPy library
import os
import threading
from time import time
OPACITY = 0.4 # rectangle opacity
SIZE = 0.25 # fraction of the frame covered by the rectangle
RECT_DISPLAY_LEN = 3 # how long the rectangle stays visible, in seconds
class MarkFrameService(Service):
"""klasa usługi musi dziedziczyć po ComssServiceDevelopment.service.Service"""
def __init__(self):
""""nie"konstruktor, inicjalizator obiektu usługi"""
#wywołanie metody inicjalizatora klasy nadrzędnej
super(MarkFrameService, self).__init__()
self.filters_lock = threading.RLock()
self.last_rect_shown_time = None
def declare_outputs(self):
"""deklaracja wyjść"""
#deklaracja wyjścia "videoOutput" będącego interfejsem
#wyjściowym konektora msg_stream_connector
self.declare_output("videoOutput", OutputMessageConnector(self))
def declare_inputs(self):
"""deklaracja wejść"""
#deklaracja wejścia "videoInput" będącego interfejsem wyjściowym konektora msg_stream_connector
self.declare_input("videoInput", InputMessageConnector(self))
def run(self):
"""główna metoda usługi"""
video_input = self.get_input("videoInput") #obiekt interfejsu wejściowego
video_output = self.get_output("videoOutput") #obiekt interfejsu wyjściowego
#pętla główna usługi
while self.running():
            frame_obj = video_input.read() # receive data from the input interface
            frame = np.loads(frame_obj) # deserialize the frame into a NumPy array
            # filters: when filter 1 is requested, draw the rectangle and keep it
            # visible for RECT_DISPLAY_LEN seconds
time_now = time()
with self.filters_lock:
current_filters = self.get_parameter("filtersOn")
if 1 in current_filters:
# self.set_parameter("filtersOn", [])
self.update_parameters({"filtersOn": []}) # reset filters
self.last_rect_shown_time = time_now
self.__draw_rectangle(frame)
elif self.last_rect_shown_time and (time_now - self.last_rect_shown_time) < RECT_DISPLAY_LEN:
self.__draw_rectangle(frame)
# forward
            video_output.send(frame.dumps()) # send the frame through the output interface
def __draw_rectangle(self, frame):
height, width, _ = frame.shape
overlay = frame.copy()
cv2.rectangle(overlay,(0,0),(int(width*SIZE),int(height*SIZE)),(255,0,0),-1)
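        # blend the overlay into the frame in place:
        # frame = overlay*OPACITY + frame*(1 - OPACITY)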
cv2.addWeighted(overlay, OPACITY, frame, 1 - OPACITY, 0, frame)
if __name__=="__main__":
    # create the service controller object
    config_name = os.path.join( os.path.dirname(__file__), "service.json") # e.g. src\mark_frame_service\service.json
sc = ServiceController(MarkFrameService, config_name)
    sc.start() # start the service
| michaellas/streaming-vid-to-gifs | src/mark_frame_service/service.py | Python | mit | 3,218 |
from flask import request
from structlog import get_logger
from ghinbox import app
from ghinbox.tasks import create_issue
logger = get_logger()
@app.route('/hooks/postmark', methods=['POST'])
def postmark_incomming_hook():
# TODO #2 HTTP Basic Auth
inbound = request.json
if not inbound:
return 'ERR', 400
logger.debug('postmark', data=inbound)
title = inbound['Subject']
body = inbound['TextBody']
logger.debug('creating issue', title=title)
create_issue.delay(title, body)
return 'OK'
| sibson/ghinbox | ghinbox/webhooks.py | Python | mit | 543 |
from zope import schema
from sparc.entity import IEntity
from sparc.organization import ICompany
from sparc.organization import IOrganizableEntity
class IAddress(IEntity):
"""A generic address"""
address = schema.Text(
title = u'Address',
description = u'The entity address',
)
class IEmailAddress(IAddress):
"""An email address (entity name identifies type...work, personal, etc)"""
class IPhoneNumber(IAddress):
"""A telephone number (entity name identifies type...work, personal, etc)"""
class IPostalAddress(IAddress):
"""A snail mail address location (entity name identifies type...work, personal, etc)"""
class IContact(IOrganizableEntity):
"""Contact information for an entity"""
email_addresses = schema.List(
title = u'Email Addresses',
description = u'Email addresses for contact',
value_type = schema.TextLine(title=u'email')
)
phone_numbers = schema.List(
title = u'Phone Numbers',
description = u'Phone numbers for contact',
value_type = schema.TextLine(title=u'phone')
)
postal_addresses = schema.List(
title = u'Postal Addresses',
description = u'Postal addresses for contact',
value_type = schema.Text(title=u'address')
)
companies = schema.List(
title = u'Associated Companies',
description = u'Companies associated with contact',
value_type = schema.Field(
constraint = lambda v: ICompany.providedBy(v)
)
)
| davisd50/sparc.organization | sparc/organization/contacts/interfaces.py | Python | mit | 1,650 |
import os
from cpenv import api, paths
from cpenv.cli import core
from cpenv.module import parse_module_path
class Create(core.CLI):
'''Create a new Module.'''
def setup_parser(self, parser):
parser.add_argument(
'where',
help='Path to new module',
)
def run(self, args):
where = paths.normalize(args.where)
if os.path.isdir(where):
core.echo()
core.echo('Error: Can not create module in existing directory.')
core.exit(1)
default_name, default_version = parse_module_path(where)
core.echo()
core.echo('This command will guide you through creating a new module.')
core.echo()
name = core.prompt(' Module Name [%s]: ' % default_name)
version = core.prompt(' Version [%s]: ' % default_version.string)
description = core.prompt(' Description []: ')
author = core.prompt(' Author []: ')
email = core.prompt(' Email []: ')
core.echo()
core.echo('- Creating your new Module...', end='')
module = api.create(
where=where,
name=name or default_name,
version=version or default_version.string,
description=description,
author=author,
email=email,
)
core.echo('OK!')
core.echo()
core.echo(' ' + module.path)
core.echo()
core.echo('Steps you might take before publishing...')
core.echo()
core.echo(' - Include binaries your module depends on')
core.echo(' - Edit the module.yml file')
core.echo(' - Add variables to the environment section')
core.echo(' - Add other modules to the requires section')
core.echo(' - Add python hooks like post_activate')
core.echo()
| cpenv/cpenv | cpenv/cli/create.py | Python | mit | 1,854 |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 22 12:07:53 2014
@author: Gouthaman Balaraman
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup
import re
import numpy as np
import os
#####################################################
# A bunch of constants used throught the script. #
#####################################################
_curdir= os.path.abspath(os.path.curdir)
_posdat = re.compile('(\w+):(\d+)px')
_topdat = re.compile('top:(\d+)px')
_leftdat = re.compile('left:(\d+)px')
# this is the full format with all columns; The numbers here bracket the columns
maptbl_long = [(0,75),(75,145),(145,212),(212,283),(283,350),(350,418),(418,486),
(486,554),(554,621),(621,688),(688,756),(756,823),(823,890),(890,958),
(958,1026),(1026,1094),(1094,1199)]
# This provides a mapping to the column with the text
mptbltxt = ['RD','MURDER','MANSLTR','FORCED_RAPE','ROBBERY','AGGRAV_ASSAULT',
'BURGLARY_RES','BURGLARY_COM','AUTO_BURG','GRAND_THEFT','PETTY_THEFT',
'BIKE_THEFT','AUTO_THEFT','ARSON','TOTAL_PART1','TOTAL_PART2','GRAND_TOTAL']
#this a truncate version I found for some months; The numbers here bracket the columns
maptbl_short=[(0,133),(133,194.5),(194.5,264),(264,329),(329,396),(396,466),(466,531),
(531,597),(597,667.5),(667.5,736),(736,803),(803,871),(871,938),(938,1004),(1004,1300)
]
def load_html(filename):
soup = BeautifulSoup(file(filename).read())
return soup
def grab_pages(soup):
return soup.body.find_all('div')
def cleanup_data(data):
    # remove non-breaking spaces (\xa0)
data = data.replace(u'\xa0','')
return data
def create_buckets(arr):
'''
Here we bin the rows based on 'top' value
'''
sarr = np.sort(arr)
# coarseness ; this is used to separate different rows
crsns = 10# np.mean(sdiff)
s = 0
prev = sarr[0]
buckets = []
for sa in sarr[1:]:
if sa-prev>crsns:
e = (sa+prev)*0.5
buckets.append((s,e))
s = e
prev = sa
#else
buckets.append((s,s+40))
return [buckets,[i for i,y in enumerate(buckets)]]
def create_frame(pnodes,mptbl,mptbltxt,lftmrkr):
'''
For a given page, here I use the position to tag it with a column number.
    Then a data frame is created and the pivot_table option is used to construct
a proper table to resemble the actual data set.
'''
df = pd.DataFrame(pnodes)
[tmptbl,tmptblval] = create_buckets(df.top.unique()) # buckets for top
dval = []
for t in tmptbl:
dvlst = df[(df["top"]>=t[0])&(df["top"]<=t[1])&(df['left']<lftmrkr)]['content'].values
#dval.append(dvlst[0] if len(dvlst)>0 else u'RD')
cval = dvlst[0] if len(dvlst)>0 else u'RD'
dval.append(cval)
#df[(df["top"]>=t[0])&(df["top"]<=t[1])]['rowval'] = cval
df['row'] = df['top'].map(lambda g:
[
dval[i] for i,x in enumerate(tmptbl)
if ((x[0]<=g)and(g<=x[1])) or None
][0]
)
dfs = df[df['row']!='RD']
    dlst, dcnt = [], []
for i,v in dfs.iterrows():
if v.left<lftmrkr:
dcnt.append(v.content)
dlst.append(v.top)
dfs['column'] = dfs['left'].map(lambda g: [mptbltxt[i] for i,x in enumerate(mptbl)
if ((x[0]<=g)and(g<=x[1]))][0])
pvt = dfs.pivot(index='row',columns='column',values='content')
pvt.fillna(0,inplace=True)
for c in pvt.columns:
try:
pvt[c] = pvt[c].astype(int)
except:
pass
return pvt
'''
# this didn't work; need to check later
def grab_monthlypdfs():
domain='http://www.longbeach.gov'
url = 'http://www.longbeach.gov/police/statistics.asp'
res = requests.get(url)
sp = BeautifulSoup(res.text)
tbody = sp.find_all('tbody')
links = tbody[3].find_all('a')
pdfdir = os.path.join(_curdir,'files','PDF')
if not os.path.exists(pdfdir):
os.makedirs(pdfdir)
for l in links:
title = '_'.join( l['title'].split(" ") )
print title
try:
res = requests.get(domain+l['href'],stream=True)
pdffile = os.path.join(pdfdir,title+'.pdf')
with open(pdffile,'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
except Exception as e:
print 'FAILED: '+str(e)+l['title']+" "+l['href']
'''
def extract_nodes(p,lftmrkr):
'''
This is the code that extracts the beautiful soup html document into
a bunch of nodes for easy processing
'''
nodes = p.find_all('p' )
dlist = []
nextdat = {}
for node in nodes:
ddict = {}
attrs = node.attrs
attrssty = attrs.get('style','')
attrscls = attrs.get('class','')
if attrscls[0] == 'ft01' or attrscls[0] == 'ft03':
posns = _posdat.findall(attrssty)
if len(posns) == 2:
k,v = zip(*posns)
if ('top' in k ) and ('left' in k):
if nextdat != {}:
nextdat['top'] = int(v[0]) if k[0] == 'top' else int(v[1])
ddict = nextdat
nextdat = {}
ddict[k[0]] = int(v[0])
ddict[k[1]] = int(v[1])
cont = node.contents
if len(cont) == 1 :
ddict['content'] = cont[0].replace('\xa0','0')
elif len(cont)==3:
ddict['content'] = cont[0].replace('\xa0','0')
nextdat['content'] = cont[2].replace('\xa0','0')
nextdat['left'] = int(v[1])if k[1] == 'left' else int(v[0])
#if (ddict['left']<lftmrkr) and (ddict['content']!= 'RD'):
# currrd = ddict['content']
#ddict['rd'] = currrd
dlist.append(ddict)
return dlist
def create_html(pdffile):
'''
Given a pdf file, this calls pdftohtml.exe to convert to html
'''
try:
pdftohtml = "pdftohtml.exe "
htmldir = os.path.join(_curdir,'files','HTML')
if not os.path.exists(htmldir):
os.makedirs(htmldir)
pdffile = os.path.abspath(pdffile)
fileprefix = os.path.split(pdffile)[1].split('.pdf')[0]
cmd = pdftohtml+pdffile+" -c -noframes "+os.path.join(htmldir,fileprefix+".html")
print cmd
os.system(cmd)
except Exception as e:
print str(e)
def convert_all_pdfs(pdfdir):
'''
Convenient method to loop over all the pdf files. Calls create_html
file in a loop.
'''
for f in os.listdir(pdfdir):
if f.endswith('.pdf'):
create_html(os.path.join(pdfdir,f))
def _finalize_dataframe(ddf):
'''
Does some clean-up, check sums to validate the data. This is a basic
check. Nothing is guaranteed!
'''
# do a checksum test
if 'TOTAL_PART1' in ddf.columns:
checksum = np.sum(\
np.power(
ddf[mptbltxt[1:14]].astype(int).sum(axis=1) -
ddf['TOTAL_PART1'].astype(int)
,2)
)
if checksum:
print "Failed check sum test "+str(checksum)
else:
print "Passed checksum test"
# reorder the columns
if len(ddf.columns) == 17:
ddf = ddf[mptbltxt]
else:
ddf = ddf[mptbltxt[:15]]
del ddf['RD']
ddf.index.name = 'RD'
return ddf
def create_csv(htmlfile):
'''
This creates the csv file given a html file
'''
try:
print "Converting "+htmlfile
soup = load_html(htmlfile)
pages = grab_pages(soup)
num_nodes = len(pages[0])
leftmrkr = 75 if num_nodes > 440 else 133 # to handle two pdf formats
mptbl = maptbl_long if num_nodes > 440 else maptbl_short
#filetype = 1 if num_nodes > 480 else 0 # 1 if long type else 0
pvts = []
for i,p in enumerate(pages):
print 'Page-'+str(i)
dlist = extract_nodes(p,leftmrkr)
#df = create_frame(dlist,mptbl0,mptbltxt,leftmrkr)
df = create_frame(dlist,mptbl,mptbltxt,leftmrkr)
pvts.append(df)
ddf = pd.concat(pvts)
exclrows = set(['0'+str(i)for i in range(2000,2020,1)]) | set(['%CHG'])
exclrows = exclrows & set(ddf.index)
ddf.drop(exclrows,inplace=True)
ddf.fillna(0,inplace=True)
#cleanup
ddf = _finalize_dataframe(ddf)
csvdir = os.path.join(_curdir,'files','CSV')
if not os.path.exists(csvdir):
os.makedirs(csvdir)
htmlfile = os.path.abspath(htmlfile)
fileprefix = os.path.split(htmlfile)[1].split('.html')[0]
csvfile = os.path.join(csvdir,fileprefix+".csv")
ddf.to_csv(csvfile)
except Exception as e:
print str(e)
def convert_all_htmls(htmldir):
'''
    This is a top level driver which calls create_csv in a loop
'''
for f in os.listdir(htmldir):
if f.endswith('.html'):
create_csv(os.path.join(htmldir,f))
#break
if __name__=='__main__':
'''
Here is a complete example to loop over all pdfs and create all csvs.
>>>pdfdir = "D:\\Development\\Python\\CrimeData\\files\\PDF"
>>>convert_all_pdfs(pdfdir)
>>>htmldir = "D:\\Development\\Python\\CrimeData\\files\\HTML"
>>>convert_all_htmls(htmldir)
Or you can do individual file conversions:
>>>pdffile = os.path.join(pdfdir,'January_2013.pdf')
>>>create_html(pdffile)
'''
# Convert pdfs to html
pdfdir = "D:\\Development\\Python\\CrimeData\\files\\PDF"
pdffile = os.path.join(pdfdir,'January_2013.pdf')
create_html(pdffile)
#convert_all_pdfs(pdfdir)
# Then convert html to csv
htmldir = "D:\\Development\\Python\\CrimeData\\files\\HTML"
html = os.path.join(htmldir,'January_2013.html')
create_csv(html)
#convert_all_htmls(htmldir) | gouthambs/OpenData | src/longbeach_crime_stats.py | Python | mit | 10,310 |
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from appconf import AppConf
trans_app_label = _('Core')
class OppsCoreConf(AppConf):
DEFAULT_URLS = ('127.0.0.1', 'localhost',)
SHORT = 'googl'
SHORT_URL = 'googl.short.GooglUrlShort'
CHANNEL_CONF = {}
VIEWS_LIMIT = None
PAGINATE_BY = 10
PAGINATE_SUFFIX = u''
PAGINATE_NOT_APP = []
CHECK_MOBILE = False
DOMAIN_MOBILE = u''
PROTOCOL_MOBILE = u'http'
ADMIN_RULES = {}
RELATED_POSTS_PLACEHOLDER = "---related---"
CACHE_PREFIX = 'opps'
CACHE_EXPIRE = 300
CACHE_EXPIRE_LIST = 300
CACHE_EXPIRE_DETAIL = 300
RSS_LINK_TEMPLATE = '<a href="{}" class="ir ico ico-rss">RSS</a>'
LIST_MODELS = ('Post',)
RECOMMENDATION_RANGE_DAYS = 180
SMART_SLUG_ENABLED = True
MENU = True
MIRROR_CHANNEL = False
CONTAINERS_BLACKLIST = ['Entry']
CONTAINERS_SITE_ID = None
# default settings for tinymce
EDITOR = {
'editor': 'tinymce',
'height': 400,
'js': ('/static/tinymce/tinymce.min.js',),
"theme": "modern",
"plugins": [
"""advlist autolink lists link image charmap print preview hr
anchor pagebreak """,
"searchreplace wordcount visualblocks visualchars code fullscreen",
"""insertdatetime media nonbreaking save table contextmenu
directionality""",
"template paste textcolor opps"
],
"toolbar1": """insertfile undo redo | styleselect | bold italic |
alignleft aligncenter alignright alignjustify |
bullist numlist outdent indent | link image media |
print preview | forecolor backcolor | opps""",
"image_advtab": True,
"templates": [
{"title": 'Related', "content": RELATED_POSTS_PLACEHOLDER},
],
"file_browser_callback": 'CustomFileBrowser',
}
class Meta:
prefix = 'opps'
class GrapelliConf(AppConf):
ADMIN_TITLE = "Opps CMS Admin"
INDEX_DASHBOARD = 'opps.contrib.admin.dashboard.CustomIndexDashboard'
class Meta:
prefix = 'GRAPPELLI'
class AdminConf(AppConf):
SHORTCUTS = [
{
'shortcuts': [
{
'url_name': 'admin:articles_post_add',
'title': '+ Notícia',
'class': 'file3',
'help': 'Clique para adicionar uma nova notícia'
},
{
'url_name': 'admin:articles_post_changelist',
'title': 'Notícias',
'count': 'opps.contrib.admin.shortcuts.count_posts',
'class': 'file2',
'help': 'Clique para visualisar todas as notícias'
},
{
'url_name': 'admin:images_image_add',
'title': '+ Imagem',
'class': 'picture',
'help': 'Clique para adicionar uma nova imagem'
},
{
'url_name': 'admin:articles_album_changelist',
'title': 'Álbum',
'count': 'opps.contrib.admin.shortcuts.count_albums',
'class': 'camera',
'help': 'Clique para visualisar todos os álbuns'
},
{
'url': '/',
'open_new_window': True,
'help': 'Clique para visualizar a home page do site'
},
]
}
]
SHORTCUTS_SETTINGS = {
'hide_app_list': True,
'open_new_window': False,
}
SHORTCUTS_CLASS_MAPPINGS_EXTRA = [
('blogs_blogpost', 'blog')
]
class Meta:
prefix = 'ADMIN'
class StaticSiteMapsConf(AppConf):
ROOT_SITEMAP = 'opps.sitemaps.feed.sitemaps'
class Meta:
prefix = 'staticsitemaps'
class HaystackConf(AppConf):
CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
}
}
class Meta:
prefix = 'haystack'
class ThumborConf(AppConf):
SERVER = 'http://localhost:8888'
MEDIA_URL = 'http://localhost:8000/media'
SECURITY_KEY = ''
ARGUMENTS = {}
ENABLED = False
class Meta:
prefix = 'thumbor'
class DjangoConf(AppConf):
CACHES = {'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}}
| laborautonomo/opps | opps/core/__init__.py | Python | mit | 4,511 |
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponseBadRequest, HttpResponse
from bootcamp.tasks.models import Task
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from bootcamp.tasks.forms import TaskForm
from django.contrib.auth.decorators import login_required
from bootcamp.decorators import ajax_required
import markdown
from django.template.loader import render_to_string
import requests,json
from bootcamp.utils.loadconfig import get_vars
def getvrflist(network):
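    # Return the list of VRF names for the given network, read from its flat file.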
if network.lower() == 'emc'.lower():
filename = '/etc/netbot/emcvrflist.txt'
elif network.lower() == 'mtn'.lower():
filename = '/etc/netbot/mtnvrflist.txt'
vrfnames = []
with open(filename) as f:
for line in f:
vrfnames.append(line)
return vrfnames
@login_required
def traceroute(request):
# task = get_object_or_404(Task, status=Task.ACTIVE)
emcvrfname=getvrflist('emc')
return render(request, 'traceroute/traceroute.html', {'task': "task", 'emcvrf':emcvrfname,'message':""})
@login_required
def inttraceroute(request):
# task = get_object_or_404(Task, status=Task.ACTIVE)
emcvrfname=getvrflist('emc')
return render(request, 'traceroute/inttraceroute.html', {'task': "task", 'emcvrf':emcvrfname,'message':""})
@login_required()
def runtrace(request):
sourceip = request.POST.get('sourceip')
destip = request.POST.get('destip')
vrf = request.POST.get('vrf')
network = request.POST.get('network')
vrfname = request.POST.get('vrfname')
baseurl = get_vars('ansibengineemc')
emcvrfname=getvrflist('emc')
if sourceip == '' or destip == '' or vrf == '' or vrfname == '' or network == '':
return render(request, 'traceroute/traceroute.html', {'task': "task", 'emcvrf':emcvrfname,'message':"Please fill in all the details!!"})
if str(network).lower() == 'EMC'.lower():
baseurl = get_vars('ansibengineemc')
else:
baseurl = get_vars('ansibenginemtn')
if vrf == 'True':
vrf="True"
else:
vrf="False"
return render(request, 'traceroute/runtraceroute.html', {'sourceip': sourceip, 'destip':destip,'vrfname': vrfname, 'vrf':vrf,'baseurl':baseurl})
@login_required()
def runtraceapi(request):
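    # Forward the traceroute parameters to the ansibengine REST API and relay its JSON response.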
sourceip = request.POST.get('sourceip')
destip = request.POST.get('destip')
vrf = request.POST.get('vrf')
vrfname = request.POST.get('vrfname')
baseurl = request.POST.get('baseurl')
url = baseurl+'/ansibengine/api/v1.0/runtrace'
headers = {'content-type': 'application/json'}
temp= {}
data= {}
data['sourceip']=sourceip
data['destip']=destip
data['vrfname']=vrfname
if vrf == 'True':
data['vrf']="True"
else:
data['vrf']="False"
try:
response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t'))
if not response.status_code == 201 :
temp['value']="Error!! Unexpected response. Please report this"
return HttpResponse(json.dumps(temp), content_type = "application/json")
except requests.exceptions.RequestException as e:
# return "Error: {}".format(e)
temp['value']="Error connecting to API. Please report this"
return HttpResponse(json.dumps(temp), content_type = "application/json")
return HttpResponse(response.text, content_type = "application/json")
@login_required()
def runinterfacetrace(request):
routerip = request.POST.get('sourceip')
interfaceip = request.POST.get('sourceint')
destip = request.POST.get('destip')
vrf = request.POST.get('vrf')
network = request.POST.get('network')
vrfname = request.POST.get('vrfdropdown')
baseurl = get_vars('ansibengineemc')
emcvrfname=getvrflist('emc')
if routerip == '' or interfaceip == '' or destip == '' or vrf == '' or vrfname == '' or network == '':
return render(request, 'traceroute/inttraceroute.html', {'task': "task", 'emcvrf':emcvrfname,'message':"Please fill in all the details!!"})
if str(network).lower() == 'EMC'.lower():
baseurl = get_vars('ansibengineemc')
else:
baseurl = get_vars('ansibenginemtn')
if vrf == 'True':
vrf="True"
else:
vrf="False"
return render(request, 'traceroute/runinterfacetraceroute.html', {'routerip': routerip, 'interfaceip':interfaceip, 'destip':destip,'vrfname': vrfname, 'vrf':vrf,'baseurl':baseurl})
@login_required()
def runinterfacetraceapi(request):
routerip = request.POST.get('routerip')
interfaceip = request.POST.get('interfaceip')
destip = request.POST.get('destip')
vrf = request.POST.get('vrf')
vrfname = request.POST.get('vrfname')
baseurl = request.POST.get('baseurl')
url = baseurl+'/ansibengine/api/v1.0/runinterfacetrace'
headers = {'content-type': 'application/json'}
temp= {}
data= {}
data['routerip']=routerip
data['interfaceip']=interfaceip
data['destip']=destip
data['vrfname']=vrfname
if vrf == 'True':
data['vrf']="True"
else:
data['vrf']="False"
try:
response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t'))
if not response.status_code == 201 :
temp['value']="Error!! Unexpected response. Please report this"
return HttpResponse(json.dumps(temp), content_type = "application/json")
except requests.exceptions.RequestException as e:
# return "Error: {}".format(e)
temp['value']="Error connecting to API. Please report this"
return HttpResponse(json.dumps(temp), content_type = "application/json")
return HttpResponse(response.text, content_type = "application/json")
##deprecated method
@login_required()
def gettraceroute(request):
sourceip = request.POST.get('sourceip')
destip = request.POST.get('destip')
vrf = request.POST.get('vrf')
network = request.POST.get('network')
vrfname = request.POST.get('vrfdropdown')
baseurl = get_vars('ansibengineemc')
if str(network).lower() == 'EMC'.lower():
baseurl = get_vars('ansibengineemc')
else:
baseurl = get_vars('ansibenginemtn')
url = baseurl+'/ansibengine/api/v1.0/gettraceroute'
headers = {'content-type': 'application/json'}
emcvrfname=getvrflist('emc')
if vrf is True:
data= {}
data['sourceip']=sourceip
data['destip']=destip
data['vrf']="True"
data['vrfname']=vrfname
response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t'))
statuscode = response.status_code
if int(statuscode) == 200:
return render(request, 'traceroute/traceroute.html', {'task': "task", 'emcvrf':emcvrfname, 'message':"Another task is running! Please wait.."})
else:
data= {}
data['sourceip']=sourceip
data['destip']=destip
data['vrf']="False"
data['vrfname']=vrfname
response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t'))
statuscode = response.status_code
if int(statuscode) == 200:
return render(request, 'traceroute/traceroute.html', {'task': "task", 'emcvrf':emcvrfname, 'message':"Another task is running! Please wait.."})
return render(request, 'traceroute/runtraceroute.html', {'task': "task",'baseurl':baseurl})
##deprecated method
@login_required()
def getinterfacetraceroute(request):
routerip = request.POST.get('sourceip')
interfaceip = request.POST.get('sourceint')
destip = request.POST.get('destip')
vrf = request.POST.get('vrf')
network = request.POST.get('network')
vrfname = request.POST.get('vrfdropdown')
baseurl = get_vars('ansibengineemc')
if network.lower() == 'EMC'.lower():
baseurl = get_vars('ansibengineemc')
else:
baseurl = get_vars('ansibenginemtn')
url = baseurl+'/ansibengine/api/v1.0/getinterfacetraceroute'
headers = {'content-type': 'application/json'}
emcvrfname=getvrflist('emc')
if vrf is True:
data= {}
data['routerip']=routerip
data['interfaceip']=interfaceip
data['destip']=destip
data['vrf']="True"
data['vrfname']=vrfname
response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t'))
statuscode = response.status_code
if int(statuscode) == 200:
return render(request, 'traceroute/inttraceroute.html', {'task': "task", 'emcvrf':emcvrfname, 'message':"Another task is running! Please wait.."})
else:
data= {}
data['routerip']=routerip
data['interfaceip']=interfaceip
data['destip']=destip
data['vrf']="False"
data['vrfname']=vrfname
response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t'))
statuscode = response.status_code
if int(statuscode) == 200:
return render(request, 'traceroute/inttraceroute.html', {'task': "task", 'emcvrf':emcvrfname, 'message':"Another task is running! Please wait.."})
return render(request, 'traceroute/runinterfacetraceroute.html', {'task': "task",'baseurl':baseurl})
##deprecated method
def runtraceroute(request):
baseurl = get_vars('ansibengineemc')
if request.method == 'POST':
baseurl = request.POST.get('baseurl')
# if request.method == 'POST':
# baseurl = request.POST.get('baseurl')
url = baseurl+'/ansibengine/api/v1.0/runtraceroute'
headers = {'content-type': 'application/json'}
data= {}
data['value']="some"
data['ipath']='new value'
response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t'))
return HttpResponse(response.text, content_type = "application/json")
##deprecated method
def runinterfacetraceroute(request):
baseurl = get_vars('ansibengineemc')
if request.method == 'POST':
baseurl = request.POST.get('baseurl')
# if request.method == 'POST':
# baseurl = request.POST.get('baseurl')
url = baseurl+'/ansibengine/api/v1.0/runinterfacetraceroute'
headers = {'content-type': 'application/json'}
data= {}
data['value']=url
response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t'))
return HttpResponse(response.text, content_type = "application/json")
# task = get_object_or_404(Task, status=Task.ACTIVE)
# return render(request, 'traceroute/runtraceroute.html', {'task': "task"})
| davismathew/netbot-django | bootcamp/traceroute/views.py | Python | mit | 10,645 |
# Find the Lowest Common Ancestor (LCA) in a Binary Search Tree
# A Binary Search Tree node
class Node:
# Constructor to initialise node
def __init__(self, data):
self.data = data
self.left = None
self.right = None
class BST:
def __init__(self):
self.root = None
def insert_node(self, data):
if self.root is None:
self.root = Node(data)
else:
self._insert(data, self.root)
def _insert(self, data, current_node):
if data <= current_node.data:
if current_node.left is not None:
self._insert(data, current_node.left)
else:
current_node.left = Node(data)
else:
if current_node.right is not None:
self._insert(data, current_node.right)
else:
current_node.right = Node(data)
def inorder(self):
current_node = self.root
self._inorder(current_node)
print('End')
def _inorder(self, current_node):
if current_node is None:
return
self._inorder(current_node.left)
print(current_node.data, " -> ", end='')
self._inorder(current_node.right)
# assuming both nodes are present in the tree
def lca_bst(root, value1, value2):
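    # Walk down from the root: while both values lie on the same side of the
    # current node keep descending; the first node that separates them is the LCA.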
while root is not None:
if value2 > root.data < value1:
root = root.right
elif value2 < root.data > value1:
root = root.left
else:
return root.data
if __name__ == '__main__':
tree = BST()
tree.insert_node(6)
tree.insert_node(8)
tree.insert_node(9)
tree.insert_node(6)
tree.insert_node(5)
tree.insert_node(7)
tree.insert_node(3)
tree.insert_node(2)
tree.insert_node(4)
print(lca_bst(tree.root, 4, 2))
"""
given tree:
6
6 8
5 7 9
3
2 4
"""
| anubhavshrimal/Data_Structures_Algorithms_In_Python | Tree/BinarySearchTree/Lowest_Common_Ancestor.py | Python | mit | 1,935 |
import random
from decimal import Decimal, ROUND_HALF_UP
from django.test import TestCase
from django.core.validators import ValidationError
from .models import *
def setup():
"""
Create dummy data
"""
Status.objects.create(
name="Hero",
overdraft="0"
)
Status.objects.create(
name="Villain",
overdraft="250"
)
Customer.objects.create(
firstname="Bruce",
lastname="Wayne",
nickname="batman",
email="[email protected]",
status=Status.objects.get(name="Hero")
)
Customer.objects.create(
firstname="James",
lastname="Gordon",
nickname="jim",
email="[email protected]",
status=Status.objects.get(name="Hero")
)
Customer.objects.create(
firstname="Oswald",
lastname="Cobblepot",
nickname="penguin",
email="[email protected]",
status=Status.objects.get(name="Villain")
)
Product.objects.create(
name="Shotgun",
price="50.00"
)
Product.objects.create(
name="Umbrella",
price="5"
)
Payment.objects.create(
customer=Customer.objects.get(nickname="penguin"),
amount="1000"
)
class CustomerTests(TestCase):
def test_balance_calcul(self):
"""
Test balance is sum of payments minus sum of purchases
"""
setup()
amount = Decimal(200)
Payment.objects.create(
customer=Customer.objects.get(nickname="jim"),
amount=amount
)
for i in range(25):
if(random.choice((True, False))):
Purchase.objects.create(
customer=Customer.objects.get(nickname="jim"),
product=Product.objects.get(name="Umbrella")
)
amount -= 5
else:
m = random.randrange(0, 20000) / 100
Payment.objects.create(
customer=Customer.objects.get(nickname="jim"),
amount=m
)
amount += Decimal(m)
self.assertEqual(
Customer.objects.get(nickname="jim").balance,
amount.quantize(Decimal('.001'), rounding=ROUND_HALF_UP)
)
class PurchaseTests(TestCase):
def test_purchase_auto_amount(self):
"""
Test the amount field is automatically created
"""
setup()
p = Purchase.objects.create(
customer=Customer.objects.get(nickname="penguin"),
product=Product.objects.get(name="Umbrella")
)
self.assertEqual(Purchase.objects.get(pk=p.pk).amount, 5)
def test_purchase_no_money(self):
"""
Test that a purchase can't be made without enough balance
"""
setup()
Payment.objects.create(
customer=Customer.objects.get(nickname="batman"),
amount="49"
)
self.assertTrue(
Customer.objects.get(nickname="batman").balance
<
Product.objects.get(name="Shotgun").price
)
p = Purchase(
customer=Customer.objects.get(nickname="batman"),
product=Product.objects.get(name="Shotgun")
)
self.assertRaises(
ValidationError,
p.full_clean
)
class PaymentTests(TestCase):
def test_no_negative_payment(self):
"""
Test that there can't be a negative payment
"""
setup()
p = Payment(
customer=Customer.objects.get(nickname="penguin"),
amount="-24"
)
self.assertRaises(
ValidationError,
p.full_clean
)
| Babaritech/babar3 | back/babar_server/tests.py | Python | mit | 3,743 |
"""
Initialize the module.
Author:
Panagiotis Tsilifis
Date:
5/22/2014
"""
from _forward_model_dmnless import *
| PredictiveScienceLab/inverse-bgo | demos/catalysis/__init__.py | Python | mit | 124 |
"""TestCases for multi-threaded access to a DB.
"""
import os
import sys
import time
import errno
from random import random
DASH = '-'
try:
WindowsError
except NameError:
class WindowsError(Exception):
pass
import unittest
from test_all import db, dbutils, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
if have_threads :
from threading import Thread
import sys
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
class BaseThreadedTestCase(unittest.TestCase):
dbtype = db.DB_UNKNOWN # must be set in derived class
dbopenflags = 0
dbsetflags = 0
envflags = 0
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def setUp(self):
if verbose:
dbutils._deadlock_VerboseFile = sys.stdout
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.setEnvOpts()
self.env.open(self.homeDir, self.envflags | db.DB_CREATE)
self.filename = self.__class__.__name__ + '.db'
self.d = db.DB(self.env)
if self.dbsetflags:
self.d.set_flags(self.dbsetflags)
self.d.open(self.filename, self.dbtype, self.dbopenflags|db.DB_CREATE)
def tearDown(self):
self.d.close()
self.env.close()
test_support.rmtree(self.homeDir)
def setEnvOpts(self):
pass
def makeData(self, key):
return DASH.join([key] * 5)
#----------------------------------------------------------------------
class ConcurrentDataStoreBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
envflags = db.DB_THREAD | db.DB_INIT_CDB | db.DB_INIT_MPOOL
readers = 0 # derived class should set
writers = 0
records = 1000
def test01_1WriterMultiReaders(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_1WriterMultiReaders..." % \
self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers = []
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
import sys
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers=[]
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
a.sort() # Generate conflicts
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
for t in writers:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
def writerThread(self, d, keys, readers):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if verbose:
print "%s: creating records %d - %d" % (name, start, stop)
count=len(keys)//len(readers)
count2=count
for x in keys :
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
count2-=1
if not count2 :
readers.pop().start()
count2=count
if verbose:
print "%s: finished creating records" % name
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
for i in xrange(5) :
c = d.cursor()
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
if verbose:
print "%s: found %d records" % (name, count)
c.close()
if verbose:
print "%s: thread finished" % name
class BTreeConcurrentDataStore(ConcurrentDataStoreBase):
dbtype = db.DB_BTREE
writers = 2
readers = 10
records = 1000
class HashConcurrentDataStore(ConcurrentDataStoreBase):
dbtype = db.DB_HASH
writers = 2
readers = 10
records = 1000
#----------------------------------------------------------------------
class SimpleThreadedBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
readers = 10
writers = 2
records = 1000
def setEnvOpts(self):
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
def test02_SimpleLocks(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_SimpleLocks..." % self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers = []
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
import sys
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers = []
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
a.sort() # Generate conflicts
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
for t in writers:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
def writerThread(self, d, keys, readers):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if verbose:
print "%s: creating records %d - %d" % (name, start, stop)
count=len(keys)//len(readers)
count2=count
for x in keys :
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
count2-=1
if not count2 :
readers.pop().start()
count2=count
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
c = d.cursor()
count = 0
rec = dbutils.DeadlockWrap(c.first, max_retries=10)
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = dbutils.DeadlockWrap(c.next, max_retries=10)
if verbose:
print "%s: found %d records" % (name, count)
c.close()
if verbose:
print "%s: thread finished" % name
class BTreeSimpleThreaded(SimpleThreadedBase):
dbtype = db.DB_BTREE
class HashSimpleThreaded(SimpleThreadedBase):
dbtype = db.DB_HASH
#----------------------------------------------------------------------
class ThreadedTransactionsBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
envflags = (db.DB_THREAD |
db.DB_INIT_MPOOL |
db.DB_INIT_LOCK |
db.DB_INIT_LOG |
db.DB_INIT_TXN
)
readers = 0
writers = 0
records = 2000
txnFlag = 0
def setEnvOpts(self):
#self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
pass
def test03_ThreadedTransactions(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_ThreadedTransactions..." % \
self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers=[]
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
import sys
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers = []
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
dt = Thread(target = self.deadlockThread)
import sys
if sys.version_info[0] < 3 :
dt.setDaemon(True)
else :
dt.daemon = True
dt.start()
for t in writers:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
self.doLockDetect = False
dt.join()
def writerThread(self, d, keys, readers):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
count=len(keys)//len(readers)
while len(keys):
try:
txn = self.env.txn_begin(None, self.txnFlag)
keys2=keys[:count]
for x in keys2 :
key = '%04d' % x
d.put(key, self.makeData(key), txn)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
txn.commit()
keys=keys[count:]
readers.pop().start()
except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
if verbose:
print "%s: Aborting transaction (%s)" % (name, val.args[1])
txn.abort()
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
finished = False
while not finished:
try:
txn = self.env.txn_begin(None, self.txnFlag)
c = d.cursor(txn)
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
if verbose: print "%s: found %d records" % (name, count)
c.close()
txn.commit()
finished = True
except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
if verbose:
print "%s: Aborting transaction (%s)" % (name, val.args[1])
c.close()
txn.abort()
if verbose:
print "%s: thread finished" % name
def deadlockThread(self):
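        # Periodically run the lock detector, aborting a random member of any
        # deadlock cycle, until the reader/writer threads have finished.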
self.doLockDetect = True
while self.doLockDetect:
time.sleep(0.05)
try:
aborted = self.env.lock_detect(
db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
if verbose and aborted:
print "deadlock: Aborted %d deadlocked transaction(s)" \
% aborted
except db.DBError:
pass
class BTreeThreadedTransactions(ThreadedTransactionsBase):
dbtype = db.DB_BTREE
writers = 2
readers = 10
records = 1000
class HashThreadedTransactions(ThreadedTransactionsBase):
dbtype = db.DB_HASH
writers = 2
readers = 10
records = 1000
class BTreeThreadedNoWaitTransactions(ThreadedTransactionsBase):
dbtype = db.DB_BTREE
writers = 2
readers = 10
records = 1000
txnFlag = db.DB_TXN_NOWAIT
class HashThreadedNoWaitTransactions(ThreadedTransactionsBase):
dbtype = db.DB_HASH
writers = 2
readers = 10
records = 1000
txnFlag = db.DB_TXN_NOWAIT
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if have_threads:
suite.addTest(unittest.makeSuite(BTreeConcurrentDataStore))
suite.addTest(unittest.makeSuite(HashConcurrentDataStore))
suite.addTest(unittest.makeSuite(BTreeSimpleThreaded))
suite.addTest(unittest.makeSuite(HashSimpleThreaded))
suite.addTest(unittest.makeSuite(BTreeThreadedTransactions))
suite.addTest(unittest.makeSuite(HashThreadedTransactions))
suite.addTest(unittest.makeSuite(BTreeThreadedNoWaitTransactions))
suite.addTest(unittest.makeSuite(HashThreadedNoWaitTransactions))
else:
print "Threads not available, skipping thread tests."
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/bsddb/test/test_thread.py | Python | mit | 16,484 |
import os
import sys
import asyncio
from pathlib import Path
import pendulum
sys.path.append(str(Path(__file__).absolute().parent.parent.parent))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from django.core.wsgi import get_wsgi_application # noqa
application = get_wsgi_application()
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner # noqa
import devpy.develop as log
# Boost the log max file size to 1Gb
log.handlers[0].maxBytes *= 1000
from vessels.models import VesselActivity # noqa
from vessels.crawler.ftp_client import crawl_csv, save_csv # noqa
from vessels.crawler.nh_client import process_xml, crawl_xml # noqa
class WampClient(ApplicationSession):
async def onJoin(self, details):
loop = asyncio.get_event_loop()
async def update_activity_status(id, status):
""" Update ship status for the given activity """
def _(id, value):
log.info(f'Update activity "{id}" status to "{status}"')
activity = VesselActivity.objects.get(id=id)
activity.status = status or None
activity.save()
return activity.to_dict(
timezone="Europe/Paris", include_vessel=True
)
activity = await loop.run_in_executor(None, _, id, status)
activity['timestamp'] = pendulum.utcnow().timestamp()
log.info(f'Update status info for activity {activity!r}')
self.publish('smit.activity.update', activity)
return activity
self.register(update_activity_status, 'smit.activity.update.status')
async def update_vessel_helico(id, helico):
""" Update helicopter approval for the vessel of this activity """
def _(id, value):
activity = VesselActivity.objects.get(id=id)
vessel = activity.vessel
log.info(f'Update vessel "{vessel.id}" helico to "{helico}"')
vessel.helico = helico or None
vessel.save()
return activity.to_dict(
timezone="Europe/Paris", include_vessel=True
)
activity = await loop.run_in_executor(None, _, id, helico)
activity['timestamp'] = pendulum.utcnow().timestamp()
log.info(f'Update helico info for activity {activity!r}')
self.publish('smit.activity.update', activity)
return activity
self.register(update_vessel_helico, 'smit.vessel.update.helico')
async def update_vessel_helico_obs(id, obs):
""" Update helicopter obs for the vessel of this activity """
def _(id, value):
activity = VesselActivity.objects.get(id=id)
vessel = activity.vessel
log.info(f'Update vessel "{vessel.id}" helico to "{obs}"')
vessel.helico_observation = obs or None
vessel.save()
return activity.to_dict(
timezone="Europe/Paris", include_vessel=True
)
activity = await loop.run_in_executor(None, _, id, obs)
activity['timestamp'] = pendulum.utcnow().timestamp()
log.info(f'Update helico obs for activity {activity!r}')
self.publish('smit.activity.update', activity)
return activity
self.register(update_vessel_helico_obs, 'smit.vessel.update.helico_obs')
async def publish_csv_update(stream):
activities = await save_csv(stream)
self.publish('smit.sirene.csv.update', activities)
coro = crawl_csv(
host="localhost",
login="user",
pwd="password",
port=2121,
path="fixture.csv",
csv_callback=publish_csv_update,
tick=3
)
asyncio.ensure_future(coro)
async def publish_xml_update(stream):
distances = await process_xml(stream)
self.publish('smit.nh.xml.update', distances)
asyncio.ensure_future(crawl_xml(xml_callback=publish_xml_update))
if __name__ == '__main__':
runner = ApplicationRunner("ws://127.0.0.1:3333/ws", "realm1")
runner.run(WampClient)
| ksamuel/smit | vessels/crawler/wamp_client.py | Python | mit | 4,286 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Bubble sort: each pass starts from the first element and compares it with the one
after it; if it is larger than the element after it, the two are swapped, and this
repeats until the element reaches the position it can get to. Every pass moves the
largest of the remaining elements to the "end" of the sequence (excluding the
elements already placed in earlier passes). Note the check for whether the list is
already sorted, which allows an early exit. Time complexity O(n^2).
'''
def short_bubble_sort(a_list):
exchange = True
pass_num = len(a_list) - 1
while pass_num > 0 and exchange:
exchange = False
for i in range(pass_num):
if a_list[i] > a_list[i + 1]:
exchange = True
# temp = a_list[i]
# a_list[i] = a_list[i + 1]
# a_list[i + 1] = temp
a_list[i], a_list[i + 1] = a_list[i + 1], a_list[i]
pass_num = pass_num - 1
if __name__ == '__main__':
a_list = [20, 40, 50, 22, 100, 90]
short_bubble_sort(a_list)
print(a_list) # [20, 22, 40, 50, 90, 100]
| Lucky0604/algorithms | sort/bubble-sort.py | Python | mit | 1,125 |
import urllib
from cyclone.web import asynchronous
from twisted.python import log
from sockjs.cyclone import proto
from sockjs.cyclone.transports import pollingbase
class JSONPTransport(pollingbase.PollingTransportBase):
name = 'jsonp'
@asynchronous
def get(self, session_id):
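        # Long-polling GET: attach to the session, then either start heartbeats
        # (empty send queue) or flush the queued messages as a JSONP callback.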
# Start response
self.handle_session_cookie()
self.disable_cache()
# Grab callback parameter
self.callback = self.get_argument('c', None)
if not self.callback:
self.write('"callback" parameter required')
self.set_status(500)
self.finish()
return
# Get or create session without starting heartbeat
if not self._attach_session(session_id):
return
# Might get already detached because connection was closed in
# connectionMade
if not self.session:
return
if self.session.send_queue.is_empty():
self.session.start_heartbeat()
else:
self.session.flush()
def connectionLost(self, reason):
self.session.delayed_close()
def send_pack(self, message):
# TODO: Just escape
msg = '%s(%s);\r\n' % (self.callback, proto.json_encode(message))
self.set_header('Content-Type',
'application/javascript; charset=UTF-8')
self.set_header('Content-Length', len(msg))
# FIXME
self.set_header('Etag', 'dummy')
self.write(msg)
self._detach()
self.safe_finish()
class JSONPSendHandler(pollingbase.PollingTransportBase):
def post(self, session_id):
self.preflight()
self.handle_session_cookie()
self.disable_cache()
session = self._get_session(session_id)
if session is None:
self.set_status(404)
return
#data = self.request.body.decode('utf-8')
data = self.request.body
ctype = self.request.headers.get('Content-Type', '').lower()
if ctype == 'application/x-www-form-urlencoded':
if not data.startswith('d='):
log.msg('jsonp_send: Invalid payload.')
self.write("Payload expected.")
self.set_status(500)
return
data = urllib.unquote_plus(data[2:])
if not data:
log.msg('jsonp_send: Payload expected.')
self.write("Payload expected.")
self.set_status(500)
return
try:
messages = proto.json_decode(data)
except:
# TODO: Proper error handling
log.msg('jsonp_send: Invalid json encoding')
self.write("Broken JSON encoding.")
self.set_status(500)
return
try:
session.messagesReceived(messages)
except Exception:
log.msg('jsonp_send: messagesReceived() failed')
session.close()
self.write('Message handler failed.')
self.set_status(500)
return
self.write('ok')
self.set_header('Content-Type', 'text/plain; charset=UTF-8')
self.set_status(200)
| flaviogrossi/sockjs-cyclone | sockjs/cyclone/transports/jsonp.py | Python | mit | 3,786 |
import py, os, cffi, re
import _cffi_backend
def getlines():
try:
f = open(os.path.join(os.path.dirname(cffi.__file__),
'..', 'c', 'commontypes.c'))
except IOError:
py.test.skip("cannot find ../c/commontypes.c")
lines = [line for line in f.readlines() if line.strip().startswith('EQ(')]
f.close()
return lines
def test_alphabetical_order():
lines = getlines()
assert lines == sorted(lines)
def test_dependencies():
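    # Every type that an EQ() entry maps to must itself be present in the table.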
r = re.compile(r'EQ[(]"([^"]+)",(?:\s*"([A-Z0-9_]+)\s*[*]*"[)])?')
lines = getlines()
d = {}
for line in lines:
match = r.search(line)
if match is not None:
d[match.group(1)] = match.group(2)
for value in d.values():
if value:
assert value in d
def test_get_common_types():
d = {}
_cffi_backend._get_common_types(d)
assert d["bool"] == "_Bool"
| johncsnyder/SwiftKitten | cffi/testing/cffi1/test_commontypes.py | Python | mit | 918 |
"""Main entry point
"""
from pyramid.config import Configurator
def main(global_config, **settings):
config = Configurator(settings=settings)
config.include("cornice")
config.scan("pyramidSparkBot.views")
return config.make_wsgi_app()
| jbogarin/ciscosparkapi | examples/pyramidSparkBot/pyramidSparkBot/__init__.py | Python | mit | 254 |
import requests
headers = {
'foo': 'bar',
}
response = requests.get('http://example.com/', headers=headers)
| NickCarneiro/curlconverter | fixtures/python/get_with_single_header.py | Python | mit | 114 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from wtforms import validators
from jinja2 import Markup
from studio.core.engines import db
from riitc.models import NaviModel, ChannelModel
from .base import BaseView
from .forms import CKTextAreaField
class Navi(BaseView):
column_labels = {'name': '名称', 'channels': '频道列表'}
column_list = ['id', 'name', 'channels', 'date_created']
def _list_channels(self, context, model, name):
lis = ''
for channel in model.channels:
lis += '<li>%s</li>' % (channel)
return Markup('<ol>' + lis + '</ol>')
column_formatters = {
'channels': _list_channels,
}
def __init__(self, **kwargs):
super(Navi, self).__init__(NaviModel, db.session, **kwargs)
def create_form(self, obj=None):
form = super(Navi, self).create_form()
delattr(form, 'date_created')
return form
def edit_form(self, obj=None):
form = super(Navi, self).edit_form(obj=obj)
delattr(form, 'date_created')
return form
class Channel(BaseView):
create_template = 'panel/channel_edit.html'
edit_template = 'panel/channel_edit.html'
column_labels = {'name': '名称',
'parent': '主分类(本身为主分类,不填写)',
'summary': '简介',
'date_created': '创建时间'}
column_searchable_list = ['name', ]
column_default_sort = ('date_created', True)
form_extra_fields = {
'summary': CKTextAreaField('简介',
validators=[validators.Required()]),
}
def __init__(self, **kwargs):
super(Channel, self).__init__(ChannelModel, db.session, **kwargs)
def create_form(self, obj=None):
form = super(Channel, self).create_form()
delattr(form, 'articles')
delattr(form, 'channels')
delattr(form, 'all_articles')
delattr(form, 'date_created')
return form
def edit_form(self, obj=None):
form = super(Channel, self).edit_form(obj=obj)
delattr(form, 'articles')
delattr(form, 'channels')
delattr(form, 'all_articles')
delattr(form, 'date_created')
return form
| qisanstudio/qsapp-riitc | src/riitc/panel/channel.py | Python | mit | 2,269 |
import logging
log = logging.getLogger(__name__)
def has_bin(arg):
"""
    Helper function checks whether arg contains binary data
    :param arg: list | tuple | bytearray | dict
:return: (bool)
"""
if type(arg) is list or type(arg) is tuple:
return reduce(lambda has_binary, item: has_binary or has_bin(item), arg, False)
if type(arg) is bytearray or hasattr(arg, 'read'):
return True
if type(arg) is dict:
return reduce(lambda has_binary, item: has_binary or has_bin(item), [v for k, v in arg.items()], False)
return False
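

# Illustrative usage (not part of the library):
#   has_bin([1, 'a', bytearray(b'\x00')])  -> True
#   has_bin({'key': 'value'})               -> False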
| shuoli84/gevent_socketio2 | socketio/__init__.py | Python | mit | 583 |
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 2/6/15
###Function: Redraw figure 4A in Shifting Demographic Landscape (Bansal2010)
###Import data:
###Command Line: python
##############################################
### notes ###
### packages/modules ###
import csv
import numpy as np
import matplotlib.pyplot as plt
## local modules ##
### data structures ###
### parameters ###
### functions ###
### import data ###
childin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/scripts/WIPS2015/importData/child_attack_rate.txt','r')
child=csv.reader(childin, delimiter=' ')
adultin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/scripts/WIPS2015/importData/adult_attack_rate.txt','r')
adult=csv.reader(adultin, delimiter=' ')
### program ###
childlist, adultlist = [],[]
ct=0
for item1, item2 in zip(child, adult):
    # flatten the csv row into individual numeric tokens
    childlist = ' '.join(item1).split()
    adultlist = ' '.join(item2).split()
ct+=1
print ct
childtest = [float(c) for c in childlist]
adulttest = [float(a) for a in adultlist]
print childtest
print adulttest
plt.plot(childtest, color='red', linewidth=3)
plt.plot(adulttest, color='blue', linewidth=3)
plt.ylabel('Attack Rate')
plt.xlabel('Time')
plt.show() | eclee25/flu-SDI-exploratory-age | scripts/WIPS2015/WIPS_Bansal2010_hierarchy.py | Python | mit | 1,282 |
from microbit_stub import *
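
# A 5-bit binary counter on the top row of the LED display:
# button A increments the counter (lit pixels are 1-bits, carrying as needed),
# button B shows the current value in decimal and then clears the counter.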
while True:
if button_a.is_pressed():
for i in range(5):
if display.get_pixel(i, 0):
display.set_pixel(i, 0, 0)
sleep(10)
else:
display.set_pixel(i, 0, 9)
sleep(200)
break
elif button_b.is_pressed():
n = 0
for i in range(5):
if display.get_pixel(i, 0):
n = n + 2 ** i
display.clear()
display.show(str(n))
sleep(1000)
display.clear()
break
else:
sleep(50)
| casnortheast/microbit_stub | bitcounter-range.py | Python | mit | 601 |
import sys, os, subprocess, tempfile, shlex, glob
result = None
d = None
def format_msg(message, headline):
msg = "Line {0}:\n {1}\n{2}:\n{3}"\
.format(PARAMS["lineno"], PARAMS["source"], headline, message)
return msg
try:
#print("RUN", PARAMS["source"])
d = tempfile.mkdtemp(dir="/dev/shm")
command_params = []
created_files = []
for ref in PARAMS["refs"]:
if isinstance(ref, str):
value = globals()[ref]
inp_type = PARAMS["inputs"][ref]
if inp_type == "variable":
command_params.append(shlex.quote(value))
elif inp_type == "doc":
filename = d + "/doc_" + ref
open(filename, "w").write(value)
created_files.append(filename)
command_params.append(filename)
else:
raise TypeError(inp_type)
else:
typ, value = ref["type"], ref["value"]
if typ == "env":
command_params.append(value)
elif typ == "file":
if value is None:
filename = "/dev/null"
else:
value = os.path.expanduser(value)
filename = os.path.abspath(value)
command_params.append(filename)
elif typ == "varexp":
refs = ref["refs"]
ref_values = []
for r in refs:
if not r.startswith("$"):
v = globals()[r]
else: #env variable
v = os.environ[r[1:]]
ref_values.append(v)
value = value.format(*ref_values)
command_params.append(shlex.quote(value))
else:
raise TypeError(typ)
command = [param.format(*command_params) \
for param in PARAMS["command"]]
stdout = None
stderr = subprocess.PIPE
capture = False
return_mode = []
print_stdout = True
print_stderr = True
for output in PARAMS["output_refs"]:
if output["type"] == "stdout":
stdout = subprocess.PIPE
stderr = subprocess.PIPE
print_stdout = False
if output["name"] is not None:
return_mode.append("stdout")
elif output["type"] == "stderr":
stdout = subprocess.PIPE
stderr = subprocess.PIPE
print_stderr = False
if output["name"] is not None:
return_mode.append("stderr")
elif output["type"] == "stdout+stderr":
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
print_stdout = False
print_stderr = False
if output["name"] is not None:
return_mode.append("stdout")
elif output["type"] == "capture":
capture = True
return_mode.append("capture")
else:
raise TypeError(output["type"])
command = "cd %s;" % d + " ".join(command)
pragma = PARAMS.get("pragma", [])
monitor_delay = 2
monitor_preliminary = False
if "monitor" in pragma:
monitor_preliminary = True
monitor_delay = pragma[pragma.index("monitor")+1]
assert len(return_mode) <= 1, return_mode #TODO: stdout and stderr to different targets => return JSON
return_mode = return_mode[0] if len(return_mode) else None #TODO, see above
process = subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True)
last_stdout_data = b""
last_stderr_data = b""
while 1:
#print("MONITOR!")
try:
stdout_data, stderr_data = process.communicate(timeout=monitor_delay)
finished = True
except subprocess.TimeoutExpired:
finished = False
#TODO return_mode, see above
            # dirty! but I don't know how to do it better
stdout = process._fileobj2output[process.stdout]
curr_stdout_data = b''.join(stdout).decode("utf-8")
if len(curr_stdout_data) and \
curr_stdout_data != last_stdout_data:
if return_mode == "stdout" and process.stdout:
if monitor_preliminary:
return_preliminary(curr_stdout_data)
else:
sys.stdout.write(curr_stdout_data[len(last_stdout_data):])
last_stdout_data = curr_stdout_data
stderr = process._fileobj2output[process.stderr]
curr_stderr_data = b''.join(stderr).decode("utf-8")
if len(curr_stderr_data) and \
curr_stderr_data != last_stderr_data:
if return_mode == "stderr" and process.stderr:
if monitor_preliminary:
return_preliminary(curr_stderr_data)
else:
sys.stderr.write(curr_stderr_data[len(last_stderr_data):])
last_stderr_data = curr_stderr_data
if finished:
break
if stdout_data is not None:
stdout_data = stdout_data.decode("utf-8")
if stderr_data is not None:
stderr_data = stderr_data.decode("utf-8")
if process.returncode:
message = "Process exited with return code %d\n" % process.returncode
message += "Standard error:\n%s" % stderr_data
msg = format_msg(message, "Error message")
raise Exception(msg)
else:
if print_stdout and stdout_data is not None and len(stdout_data):
print(format_msg(stdout_data, "Standard output"))
#if print_stderr and len(stderr_data):
# print(format_msg(stderr_data, "Standard error"))
if capture:
new_files = []
for dirpath, dirnames, filenames in os.walk(d):
for filename in filenames:
new_file = os.path.join(dirpath, filename)
if new_file not in created_files:
new_files.append(new_file)
capture_data = {}
for f in new_files:
ff = f[len(d+"/"):]
capture_data[ff] = open(f).read()
#TODO return_mode, see above
if return_mode == "stdout":
result = stdout_data
elif return_mode == "stderr":
result = stderr_data
elif return_mode == "capture":
result = capture_data
finally:
if d is not None:
os.system("rm -rf %s" % d)
return result
| sjdv1982/seamless | docs/archive/slash/cell-command-standard.py | Python | mit | 6,435 |
# -*- coding:utf-8 -*-
import copy
from zope.interface import implementer
from .interfaces import (
IExecutor,
ISchemaValidation,
IDataValidation,
ICreate,
IDelete,
IEdit
)
from alchemyjsonschema.dictify import (
normalize,
validate_all,
ErrorFound
)
from jsonschema import FormatChecker
from jsonschema.validators import Draft4Validator
class ValidationError(Exception):
pass
@implementer(IExecutor)
class Executor(object):
def __init__(self, context, params):
self.context = context
self.raw_params = params
self.params = None
def validation(self, ob=None):
        raise NotImplementedError
def execute(self, ob=None):
        raise NotImplementedError
def default_validation(self, iface, ob=None, name=""):
fn = self.context.customized_or_default(iface, ISchemaValidation, name=name)
params = fn(self.context, self.raw_params)
fn2 = self.context.customized_or_default(iface, IDataValidation, name=name)
fn2(self.context, params, ob)
return params
class CreateExecutor(Executor):
def validation(self, ob=None):
        self.params = self.default_validation(ICreate, ob)
def execute(self, ob=None):
if self.params is None:
raise RuntimeError("execute after validation")
ob = self.context.modelclass(**self.params)
self.context.session.add(ob)
self.context.session.flush()
return ob
class EditExecutor(Executor):
def validation(self, ob=None):
        self.params = self.default_validation(IEdit, ob)
def execute(self, ob):
if self.params is None:
raise RuntimeError("execute after validation")
for k, v in self.params.items():
setattr(ob, k, v)
self.context.session.add(ob)
return ob
class DeleteExecutor(Executor):
def validation(self, ob=None):
        self.params = self.default_validation(IDelete, ob)
def execute(self, ob):
self.context.session.delete(ob)
return ob
def create_jsonschema_validation(context, params, ob=None):
def customize_schema(schema):
schema = copy.deepcopy(schema)
# when creating model, id is not needed.
if "id" in schema["required"]:
schema["required"].remove("id")
if "id" in schema["properties"]:
schema["properties"].pop("id")
return schema
schema = customize_schema(context.schema)
schema_validator = Draft4Validator(schema, format_checker=FormatChecker())
try:
validate_all(params, schema_validator)
except ErrorFound as err:
raise ValidationError({e.path[0]: e.message for e in err.errors})
return normalize(params, schema)
def edit_jsonschema_validation(context, params):
schema = context.schema
schema_validator = Draft4Validator(schema, format_checker=FormatChecker())
try:
validate_all(params, schema_validator)
except ErrorFound as err:
raise ValidationError({e.path[0]: e.message for e in err.errors})
return normalize(params, schema)
def delete_jsonschema_validation(context, params):
return params
| podhmo/komet | komet/executors.py | Python | mit | 3,149 |
import sys
import time
import os.path
from collections import Counter
from vial import vfunc, vim, dref
from vial.utils import redraw, focus_window
from vial.widgets import make_scratch
collector = None
def get_collector():
global collector
if not collector:
collector = ResultCollector()
return collector
def run_test(project_dir, executable=None, match=None, files=None, env=None):
from subprocess import Popen
from multiprocessing.connection import Client, arbitrary_address
addr = arbitrary_address('AF_UNIX')
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'pt.py')
executable = executable or sys.executable
args = [executable, filename, addr, '-q']
if match:
args.append('-k %s' % match)
environ = None
if env:
environ = os.environ.copy()
environ.update(env)
log = open('/tmp/vial-pytest.log', 'w')
if files:
args.extend(files)
proc = Popen(args, cwd=project_dir, env=environ, stdout=log, stderr=log, close_fds=True)
start = time.time()
while not os.path.exists(addr):
if time.time() - start > 5:
raise Exception('py.test launching timeout exceed')
time.sleep(0.01)
conn = Client(addr)
return proc, conn
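# Note added for clarity (an assumption about the helper protocol, not part of
# the original plugin): the spawned pt.py helper is expected to bind a Unix
# socket at `addr`; the parent connects to it with
# multiprocessing.connection.Client and then receives tuples such as
# ('COLLECTED_TESTS', tests), ('PASS'/'FAIL'/..., name, result) and finally
# ('END',) -- see ResultCollector.collect() below.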
def indent(width, lines):
return [' ' * width + r for r in lines]
@dref
def goto_file():
filename, line = vfunc.expand('<cWORD>').split(':')[:2]
for win in vim.windows:
if vfunc.buflisted(win.buffer.number):
focus_window(win)
vim.command('e +{} {}'.format(line, filename))
class ResultCollector(object):
def init(self, win, buf):
vim.command('setlocal syntax=vialpytest')
vim.command('nnoremap <buffer> gf :python {}()<cr>'.format(goto_file.ref))
def reset(self):
cwin = vim.current.window
_, self.buf = make_scratch('__vial_pytest__', self.init, 'pytest')
vim.command('normal! ggdG')
focus_window(cwin)
redraw()
def add_test_result(self, rtype, name, result):
self.counts[rtype] += 1
lines = ['{} {}'.format(name, rtype)]
trace, out = result
for k, v in out:
lines.append(' ----======= {} =======----'.format(k))
lines.extend(indent(1, v.splitlines()))
lines.append('')
if trace:
lines.extend(indent(1, trace.splitlines()))
lines.append('')
lines.append('')
buflen = len(self.buf)
self.buf[buflen-1:] = lines
redraw()
def collect(self, conn):
self.tests = []
self.counts = Counter()
self.reset()
while True:
msg = conn.recv()
cmd = msg[0]
if cmd == 'END':
return
elif cmd == 'COLLECTED_TESTS':
                self.tests[:] = msg[1]
elif cmd in ('PASS', 'ERROR', 'FAIL', 'SKIP', 'FAILED_COLLECT'):
self.add_test_result(*msg)
def run(*args):
project = os.getcwd()
files = None
if args:
files = [vfunc.expand(r) for r in args]
try:
f = vfunc.VialPythonGetExecutable
except vim.error:
executable = None
else:
executable = f()
proc, conn = run_test(project, files=files, executable=executable)
get_collector().collect(conn)
| baverman/vial-pytest | vial-plugin/vial_pytest/plugin.py | Python | mit | 3,367 |
import csv
import unittest
from datetime import datetime, timedelta
from hackertracker import event
from hackertracker.database import Model, Session
from sqlalchemy import create_engine
class TestEvents(unittest.TestCase):
def setUp(self):
engine = create_engine('sqlite:///:memory:', echo=True)
Model.metadata.create_all(engine)
Session.configure(bind=engine)
event.Event.for_name("Drink glass of water", create=True)
def tearDown(self):
Session.remove()
def assertDatetimesEqual(self, w1, w2):
"Assert datetimes are equal to the second"
self.assertEqual(w1.replace(microsecond=0), w2.replace(microsecond=0))
def test_get_event(self):
e = event.Event.for_name("Drink pint of water", create=True)
self.assertEqual(e.name, "Drink pint of water")
e = event.Event.for_name("Drink pint of water")
self.assertEqual(e.name, "Drink pint of water")
self.assertRaises(event.EventNotFound, event.Event.for_name, "You'll never find me")
def test_basic_track(self):
e = event.Event.for_name("Drink glass of water")
o = e.track()
self.assertEqual(list(e.entries()), [o])
def test_events_persist(self):
e = event.Event.for_name("Drink glass of water")
o = e.track(attrs=dict(size="16", location="office"))
when = o.when
attrs = dict(o.attrs)
# Reload from db
Session.commit()
Session.remove()
e = event.Event.for_name("Drink glass of water")
o1 = e.entries()[0]
self.assertDatetimesEqual(when, o1.when)
self.assertEqual(attrs, o1.attrs)
def test_entry_count(self):
e = event.Event.for_name("Drink glass of water")
e.track()
e.track()
e.track()
Session.commit()
self.assertEqual(e.entry_count(), 3)
def test_latest_entry(self):
e = event.Event.for_name("Drink glass of water")
e.track(when=earlier(seconds=3))
e.track(when=earlier(seconds=2))
f = e.track(when=earlier(seconds=1))
Session.commit()
self.assertEqual(e.latest_entry().id, f.id)
def test_display_entry(self):
e = event.Event.for_name("Drink glass of water")
o = e.track(when=datetime(2014, 1, 1, 16, 6, 20, 216238))
self.assertEqual(str(o), "Jan 01, 2014 04:06PM")
o = e.track(when=datetime(2015, 3, 2, 0, 34, 53, 327128))
self.assertEqual(str(o), "Mar 02, 2015 12:34AM")
def test_list_events(self):
e1 = event.Event.for_name("Drink glass of water")
e2 = event.Event.for_name("Clean litter box", create=True)
self.assertEqual(event.Event.all(), [e2, e1])
def test_alternate_time(self):
e = event.Event.for_name("Drink glass of water")
o = e.track()
self.assertDatetimesEqual(o.when, datetime.utcnow())
when = earlier(hours=10)
o = e.track(when)
self.assertDatetimesEqual(o.when, when)
def test_attributes(self):
e = event.Event.for_name("Drink glass of water")
o = e.track(attrs=dict(size="16", location="office"))
self.assertEqual(o.attrs, {
"size": "16",
"location": "office"
})
def test_list_attributes(self):
e = event.Event.for_name("Drink glass of water")
e.track(attrs=dict(size="16", location="office"))
e.track(attrs=dict(hello="world"))
e.track(attrs=dict(hello="goodbye", location="office"))
event.Event.for_name("Fire ze missile", create=True).track(attrs=dict(le_tired="true"))
Session.commit()
self.assertEqual(e.attributes(), ["hello", "location", "size"])
def test_slug(self):
e = event.Event.for_name("Drink glass of water")
self.assertEqual(e.slug, "Drink_glass_of_water")
def test_exports_csv(self):
e = event.Event.for_name("Drink glass of water")
o = e.track(when=earlier(seconds=-1), attrs=dict(size="16", location="office"))
e.track(attrs=dict(hello="world", when="now"))
e.track(attrs=dict(hello="goodbye", location="office"))
Session.commit()
csv_file = list(csv.reader(e.export_csv().splitlines()))
self.assertEqual(csv_file[0], ["When", "hello", "location", "size", "when"])
self.assertEqual(csv_file[1], [str(o.when), "", "office", "16", ""])
self.assertEqual(len(csv_file), 4)
def earlier(**kwargs):
return datetime.utcnow() - timedelta(**kwargs)
| sionide21/HackerTracker | tests/event_tests.py | Python | mit | 4,529 |
import pandas as pd
from pandas.io import gbq
def test_sepsis3_one_row_per_stay_id(dataset, project_id):
"""Verifies one stay_id per row of sepsis-3"""
query = f"""
SELECT
COUNT(*) AS n
FROM
(
SELECT stay_id FROM {dataset}.sepsis3 GROUP BY 1 HAVING COUNT(*) > 1
) s
"""
df = gbq.read_gbq(query, project_id=project_id, dialect="standard")
n = df.loc[0, 'n']
assert n == 0, 'sepsis-3 table has more than one row per stay_id'
| MIT-LCP/mimic-code | mimic-iv/tests/test_sepsis.py | Python | mit | 481 |
# -*- coding: utf-8 -*-
"""signal handlers registered by the imager_profile app"""
from __future__ import unicode_literals
from django.conf import settings
from django.db.models.signals import post_save
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from imager_profile.models import ImagerProfile
import logging
logger = logging.getLogger(__name__)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def ensure_imager_profile(sender, **kwargs):
"""Create and save an ImagerProfile after every new User is created."""
if kwargs.get('created', False):
try:
new_profile = ImagerProfile(user=kwargs['instance'])
new_profile.save()
except (KeyError, ValueError):
logger.error('Unable to create ImagerProfile for User instance.')
@receiver(pre_delete, sender=settings.AUTH_USER_MODEL)
def remove_imager_profile(sender, **kwargs):
try:
kwargs['instance'].profile.delete()
except (KeyError, AttributeError):
msg = (
"ImagerProfile instance not deleted for {}. "
"Perhaps it does not exist?"
)
logger.warn(msg.format(kwargs['instance']))
| flegald/django-imager | imagersite/imager_profile/handler.py | Python | mit | 1,201 |
from django import forms
from . import models
from apps.utils import forms as utils, constants
from django.forms import models as models_form
from apps.personas import models as persona_models
class VacanteForm(utils.BaseFormAllFields):
title = 'Vacante'
fecha = forms.DateField(input_formats=constants.INPUT_FORMATS)
class Meta(utils.BaseFormAllFields.Meta):
model = models.Vacante
def clean(self):
print(self.cleaned_data)
return self.cleaned_data
class VacantePersonaForm(utils.BaseFormAllFields):
title = 'Vacante Persona'
class Meta(utils.BaseFormAllFields.Meta):
model = models.VacantePersona
def get_vacante_persona_formset(form,
formset=models_form.BaseInlineFormSet,
**kwargs):
return models_form.inlineformset_factory(
persona_models.Persona,
models.VacantePersona,
form,
formset,
**kwargs
)
class FormacionTrabajoForm(utils.BaseFormAllFields):
title = 'Formacion para el trabajo'
fecha_creacion = forms.DateField(input_formats=constants.INPUT_FORMATS)
class Meta(utils.BaseFormAllFields.Meta):
model = models.FormacionTrabajo
class FormacionTrabajoPersonasForm(utils.BaseFormAllFields):
title = 'Formacion Trabajo Persona'
fecha_inscripcion = forms.DateField(input_formats=constants.INPUT_FORMATS)
fecha_proceso = forms.DateField(input_formats=constants.INPUT_FORMATS)
class Meta(utils.BaseFormAllFields.Meta):
model = models.FormacionTrabajoPersona
def get_formacion_trabajo_persona_formset(form,
formset=models_form.BaseInlineFormSet,
**kwargs):
return models_form.inlineformset_factory(
persona_models.Persona,
models.FormacionTrabajoPersona,
form,
formset,
**kwargs
)
| 0sw4l/villas-de-san-pablo | apps/empleabilidad/forms.py | Python | mit | 1,944 |
from collidable import *
from math_3d import *
class PixelCollidable( Collidable ) :
def __init__(self) :
self.spm = None
self.r = None
| sphereflow/space_combat | src/pixel_collidable.py | Python | mit | 152 |
#!/usr/bin/env python
class Solution:
def maxArea(self, height):
"""
:type height: List[int]
:rtype: int
"""
i, j = 0, len(height)-1
l, r = height[i], height[j]
maxArea = (j - i) * min(l, r)
while j > i:
if l < r:
while height[i] <= l:
i += 1
elif r < l:
while height[j] <= r:
j -= 1
else:
i, j = i+1, j-1
l, r = height[i], height[j]
print(i, j, l, r)
area = (j - i) * min(l, r)
if area > maxArea:
maxArea = area
return maxArea
sol = Solution()
height_list = [
[1,8,6,2,5,4,8,3,7],
[1,2],
[1,2,4,3],
[2,3,4,5,18,17,6],
]
for height in height_list:
print(sol.maxArea(height))
| eroicaleo/LearningPython | interview/leet/011_Container_With_Most_Water.py | Python | mit | 845 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
from prompt_toolkit.completion import Completer, Completion
import azclishell.configuration
from azclishell.argfinder import ArgsFinder
from azclishell.command_tree import in_tree
from azclishell.layout import get_scope
from azclishell.util import parse_quotes
from azure.cli.core.parser import AzCliCommandParser
from azure.cli.core.util import CLIError
SELECT_SYMBOL = azclishell.configuration.SELECT_SYMBOL
def dynamic_param_logic(text):
""" validates parameter values for dynamic completion """
is_param = False
started_param = False
prefix = ""
param = ""
txtspt = text.split()
if txtspt:
param = txtspt[-1]
if param.startswith("-"):
is_param = True
elif len(txtspt) > 2 and txtspt[-2]\
and txtspt[-2].startswith('-'):
is_param = True
param = txtspt[-2]
started_param = True
prefix = txtspt[-1]
return is_param, started_param, prefix, param
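# Illustrative example (the command text is hypothetical, chosen for this note):
# dynamic_param_logic("vm create --name my") returns
# (is_param=True, started_param=True, prefix="my", param="--name"),
# i.e. the user has started typing a value for the --name parameter.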
def reformat_cmd(text):
""" reformat the text to be stripped of noise """
# remove az if there
text = text.replace('az', '')
# disregard defaulting symbols
if text and SELECT_SYMBOL['scope'] == text[0:2]:
text = text.replace(SELECT_SYMBOL['scope'], "")
if get_scope():
text = get_scope() + ' ' + text
return text
def gen_dyn_completion(comp, started_param, prefix, text):
""" how to validate and generate completion for dynamic params """
if len(comp.split()) > 1:
completion = '\"' + comp + '\"'
else:
completion = comp
if started_param:
if comp.lower().startswith(prefix.lower()) and comp not in text.split():
yield Completion(completion, -len(prefix))
else:
yield Completion(completion, -len(prefix))
def sort_completions(gen):
""" sorts the completions """
def _get_weight(val):
""" weights the completions with required things first the lexicographically"""
priority = ''
if val.display_meta and val.display_meta.startswith('[REQUIRED]'):
            priority = ' '  # a space sorts before any other character
return priority + val.text
return sorted(list(gen), key=_get_weight)
# pylint: disable=too-many-instance-attributes
class AzCompleter(Completer):
""" Completes Azure CLI commands """
def __init__(self, commands, global_params=True):
# dictionary of command to descriptions
self.command_description = commands.descrip
# from a command to a list of parameters
self.command_parameters = commands.command_param
# a list of all the possible parameters
self.completable_param = commands.completable_param
# the command tree
self.command_tree = commands.command_tree
# a dictionary of parameter (which is command + " " + parameter name)
# to a description of what it does
self.param_description = commands.param_descript
# a dictionary of command to examples of how to use it
self.command_examples = commands.command_example
# a dictionary of which parameters mean the same thing
self.same_param_doubles = commands.same_param_doubles or {}
self._is_command = True
self.branch = self.command_tree
self.curr_command = ""
self.global_param = commands.global_param if global_params else []
self.output_choices = commands.output_choices if global_params else []
self.output_options = commands.output_options if global_params else []
self.global_param_descriptions = commands.global_param_descriptions if global_params else []
self.global_parser = AzCliCommandParser(add_help=False)
self.global_parser.add_argument_group('global', 'Global Arguments')
self.parser = AzCliCommandParser(parents=[self.global_parser])
from azclishell._dump_commands import CMD_TABLE
self.cmdtab = CMD_TABLE
self.parser.load_command_table(CMD_TABLE)
self.argsfinder = ArgsFinder(self.parser)
def validate_completion(self, param, words, text_before_cursor, double=True):
""" validates that a param should be completed """
return param.lower().startswith(words.lower()) and param.lower() != words.lower() and\
param not in text_before_cursor.split() and not \
text_before_cursor[-1].isspace() and\
(not (double and param in self.same_param_doubles) or
self.same_param_doubles[param] not in text_before_cursor.split())
def get_completions(self, document, complete_event):
text = document.text_before_cursor
self.branch = self.command_tree
self.curr_command = ''
self._is_command = True
text = reformat_cmd(text)
if text.split():
for comp in sort_completions(self.gen_cmd_and_param_completions(text)):
yield comp
for cmd in sort_completions(self.gen_cmd_completions(text)):
yield cmd
for val in sort_completions(self.gen_dynamic_completions(text)):
yield val
for param in sort_completions(self.gen_global_param_completions(text)):
yield param
def gen_enum_completions(self, arg_name, text, started_param, prefix):
""" generates dynamic enumeration completions """
try: # if enum completion
for choice in self.cmdtab[
self.curr_command].arguments[arg_name].choices:
if started_param:
if choice.lower().startswith(prefix.lower())\
and choice not in text.split():
yield Completion(choice, -len(prefix))
else:
yield Completion(choice, -len(prefix))
except TypeError: # there is no choices option
pass
def get_arg_name(self, is_param, param):
""" gets the argument name used in the command table for a parameter """
if self.curr_command in self.cmdtab and is_param:
for arg in self.cmdtab[self.curr_command].arguments:
for name in self.cmdtab[self.curr_command].arguments[arg].options_list:
if name == param:
return arg
# pylint: disable=too-many-branches
def gen_dynamic_completions(self, text):
""" generates the dynamic values, like the names of resource groups """
try: # pylint: disable=too-many-nested-blocks
is_param, started_param, prefix, param = dynamic_param_logic(text)
# command table specific name
arg_name = self.get_arg_name(is_param, param)
if arg_name and ((text.split()[-1].startswith('-') and text[-1].isspace()) or
text.split()[-2].startswith('-')):
for comp in self.gen_enum_completions(arg_name, text, started_param, prefix):
yield comp
parse_args = self.argsfinder.get_parsed_args(
parse_quotes(text, quotes=False))
# there are 3 formats for completers the cli uses
# this try catches which format it is
if self.cmdtab[self.curr_command].arguments[arg_name].completer:
try:
for comp in self.cmdtab[self.curr_command].arguments[arg_name].completer(
parsed_args=parse_args):
for comp in gen_dyn_completion(
comp, started_param, prefix, text):
yield comp
except TypeError:
try:
for comp in self.cmdtab[self.curr_command].\
arguments[arg_name].completer(prefix):
for comp in gen_dyn_completion(
comp, started_param, prefix, text):
yield comp
except TypeError:
try:
for comp in self.cmdtab[self.curr_command].\
arguments[arg_name].completer():
for comp in gen_dyn_completion(
comp, started_param, prefix, text):
yield comp
except TypeError:
pass # other completion method used
except CLIError: # if the user isn't logged in
pass
def gen_cmd_completions(self, text):
""" whether is a space or no text typed, send the current branch """
        # if nothing has been typed, suggest the first-level commands
if not text.split() and self._is_command:
if self.branch.children is not None:
for com in self.branch.children:
yield Completion(com.data)
# if space show current level commands
elif len(text.split()) > 0 and text[-1].isspace() and self._is_command:
if self.branch is not self.command_tree:
for com in self.branch.children:
yield Completion(com.data)
def yield_param_completion(self, param, last_word):
""" yields a parameter """
return Completion(param, -len(last_word), display_meta=self.get_param_description(
self.curr_command + " " + str(param)).replace('\n', ''))
def gen_cmd_and_param_completions(self, text):
""" generates command and parameter completions """
temp_command = str('')
txtspt = text.split()
for word in txtspt:
if word.startswith("-"):
self._is_command = False
# building what the command is
elif self._is_command:
temp_command += ' ' + str(word) if temp_command else str(word)
mid_val = text.find(word) + len(word)
# moving down command tree
if self.branch.has_child(word) and len(text) > mid_val and text[mid_val].isspace():
self.branch = self.branch.get_child(word, self.branch.children)
if len(text) > 0 and text[-1].isspace():
if in_tree(self.command_tree, temp_command):
self.curr_command = temp_command
else:
self._is_command = False
else:
self.curr_command = temp_command
last_word = txtspt[-1]
# this is for single char parameters
if last_word.startswith("-") and not last_word.startswith("--"):
self._is_command = False
if self.has_parameters(self.curr_command):
for param in self.command_parameters[self.curr_command]:
if self.validate_completion(param, last_word, text) and\
not param.startswith("--"):
yield self.yield_param_completion(param, last_word)
elif last_word.startswith("--"): # for regular parameters
self._is_command = False
if self.has_parameters(self.curr_command): # Everything should, map to empty list
for param in self.command_parameters[self.curr_command]:
if self.validate_completion(param, last_word, text):
yield self.yield_param_completion(param, last_word)
if self.branch.children and self._is_command: # all underneath commands
for kid in self.branch.children:
if self.validate_completion(kid.data, txtspt[-1], text, False):
yield Completion(
str(kid.data), -len(txtspt[-1]))
elif self._is_command:
for param in self.command_parameters[self.curr_command.strip()]:
if param.startswith('--'):
yield self.yield_param_completion(param, '')
def gen_global_param_completions(self, text):
""" Global parameter stuff hard-coded in """
txtspt = text.split()
if txtspt and len(txtspt) > 0:
for param in self.global_param:
# for single dash global parameters
if txtspt[-1].startswith('-') \
and not txtspt[-1].startswith('--') and \
param.startswith('-') and not param.startswith('--') and\
self.validate_completion(param, txtspt[-1], text, double=False):
yield Completion(
param, -len(txtspt[-1]),
display_meta=self.global_param_descriptions[param])
# for double dash global parameters
elif txtspt[-1].startswith('--') and \
self.validate_completion(param, txtspt[-1], text, double=False):
yield Completion(
param, -len(txtspt[-1]),
display_meta=self.global_param_descriptions[param])
# if there is an output, gets the options without user typing
if txtspt[-1] in self.output_options:
for opt in self.output_choices:
yield Completion(opt)
# if there is an output option, if they have started typing
if len(txtspt) > 1 and\
txtspt[-2] in self.output_options:
for opt in self.output_choices:
if self.validate_completion(opt, txtspt[-1], text, double=False):
yield Completion(opt, -len(txtspt[-1]))
def is_completable(self, symbol):
""" whether the word can be completed as a command or parameter """
return self.has_parameters(symbol) or symbol in self.param_description.keys()
def get_param_description(self, param):
""" gets a description of an empty string """
if param in self.param_description:
return self.param_description[param]
else:
return ""
def has_parameters(self, command):
""" returns whether given command is valid """
return command in self.command_parameters.keys()
def has_description(self, param):
""" if a parameter has a description """
return param in self.param_description.keys() and \
not self.param_description[param].isspace()
| oakeyc/azure-cli-shell | azclishell/az_completer.py | Python | mit | 14,854 |
# I have to modify the DroidBox scripts to make them work with DroidBot
# This is a compatible version which generates a report in the same format as the original DroidBox
__author__ = 'yuanchun'
################################################################################
# (c) 2011, The Honeynet Project
# Author: Patrik Lantz [email protected] and Laurent Delosieres [email protected]
#
# This program is free software you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
"""
Analyze dynamically Android applications
This script allows you to analyze dynamically Android applications.
It installs, runs, and analyzes Android applications.
At the end of each analysis, it outputs the Android application's characteristics in JSON.
Please keep in mind that all data received/sent,
read/written are shown in hexadecimal since the handled data can contain binary data.
"""
import json, time, signal, os, sys
import zipfile
import subprocess
import threading
from threading import Thread
from xml.dom import minidom
from subprocess import call, PIPE, Popen
from utils import AXMLPrinter
import hashlib
tags = {0x1: "TAINT_LOCATION", 0x2: "TAINT_CONTACTS", 0x4: "TAINT_MIC", 0x8: "TAINT_PHONE_NUMBER",
0x10: "TAINT_LOCATION_GPS", 0x20: "TAINT_LOCATION_NET", 0x40: "TAINT_LOCATION_LAST", 0x80: "TAINT_CAMERA",
0x100: "TAINT_ACCELEROMETER", 0x200: "TAINT_SMS", 0x400: "TAINT_IMEI", 0x800: "TAINT_IMSI",
0x1000: "TAINT_ICCID", 0x2000: "TAINT_DEVICE_SN", 0x4000: "TAINT_ACCOUNT", 0x8000: "TAINT_BROWSER",
0x10000: "TAINT_OTHERDB", 0x20000: "TAINT_FILECONTENT", 0x40000: "TAINT_PACKAGE", 0x80000: "TAINT_CALL_LOG",
0x100000: "TAINT_EMAIL", 0x200000: "TAINT_CALENDAR", 0x400000: "TAINT_SETTINGS"}
class LostADBException(Exception):
pass
class DroidBox(object):
def __init__(self, output_dir=None):
self.sendsms = {}
self.phonecalls = {}
self.cryptousage = {}
self.dexclass = {}
self.dataleaks = {}
self.opennet = {}
self.sendnet = {}
self.recvnet = {}
self.closenet = {}
self.fdaccess = {}
self.servicestart = {}
self.accessedfiles = {}
self.enabled = True
self.adb = None
self.application = None
self.apk_name = None
self.apk_hashes = None
self.applicationStarted = 0
self.is_counting_logs = False
self.timer = None
if output_dir:
self.output_dir = output_dir
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
else:
            # Possibility that no output files are generated
self.output_dir = None
def set_apk(self, apk_name):
if not self.enabled:
return
if apk_name is None:
return
# APK existing?
if not os.path.isfile(apk_name):
print("File %s not found" % apk_name)
sys.exit(1)
self.apk_name = os.path.abspath(apk_name)
self.application = Application(apk_name)
ret = self.application.processAPK()
# Error during the APK processing?
if ret == 0:
print("Failed to analyze the APK. Terminate the analysis.")
sys.exit(1)
main_activity = self.application.getMainActivity()
package_name = self.application.getPackage()
self.apk_hashes = self.application.getHashes()
# No Main acitvity found? Return an error
if main_activity == None:
print("No activity to start. Terminate the analysis.")
sys.exit(1)
# No packages identified? Return an error
if package_name == None:
print("No package found. Terminate the analysis.")
sys.exit(1)
# Execute the application
call(["adb", "logcat", "-c"])
ret = call(['monkeyrunner', 'monkeyrunner.py', apk_name,
package_name, main_activity], stderr=None,
cwd=os.path.dirname(os.path.realpath(__file__)))
if (ret == 1):
print("Failed to execute the application.")
sys.exit(1)
print("Starting the activity %s..." % main_activity)
# By default the application has not started
self.applicationStarted = 0
stringApplicationStarted = "Start proc %s" % package_name
# Open the adb logcat
if self.adb is None:
self.adb = Popen(["adb", "logcat", "DroidBox:W", "dalvikvm:W", "ActivityManager:I"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
# Wait for the application to start
while 1:
try:
logcatInput = self.adb.stdout.readline()
if not logcatInput:
raise Exception("We have lost the connection with ADB.")
# Application started?
if (stringApplicationStarted in logcatInput):
self.applicationStarted = 1
break
except:
break
if (self.applicationStarted == 0):
print("Analysis has not been done.")
# Kill ADB, otherwise it will never terminate
os.kill(self.adb.pid, signal.SIGTERM)
sys.exit(1)
print("Application started")
def start_unblocked(self, duration=0):
droidbox_thread = threading.Thread(target=self.start_blocked, args=(duration,))
droidbox_thread.start()
def stop(self):
self.enabled = False
if self.timer and self.timer.isAlive():
self.timer.cancel()
if self.adb is not None:
self.adb.terminate()
self.adb = None
def start_blocked(self, duration=0):
if not self.enabled:
return
# curses.setupterm()
# sys.stdout.write(curses.tigetstr("clear"))
sys.stdout.flush()
call(["adb", "wait-for-device"])
call(['adb', 'logcat', '-c'])
print " ____ __ ____"
print "/\ _`\ __ /\ \/\ _`\\"
print "\ \ \/\ \ _ __ ___ /\_\ \_\ \ \ \L\ \ ___ __ _"
print " \ \ \ \ \/\`'__\ __`\/\ \ /'_` \ \ _ <' / __`\/\ \/'\\"
print " \ \ \_\ \ \ \/\ \L\ \ \ \/\ \L\ \ \ \L\ \\ \L\ \/> </"
print " \ \____/\ \_\ \____/\ \_\ \___,_\ \____/ \____//\_/\_\\"
print " \/___/ \/_/\/___/ \/_/\/__,_ /\/___/ \/___/ \//\/_/"
count = CountingThread()
count.start()
timeStamp = time.time()
if duration:
self.timer = threading.Timer(duration, self.stop)
self.timer.start()
if self.adb is None:
self.adb = Popen(["adb", "logcat", "-v", "threadtime", "DroidBox:W", "dalvikvm:W", "ActivityManager:I"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Collect DroidBox logs
self.is_counting_logs = True
self.lastScreenshot = 0
first_log_time = None
from droidbot.state_monitor import StateMonitor
state_monitor = StateMonitor()
state_monitor.start()
while self.enabled:
try:
if self.output_dir and (time.time() - self.lastScreenshot) >=5:
# Take screenshots every 5 seconds.
os.system("adb shell screencap -p | sed 's/\r$//' > %s" % os.path.join(self.output_dir, "screen") \
+ "_$(date +%Y-%m-%d_%H%M%S).png")
self.lastScreenshot = time.time()
logcatInput = self.adb.stdout.readline()
if not logcatInput:
raise LostADBException("We have lost the connection with ADB.")
from droidbot import utils
log_data = utils.parse_log(logcatInput)
if log_data is None or log_data['tag'] != "DroidBox":
continue
log_time = log_data['datetime']
if first_log_time is None:
first_log_time = log_time
log_delta_seconds = (log_time - first_log_time).total_seconds()
log_content = json.loads(decode(log_data['content']))
# DroidBox style report
try:
# dirty workaround: filter out the logs produced by DroidBot
# self.filter_noises(log_content)
# DexClassLoader
if log_content.has_key('DexClassLoader'):
log_content['DexClassLoader']['type'] = 'dexload'
self.dexclass[log_delta_seconds] = log_content['DexClassLoader']
count.increaseCount()
# service started
if log_content.has_key('ServiceStart'):
log_content['ServiceStart']['type'] = 'service'
self.servicestart[log_delta_seconds] = log_content['ServiceStart']
count.increaseCount()
# received data from net
if log_content.has_key('RecvNet'):
host = log_content['RecvNet']['srchost']
port = log_content['RecvNet']['srcport']
self.recvnet[log_delta_seconds] = recvdata = {'type': 'net read', 'host': host,
'port': port,
'data': log_content['RecvNet']['data']}
count.increaseCount()
# fdaccess
if log_content.has_key('FdAccess'):
self.accessedfiles[log_content['FdAccess']['id']] = hexToStr(log_content['FdAccess']['path'])
# file read or write
if log_content.has_key('FileRW'):
log_content['FileRW']['path'] = self.accessedfiles[log_content['FileRW']['id']]
if log_content['FileRW']['operation'] == 'write':
log_content['FileRW']['type'] = 'file write'
else:
log_content['FileRW']['type'] = 'file read'
self.fdaccess[log_delta_seconds] = log_content['FileRW']
count.increaseCount()
# opened network connection log
if log_content.has_key('OpenNet'):
self.opennet[log_delta_seconds] = log_content['OpenNet']
count.increaseCount()
# closed socket
if log_content.has_key('CloseNet'):
self.closenet[log_delta_seconds] = log_content['CloseNet']
count.increaseCount()
# outgoing network activity log
if log_content.has_key('SendNet'):
log_content['SendNet']['type'] = 'net write'
self.sendnet[log_delta_seconds] = log_content['SendNet']
count.increaseCount()
# data leak log
if log_content.has_key('DataLeak'):
my_time = log_delta_seconds
log_content['DataLeak']['type'] = 'leak'
log_content['DataLeak']['tag'] = getTags(int(log_content['DataLeak']['tag'], 16))
self.dataleaks[my_time] = log_content['DataLeak']
count.increaseCount()
if log_content['DataLeak']['sink'] == 'Network':
log_content['DataLeak']['type'] = 'net write'
self.sendnet[my_time] = log_content['DataLeak']
count.increaseCount()
elif log_content['DataLeak']['sink'] == 'File':
log_content['DataLeak']['path'] = self.accessedfiles[log_content['DataLeak']['id']]
if log_content['DataLeak']['operation'] == 'write':
log_content['DataLeak']['type'] = 'file write'
else:
log_content['DataLeak']['type'] = 'file read'
self.fdaccess[my_time] = log_content['DataLeak']
count.increaseCount()
elif log_content['DataLeak']['sink'] == 'SMS':
log_content['DataLeak']['type'] = 'sms'
self.sendsms[my_time] = log_content['DataLeak']
count.increaseCount()
# sent sms log
if log_content.has_key('SendSMS'):
log_content['SendSMS']['type'] = 'sms'
self.sendsms[log_delta_seconds] = log_content['SendSMS']
count.increaseCount()
# phone call log
if log_content.has_key('PhoneCall'):
log_content['PhoneCall']['type'] = 'call'
self.phonecalls[log_delta_seconds] = log_content['PhoneCall']
count.increaseCount()
# crypto api usage log
if log_content.has_key('CryptoUsage'):
log_content['CryptoUsage']['type'] = 'crypto'
self.cryptousage[log_delta_seconds] = log_content['CryptoUsage']
count.increaseCount()
except ValueError:
pass
except KeyboardInterrupt:
break
except LostADBException:
break
except Exception as e:
print(e.message)
continue
self.is_counting_logs = False
count.stopCounting()
count.join()
# Kill ADB, otherwise it will never terminate
self.stop()
self.adb = None
print json.dumps(self.get_output())
if self.output_dir is None:
return
with open(os.path.join(self.output_dir, "analysis.json"),"w") as jsonfile:
jsonfile.write(json.dumps(self.get_output(),sort_keys=True, indent=4))
def get_output(self):
# Done? Store the objects in a dictionary, transform it in a dict object and return it
output = dict()
# Sort the items by their key
output["dexclass"] = self.dexclass
output["servicestart"] = self.servicestart
output["recvnet"] = self.recvnet
output["opennet"] = self.opennet
output["sendnet"] = self.sendnet
output["closenet"] = self.closenet
output["accessedfiles"] = self.accessedfiles
output["dataleaks"] = self.dataleaks
output["fdaccess"] = self.fdaccess
output["sendsms"] = self.sendsms
output["phonecalls"] = self.phonecalls
output["cryptousage"] = self.cryptousage
output["recvsaction"] = self.application.getRecvsaction()
output["enfperm"] = self.application.getEnfperm()
output["hashes"] = self.apk_hashes
output["apkName"] = self.apk_name
return output
def get_counts(self):
output = dict()
# Sort the items by their key
output["dexclass"] = len(self.dexclass)
output["servicestart"] = len(self.servicestart)
output["recvnet"] = len(self.recvnet)
output["opennet"] = len(self.opennet)
output["sendnet"] = len(self.sendnet)
output["closenet"] = len(self.closenet)
output["dataleaks"] = len(self.dataleaks)
output["fdaccess"] = len(self.fdaccess)
output["sendsms"] = len(self.sendsms)
output["phonecalls"] = len(self.phonecalls)
output["cryptousage"] = len(self.cryptousage)
output["sum"] = sum(output.values())
return output
def filter_noises(self, log):
"""
        filter useless noise from the log
:param log: log of Droidbox in dict format
:return: boolean
"""
if isinstance(log, dict):
# DexClassLoader
if 'DexClassLoader' in log.keys():
if log['DexClassLoader']['path'] in DEXCLASSLOADER_EXCLUDED:
log.pop('DexClassLoader')
# fdaccess
if 'FdAccess' in log.keys():
for excluded_prefix in FDACCESS_EXCLUDED_PREFIX:
if hexToStr(log['FdAccess']['path']).startswith(excluded_prefix):
log.pop('FdAccess')
break
# file read or write
if 'FileRW' in log.keys():
if log['FileRW']['id'] not in self.accessedfiles.keys():
log.pop('FileRW')
return log
DEXCLASSLOADER_EXCLUDED = [
"/system/framework/monkey.jar",
"/system/framework/input.jar",
"/system/framework/am.jar",
]
FDACCESS_EXCLUDED_PREFIX = [
"pipe:",
"socket:",
"/dev/input/event",
]
class CountingThread(Thread):
"""
Used for user interface, showing in progress sign
and number of collected logs from the sandbox system
"""
def __init__(self):
"""
Constructor
"""
Thread.__init__(self)
self.stop = False
self.logs = 0
def stopCounting(self):
"""
Mark to stop this thread
"""
self.stop = True
def increaseCount(self):
self.logs += 1
def run(self):
"""
Update the progress sign and
number of collected logs
"""
signs = ['|', '/', '-', '\\']
counter = 0
while 1:
sign = signs[counter % len(signs)]
sys.stdout.write(" \033[132m[%s] Collected %s sandbox logs\033[1m (Ctrl-C to view logs)\r" % (
sign, str(self.logs)))
sys.stdout.flush()
time.sleep(0.5)
counter = counter + 1
if self.stop:
sys.stdout.write(
" \033[132m[%s] Collected %s sandbox logs\033[1m%s\r" % ('*', str(self.logs), ' ' * 25))
sys.stdout.flush()
break
class Application:
"""
Used for extracting information of an Android APK
"""
def __init__(self, filename):
self.filename = filename
self.packageNames = []
self.enfperm = []
self.permissions = []
self.recvs = []
self.activities = {}
self.recvsaction = {}
self.mainActivity = None
def processAPK(self):
xml = {}
error = True
try:
zip = zipfile.ZipFile(self.filename)
for i in zip.namelist():
if i == "AndroidManifest.xml":
try:
xml[i] = minidom.parseString(zip.read(i))
except:
xml[i] = minidom.parseString(AXMLPrinter(zip.read(i)).getBuff())
for item in xml[i].getElementsByTagName('manifest'):
self.packageNames.append(str(item.getAttribute("package")))
for item in xml[i].getElementsByTagName('permission'):
self.enfperm.append(str(item.getAttribute("android:name")))
for item in xml[i].getElementsByTagName('uses-permission'):
self.permissions.append(str(item.getAttribute("android:name")))
for item in xml[i].getElementsByTagName('receiver'):
self.recvs.append(str(item.getAttribute("android:name")))
for child in item.getElementsByTagName('action'):
self.recvsaction[str(item.getAttribute("android:name"))] = (
str(child.getAttribute("android:name")))
for item in xml[i].getElementsByTagName('activity'):
activity = str(item.getAttribute("android:name"))
self.activities[activity] = {}
self.activities[activity]["actions"] = list()
for child in item.getElementsByTagName('action'):
self.activities[activity]["actions"].append(str(child.getAttribute("android:name")))
for activity in self.activities:
for action in self.activities[activity]["actions"]:
if action == 'android.intent.action.MAIN':
self.mainActivity = activity
error = False
break
if (error == False):
return 1
else:
return 0
except:
return 0
def getEnfperm(self):
return self.enfperm
def getRecvsaction(self):
return self.recvsaction
def getMainActivity(self):
return self.mainActivity
def getActivities(self):
return self.activities
def getPermissions(self):
return self.permissions
def getRecvActions(self):
return self.recvsaction
def getPackage(self):
# One application has only one package name
return self.packageNames[0]
def getHashes(self, block_size=2 ** 8):
"""
Calculate MD5,SHA-1, SHA-256
hashes of APK input file
"""
md5 = hashlib.md5()
sha1 = hashlib.sha1()
sha256 = hashlib.sha256()
f = open(self.filename, 'rb')
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
sha1.update(data)
sha256.update(data)
return [md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest()]
def decode(s, encodings=('ascii', 'utf8', 'latin1')):
for encoding in encodings:
try:
return s.decode(encoding)
except UnicodeDecodeError:
pass
return s.decode('ascii', 'ignore')
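# Example added for clarity (input chosen for this note): decode('caf\xe9')
# fails to decode as ascii and utf8, so the latin1 fallback succeeds and the
# byte 0xe9 becomes the accented character U+00E9 instead of being dropped.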
def getTags(tagParam):
"""
Retrieve the tag names
"""
tagsFound = []
for tag in tags.keys():
if tagParam & tag != 0:
tagsFound.append(tags[tag])
return tagsFound
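# Example added for clarity (the value is hypothetical): getTags(0x401) returns
# a list containing "TAINT_LOCATION" (0x1) and "TAINT_IMEI" (0x400), because
# 0x401 has both of those bits set.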
def hexToStr(hexStr):
"""
Convert a string hex byte values into a byte string
"""
bytes = []
hexStr = ''.join(hexStr.split(" "))
for i in range(0, len(hexStr), 2):
bytes.append(chr(int(hexStr[i:i + 2], 16)))
return unicode(''.join(bytes), errors='replace')
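# Example added for clarity (input chosen for this note):
# hexToStr("48 65 6c 6c 6f") strips the spaces, converts each hex pair to a
# character and returns u"Hello".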
def interruptHandler(signum, frame):
"""
Raise interrupt for the blocking call 'logcatInput = sys.stdin.readline()'
"""
raise KeyboardInterrupt
def main():
argv = sys.argv
if len(argv) < 2 or len(argv) > 3:
print("Usage: droidbox_compatible.py filename.apk <duration in seconds>")
sys.exit(1)
duration = 0
# Duration given?
if len(argv) == 3:
duration = int(argv[2])
apkName = sys.argv[1]
# APK existing?
if os.path.isfile(apkName) == False:
print("File %s not found" % argv[1])
sys.exit(1)
droidbox = DroidBox()
droidbox.set_apk(apkName)
droidbox.start_blocked(duration)
# droidbox.get_output()
if __name__ == "__main__":
main()
| nastya/droidbot | droidbox_scripts/droidbox_compatible.py | Python | mit | 24,119 |
import os.path
import logging
_logger = logging.getLogger(__name__)
from operator import itemgetter
from tornado.web import Application, RequestHandler, StaticFileHandler
from tornado.ioloop import IOLoop
config = {
'DEBUG': True,
'PORT' : 5000
}
HANDLERS = []
ROOT_DIR = os.path.abspath(os.path.join(os.path.split(__file__)[0], os.path.pardir))
GFXTABLET_DIR = os.path.join(ROOT_DIR, "node_modules", "gfxtablet")
if os.path.exists(GFXTABLET_DIR):
import sys
sys.path.insert(0, GFXTABLET_DIR)
from GfxTablet import GfxTabletHandler
HANDLERS.append((r'/gfxtablet', GfxTabletHandler))
class MainHandler(RequestHandler):
def get(self):
self.render("index.html")
def main():
global HANDLERS
HANDLERS += [(r'/(.+)', StaticFileHandler, {'path': ROOT_DIR}),
(r'/', MainHandler)]
app = Application(HANDLERS,
debug=config.get('DEBUG', False), static_path=ROOT_DIR)
_logger.info("app.settings:\n%s" % '\n'.join(['%s: %s' % (k, str(v))
for k, v in sorted(app.settings.items(),
key=itemgetter(0))]))
port = config.get('PORT', 5000)
app.listen(port)
_logger.info("""
listening on port %d
press CTRL-c to terminate the server
-----------
Y A W V R B
*************************
*********************************
STARTING TORNADO APP!!!!!!!!!!!!!
*********************************
*************************
Y A W V R B
-----------
""" % port)
IOLoop.instance().start()
if __name__ == "__main__":
logging.basicConfig(level=(logging.DEBUG if config.get('DEBUG') else logging.INFO),
format="%(asctime)s: %(levelname)s %(name)s %(funcName)s %(lineno)d: %(message)s")
main()
| jzitelli/yawvrb.js | test/tornado_server.py | Python | mit | 1,909 |
"""
The Jaccard similarity coefficient is a commonly used indicator of the
similarity between two sets. Let U be a set and A and B be subsets of U,
then the Jaccard index/similarity is defined to be the ratio of the number
of elements of their intersection and the number of elements of their union.
Inspired from Wikipedia and
the book Mining of Massive Datasets [MMDS 2nd Edition, Chapter 3]
https://en.wikipedia.org/wiki/Jaccard_index
https://mmds.org
Jaccard similarity is widely used with MinHashing.
"""
def jaccard_similariy(setA, setB, alternativeUnion=False):
"""
Finds the jaccard similarity between two sets.
    Essentially, it's intersection over union.
The alternative way to calculate this is to take union as sum of the
number of items in the two sets. This will lead to jaccard similarity
of a set with itself be 1/2 instead of 1. [MMDS 2nd Edition, Page 77]
Parameters:
:setA (set,list,tuple): A non-empty set/list
:setB (set,list,tuple): A non-empty set/list
:alternativeUnion (boolean): If True, use sum of number of
items as union
Output:
(float) The jaccard similarity between the two sets.
Examples:
>>> setA = {'a', 'b', 'c', 'd', 'e'}
>>> setB = {'c', 'd', 'e', 'f', 'h', 'i'}
>>> jaccard_similariy(setA,setB)
0.375
>>> jaccard_similariy(setA,setA)
1.0
>>> jaccard_similariy(setA,setA,True)
0.5
>>> setA = ['a', 'b', 'c', 'd', 'e']
>>> setB = ('c', 'd', 'e', 'f', 'h', 'i')
>>> jaccard_similariy(setA,setB)
0.375
"""
if isinstance(setA, set) and isinstance(setB, set):
intersection = len(setA.intersection(setB))
if alternativeUnion:
union = len(setA) + len(setB)
else:
union = len(setA.union(setB))
return intersection / union
    if isinstance(setA, (list, tuple)) and isinstance(setB, (list, tuple)):
        intersection = [element for element in setA if element in setB]
        if alternativeUnion:
            union = len(setA) + len(setB)
            return len(intersection) / union
        else:
            union = setA + [element for element in setB if element not in setA]
            return len(intersection) / len(union)
if __name__ == "__main__":
setA = {"a", "b", "c", "d", "e"}
setB = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similariy(setA, setB))
| TheAlgorithms/Python | maths/jaccard_similarity.py | Python | mit | 2,365 |
# __author__ = MelissaChan
# -*- coding: utf-8 -*-
# 16-4-16 10:53 PM
import MySQLdb
def connect(id,name,gender,region,status,date,inter):
try:
conn = MySQLdb.connect(host='localhost',user='root',passwd=' ',port=3306)
cur = conn.cursor()
# cur.execute('create database if not exists PythonDB')
conn.select_db('Facebook')
# cur.execute('create table Test(id int,name varchar(20),info varchar(20))')
value = [id,name,gender,region,status,date,inter]
cur.execute('insert into info values(%s,%s,%s,%s,%s,%s,%s)',value)
# values = []
# for i in range(20):
# values.append((i,'Hello World!','My number is '+str(i)))
#
# cur.executemany('insert into Test values(%s,%s,%s)',values)
# cur.execute('update Test set name="ACdreamer" where id=3')
conn.commit()
cur.close()
conn.close()
print 'insert ok~'
except MySQLdb.Error,msg:
print "MySQL Error %d: %s" %(msg.args[0],msg.args[1])
| MelissaChan/Crawler_Facebook | Crawler/facebook_mysql.py | Python | mit | 1,040 |
from sys import platform
import unittest
import checksieve
class TestVariables(unittest.TestCase):
def test_set(self):
sieve = '''
require "variables";
set "honorific" "Mr";
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_mod_length(self):
sieve = '''
require "variables";
set :length "b" "${a}";
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_wrong_tag(self):
sieve = '''
require "variables";
set :mime "b" "c";
'''
self.assertTrue(checksieve.parse_string(sieve, True))
def test_set_wrong_arg(self):
sieve = '''
require "variables";
set "a" "b" "c";
'''
self.assertTrue(checksieve.parse_string(sieve, True))
def test_too_many_args(self):
sieve = '''
require "variables";
set "a" "b" "c" "d";
'''
self.assertTrue(checksieve.parse_string(sieve, True))
def test_test_string(self):
sieve = '''
require "variables";
set "state" "${state} pending";
if string :matches " ${state} " "* pending *" {
# the above test always succeeds
stop;
}
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_numeral_varname(self):
sieve = '''
require "variables";
set "1" "${state} pending";
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_bad_varname(self):
sieve = '''
require "variables";
set "bad-variable" "no dashes allowed!";
'''
self.assertTrue(checksieve.parse_string(sieve, True))
if __name__ == '__main__':
unittest.main()
| dburkart/check-sieve | test/5229/variables_test.py | Python | mit | 1,796 |
from spacyThrift import SpacyThrift
from spacyThrift.ttypes import Token
from spacy.en import English
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import logging
class Handler:
def __init__(self, nlp):
self.nlp = nlp
def tag(self, sentence):
        document = self.nlp(sentence, parse=False, entity=False)
return [Token(element.orth_, element.tag_, element.lemma_)
for element in document]
if __name__ == '__main__':
logging.basicConfig()
logger = logging.getLogger()
logger.info("Loading ...")
nlp = English(parser=False, tagger=True, entity=False)
handler = Handler(nlp)
processor = SpacyThrift.Processor(handler)
transport = TSocket.TServerSocket(port=9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
logger.info("Serving ...")
server.serve()
| pasupulaphani/spacy-nlp-docker | thrift/spacyThrift/service.py | Python | mit | 1,083 |